Merge r1555021 through r1565516 from trunk.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/HDFS-5535@1565519 13f79535-47bb-0310-9956-ffa450edef68
Tsz-wo Sze 2014-02-07 02:43:04 +00:00
commit ccf08d9dc8
131 changed files with 5600 additions and 461 deletions

View File

@ -300,6 +300,17 @@ prebuildWithoutPatch () {
{color:red}-1 patch{color}. Trunk compilation may be broken."
return 1
fi
echo "$MVN clean test javadoc:javadoc -DskipTests -Pdocs -D${PROJECT_NAME}PatchProcess > $PATCH_DIR/trunkJavadocWarnings.txt 2>&1"
$MVN clean test javadoc:javadoc -DskipTests -Pdocs -D${PROJECT_NAME}PatchProcess > $PATCH_DIR/trunkJavadocWarnings.txt 2>&1
if [[ $? != 0 ]] ; then
echo "Trunk javadoc compilation is broken?"
JIRA_COMMENT="$JIRA_COMMENT
{color:red}-1 patch{color}. Trunk compilation may be broken."
return 1
fi
return 0
}
@ -401,6 +412,11 @@ applyPatch () {
}
###############################################################################
calculateJavadocWarnings() {
WARNING_FILE="$1"
RET=$(egrep "^[0-9]+ warnings$" "$WARNING_FILE" | awk '{sum+=$1} END {print sum}')
}
### Check there are no javadoc warnings
checkJavadocWarnings () {
echo ""
@ -420,24 +436,29 @@ checkJavadocWarnings () {
(cd hadoop-common-project/hadoop-annotations; $MVN install > /dev/null 2>&1)
fi
$MVN clean test javadoc:javadoc -DskipTests -Pdocs -D${PROJECT_NAME}PatchProcess > $PATCH_DIR/patchJavadocWarnings.txt 2>&1
- javadocWarnings=`$GREP '\[WARNING\]' $PATCH_DIR/patchJavadocWarnings.txt | $AWK '/Javadoc Warnings/,EOF' | $GREP warning | $AWK 'BEGIN {total = 0} {total += 1} END {print total}'`
- echo ""
- echo ""
- echo "There appear to be $javadocWarnings javadoc warnings generated by the patched build."
- #There are 14 warnings that are caused by things that are caused by using sun internal APIs.
- #There are 2 warnings that are caused by the Apache DS Dn class used in MiniKdc.
- OK_JAVADOC_WARNINGS=16;
- ### if current warnings greater than OK_JAVADOC_WARNINGS
- if [[ $javadocWarnings -ne $OK_JAVADOC_WARNINGS ]] ; then
calculateJavadocWarnings "$PATCH_DIR/trunkJavadocWarnings.txt"
numTrunkJavadocWarnings=$RET
calculateJavadocWarnings "$PATCH_DIR/patchJavadocWarnings.txt"
numPatchJavadocWarnings=$RET
grep -i warning "$PATCH_DIR/trunkJavadocWarnings.txt" > "$PATCH_DIR/trunkJavadocWarningsFiltered.txt"
grep -i warning "$PATCH_DIR/patchJavadocWarnings.txt" > "$PATCH_DIR/patchJavadocWarningsFiltered.txt"
diff -u "$PATCH_DIR/trunkJavadocWarningsFiltered.txt" \
"$PATCH_DIR/patchJavadocWarningsFiltered.txt" > \
"$PATCH_DIR/diffJavadocWarnings.txt"
rm -f "$PATCH_DIR/trunkJavadocWarningsFiltered.txt" "$PATCH_DIR/patchJavadocWarningsFiltered.txt"
echo "There appear to be $numTrunkJavadocWarnings javadoc warnings before the patch and $numPatchJavadocWarnings javadoc warnings after applying the patch."
if [[ $numTrunkJavadocWarnings != "" && $numPatchJavadocWarnings != "" ]] ; then
if [[ $numPatchJavadocWarnings -gt $numTrunkJavadocWarnings ]] ; then
JIRA_COMMENT="$JIRA_COMMENT
- {color:red}-1 javadoc{color}. The javadoc tool appears to have generated `expr $(($javadocWarnings-$OK_JAVADOC_WARNINGS))` warning messages."
{color:red}-1 javadoc{color}. The javadoc tool appears to have generated `expr $(($numPatchJavadocWarnings-$numTrunkJavadocWarnings))` warning messages.
See $BUILD_URL/artifact/trunk/patchprocess/diffJavadocWarnings.txt for details."
return 1
fi
fi
JIRA_COMMENT="$JIRA_COMMENT
- {color:green}+1 javadoc{color}. The javadoc tool did not generate any warning messages."
{color:green}+1 javadoc{color}. There were no new javadoc warning messages."
return 0
}

View File

@ -113,6 +113,11 @@ Trunk (Unreleased)
HADOOP-10177. Create CLI tools for managing keys. (Larry McCay via omalley)
HADOOP-10244. TestKeyShell improperly tests the results of delete (Larry
McCay via omalley)
HADOOP-10325. Improve jenkins javadoc warnings from test-patch.sh (cmccabe)
BUG FIXES
HADOOP-9451. Fault single-layer config if node group topology is enabled.
@ -314,6 +319,15 @@ Release 2.4.0 - UNRELEASED
HADOOP-10320. Javadoc in InterfaceStability.java lacks final </ul>.
(René Nyffenegger via cnauroth)
HADOOP-10085. CompositeService should allow adding services while being
inited. (Steve Loughran via kasha)
HADOOP-10327. Trunk windows build broken after HDFS-5746.
(Vinay via cnauroth)
HADOOP-10330. TestFrameDecoder fails if it cannot bind port 12345.
(Arpit Agarwal)
Release 2.3.0 - UNRELEASED
INCOMPATIBLE CHANGES
@ -685,6 +699,8 @@ Release 2.3.0 - UNRELEASED
HADOOP-10311. Cleanup vendor names from the code base. (tucu)
HADOOP-10273. Fix 'mvn site'. (Arpit Agarwal)
Release 2.2.0 - 2013-10-13
INCOMPATIBLE CHANGES

View File

@ -34,13 +34,14 @@ public enum Policy {
HTTPS_ONLY,
HTTP_AND_HTTPS;
private static final Policy[] VALUES = values();
public static Policy fromString(String value) {
- if (HTTPS_ONLY.name().equalsIgnoreCase(value)) {
- return HTTPS_ONLY;
- } else if (HTTP_AND_HTTPS.name().equalsIgnoreCase(value)) {
- return HTTP_AND_HTTPS;
- }
- return HTTP_ONLY;
for (Policy p : VALUES) {
if (p.name().equalsIgnoreCase(value)) {
return p;
}
}
return null;
}
public boolean isHttpEnabled() {
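The rewritten fromString() above scans the cached values() array and returns null when nothing matches, instead of silently mapping unknown strings to HTTP_ONLY. The following standalone Java sketch (not part of this commit; class and method names are illustrative) shows the same lookup pattern and how a caller can detect a bad setting:

// A minimal sketch of the case-insensitive enum lookup used above. VALUES caches
// values() once because every values() call clones the backing array.
public class PolicyLookupSketch {
  enum Policy {
    HTTP_ONLY, HTTPS_ONLY, HTTP_AND_HTTPS;
    private static final Policy[] VALUES = values();
    static Policy fromString(String value) {
      for (Policy p : VALUES) {
        if (p.name().equalsIgnoreCase(value)) {
          return p;
        }
      }
      return null; // unknown value: the caller decides how to fail
    }
  }
  public static void main(String[] args) {
    System.out.println(Policy.fromString("https_only"));     // HTTPS_ONLY
    System.out.println(Policy.fromString("HTTP_AND_HTTPS")); // HTTP_AND_HTTPS
    System.out.println(Policy.fromString("bogus"));          // null
  }
}

Returning null pushes the error handling to callers such as DFSUtil.getHttpPolicy (changed later in this commit), which rejects unrecognized values explicitly.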

View File

@ -151,6 +151,13 @@ public static final RetryPolicy failoverOnNetworkException(
delayMillis, maxDelayBase);
}
public static final RetryPolicy failoverOnNetworkException(
RetryPolicy fallbackPolicy, int maxFailovers, int maxRetries,
long delayMillis, long maxDelayBase) {
return new FailoverOnNetworkExceptionRetry(fallbackPolicy, maxFailovers,
maxRetries, delayMillis, maxDelayBase);
}
static class TryOnceThenFail implements RetryPolicy {
@Override
public RetryAction shouldRetry(Exception e, int retries, int failovers,
@ -516,18 +523,25 @@ static class FailoverOnNetworkExceptionRetry implements RetryPolicy {
private RetryPolicy fallbackPolicy;
private int maxFailovers;
private int maxRetries;
private long delayMillis;
private long maxDelayBase;
public FailoverOnNetworkExceptionRetry(RetryPolicy fallbackPolicy,
int maxFailovers) {
- this(fallbackPolicy, maxFailovers, 0, 0);
this(fallbackPolicy, maxFailovers, 0, 0, 0);
}
public FailoverOnNetworkExceptionRetry(RetryPolicy fallbackPolicy,
int maxFailovers, long delayMillis, long maxDelayBase) {
this(fallbackPolicy, maxFailovers, 0, delayMillis, maxDelayBase);
}
public FailoverOnNetworkExceptionRetry(RetryPolicy fallbackPolicy,
int maxFailovers, int maxRetries, long delayMillis, long maxDelayBase) {
this.fallbackPolicy = fallbackPolicy;
this.maxFailovers = maxFailovers;
this.maxRetries = maxRetries;
this.delayMillis = delayMillis;
this.maxDelayBase = maxDelayBase;
}
@ -549,6 +563,10 @@ public RetryAction shouldRetry(Exception e, int retries,
"failovers (" + failovers + ") exceeded maximum allowed (" "failovers (" + failovers + ") exceeded maximum allowed ("
+ maxFailovers + ")"); + maxFailovers + ")");
} }
if (retries - failovers > maxRetries) {
return new RetryAction(RetryAction.RetryDecision.FAIL, 0, "retries ("
+ retries + ") exceeded maximum allowed (" + maxRetries + ")");
}
if (e instanceof ConnectException ||
e instanceof NoRouteToHostException ||
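The new shouldRetry() check above bounds plain retries separately from failovers: retries counts every attempt, so retries - failovers is the number of non-failover retries, and it must not exceed the new maxRetries. A small illustrative sketch of that bookkeeping (hypothetical names, not the Hadoop class):

// Standalone sketch of the two retry budgets in FailoverOnNetworkExceptionRetry.
public class RetryBudgetSketch {
  static String decide(int retries, int failovers, int maxFailovers, int maxRetries) {
    if (failovers >= maxFailovers) {
      return "FAIL: failovers (" + failovers + ") exceeded maximum allowed (" + maxFailovers + ")";
    }
    if (retries - failovers > maxRetries) {
      return "FAIL: retries (" + retries + ") exceeded maximum allowed (" + maxRetries + ")";
    }
    return "RETRY or FAILOVER depending on the exception type";
  }
  public static void main(String[] args) {
    // 5 attempts so far, 2 of them failovers, budget of 15 failovers / 2 retries:
    // 5 - 2 = 3 > 2, so the policy gives up even though failovers remain.
    System.out.println(decide(5, 2, 15, 2));
  }
}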

View File

@ -450,6 +450,14 @@ public void refreshServiceAcl(Configuration conf, PolicyProvider provider) {
serviceAuthorizationManager.refresh(conf, provider);
}
/**
* Refresh the service authorization ACL for the service handled by this server
* using the specified Configuration.
*/
public void refreshServiceAclWithConfigration(Configuration conf,
PolicyProvider provider) {
serviceAuthorizationManager.refreshWithConfiguration(conf, provider);
}
/**
* Returns a handle to the serviceAuthorizationManager (required in tests)
* @return instance of ServiceAuthorizationManager for this server

View File

@ -30,6 +30,8 @@
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.util.StringUtils;
import com.google.common.annotations.VisibleForTesting;
@InterfaceAudience.Private
public class ProxyUsers {
@ -177,4 +179,13 @@ private static boolean isWildcardList(Collection<String> list) {
(list.contains("*"));
}
@VisibleForTesting
public static Map<String, Collection<String>> getProxyGroups() {
return proxyGroups;
}
@VisibleForTesting
public static Map<String, Collection<String>> getProxyHosts() {
return proxyHosts;
}
}

View File

@ -33,6 +33,8 @@
import org.apache.hadoop.security.SecurityUtil;
import org.apache.hadoop.security.UserGroupInformation;
import com.google.common.annotations.VisibleForTesting;
/**
* An authorization manager which handles service-level authorization
* for incoming service requests.
@ -120,7 +122,11 @@ public synchronized void refresh(Configuration conf,
// Make a copy of the original config, and load the policy file
Configuration policyConf = new Configuration(conf);
policyConf.addResource(policyFile);
refreshWithConfiguration(policyConf, provider);
}
public synchronized void refreshWithConfiguration(Configuration conf,
PolicyProvider provider) {
final Map<Class<?>, AccessControlList> newAcls = final Map<Class<?>, AccessControlList> newAcls =
new IdentityHashMap<Class<?>, AccessControlList>(); new IdentityHashMap<Class<?>, AccessControlList>();
@ -130,7 +136,7 @@ public synchronized void refresh(Configuration conf,
for (Service service : services) {
AccessControlList acl =
new AccessControlList(
- policyConf.get(service.getServiceKey(),
conf.get(service.getServiceKey(),
AccessControlList.WILDCARD_ACL_VALUE)
);
newAcls.put(service.getProtocol(), acl);
@ -141,8 +147,13 @@ public synchronized void refresh(Configuration conf,
protocolToAcl = newAcls;
}
- // Package-protected for use in tests.
- Set<Class<?>> getProtocolsWithAcls() {
@VisibleForTesting
public Set<Class<?>> getProtocolsWithAcls() {
return protocolToAcl.keySet();
}
@VisibleForTesting
public AccessControlList getProtocolsAcls(Class<?> className) {
return protocolToAcl.get(className);
}
}

View File

@ -19,7 +19,6 @@
package org.apache.hadoop.service;
import java.util.ArrayList;
- import java.util.Collections;
import java.util.List;
import org.apache.commons.logging.Log;
@ -54,13 +53,13 @@ public CompositeService(String name) {
}
/**
- * Get an unmodifiable list of services
* Get a cloned list of services
* @return a list of child services at the time of invocation -
* added services will not be picked up.
*/
public List<Service> getServices() {
synchronized (serviceList) {
- return Collections.unmodifiableList(serviceList);
return new ArrayList<Service>(serviceList);
}
}
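Returning a copy rather than an unmodifiable view is what lets a child service register a sibling while the parent is still walking its children (the TestCompositeService changes later in this commit exercise exactly that). An illustrative standalone sketch, with hypothetical names, of why the snapshot is safe:

import java.util.ArrayList;
import java.util.List;

// Sketch: iterating a snapshot copy stays safe even if a child adds a sibling
// to the parent while the parent is iterating during init/start/stop.
public class SnapshotListSketch {
  private final List<String> children = new ArrayList<>();

  synchronized void addChild(String name) {
    children.add(name);
  }

  // Like the new getServices(): a defensive copy taken under the lock.
  synchronized List<String> getChildren() {
    return new ArrayList<>(children);
  }

  void initAll() {
    // An unmodifiable *view* of 'children' could throw
    // ConcurrentModificationException here if a child registered a sibling
    // mid-iteration; the snapshot cannot.
    for (String child : getChildren()) {
      if (child.equals("adder")) {
        addChild("sibling-added-during-init");
      }
    }
  }

  public static void main(String[] args) {
    SnapshotListSketch parent = new SnapshotListSketch();
    parent.addChild("adder");
    parent.initAll();
    System.out.println(parent.getChildren()); // [adder, sibling-added-during-init]
  }
}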

View File

@ -671,6 +671,7 @@ Java_org_apache_hadoop_io_nativeio_NativeIO_00024POSIX_mmap(
JNIEnv *env, jclass clazz, jobject jfd, jint jprot,
jboolean jshared, jlong length)
{
#ifdef UNIX
void *addr = 0;
int prot, flags, fd;
@ -684,18 +685,33 @@ Java_org_apache_hadoop_io_nativeio_NativeIO_00024POSIX_mmap(
throw_ioe(env, errno);
}
return (jlong)(intptr_t)addr;
#endif // UNIX
#ifdef WINDOWS
THROW(env, "java/io/IOException",
"The function POSIX.mmap() is not supported on Windows");
return NULL;
#endif
}
JNIEXPORT void JNICALL
Java_org_apache_hadoop_io_nativeio_NativeIO_00024POSIX_munmap(
JNIEnv *env, jclass clazz, jlong jaddr, jlong length)
{
#ifdef UNIX
void *addr;
addr = (void*)(intptr_t)jaddr;
if (munmap(addr, length) < 0) {
throw_ioe(env, errno);
}
#endif // UNIX
#ifdef WINDOWS
THROW(env, "java/io/IOException",
"The function POSIX.munmap() is not supported on Windows");
return NULL;
#endif
}
@ -1050,4 +1066,3 @@ JNIEnv *env, jclass clazz)
/**
* vim: sw=2: ts=2: et:
*/

View File

@ -352,7 +352,8 @@ Configuration for <<<conf/core-site.xml>>>
| | | This value is deprecated. Use dfs.http.policy |
*-------------------------+-------------------------+------------------------+
| <<<dfs.http.policy>>> | <HTTP_ONLY> or <HTTPS_ONLY> or <HTTP_AND_HTTPS> | |
- | | | HTTPS_ONLY turns off http access |
| | | HTTPS_ONLY turns off http access. This option takes precedence over |
| | | the deprecated configuration dfs.https.enable and hadoop.ssl.enabled. |
*-------------------------+-------------------------+------------------------+
| <<<dfs.namenode.https-address>>> | <nn_host_fqdn:50470> | |
*-------------------------+-------------------------+------------------------+

View File

@ -41,7 +41,7 @@ public void setup() throws Exception {
@Test
public void testKeySuccessfulKeyLifecycle() throws Exception {
- outContent.flush();
outContent.reset();
String[] args1 = {"create", "key1", "--provider",
"jceks://file" + tmpDir + "/keystore.jceks"};
int rc = 0;
@ -52,14 +52,14 @@ public void testKeySuccessfulKeyLifecycle() throws Exception {
assertTrue(outContent.toString().contains("key1 has been successfully " +
"created."));
- outContent.flush();
outContent.reset();
String[] args2 = {"list", "--provider",
"jceks://file" + tmpDir + "/keystore.jceks"};
rc = ks.run(args2);
assertEquals(0, rc);
assertTrue(outContent.toString().contains("key1"));
- outContent.flush();
outContent.reset();
String[] args3 = {"roll", "key1", "--provider",
"jceks://file" + tmpDir + "/keystore.jceks"};
rc = ks.run(args3);
@ -67,7 +67,7 @@ public void testKeySuccessfulKeyLifecycle() throws Exception {
assertTrue(outContent.toString().contains("key1 has been successfully " +
"rolled."));
- outContent.flush();
outContent.reset();
String[] args4 = {"delete", "key1", "--provider",
"jceks://file" + tmpDir + "/keystore.jceks"};
rc = ks.run(args4);
@ -75,12 +75,12 @@ public void testKeySuccessfulKeyLifecycle() throws Exception {
assertTrue(outContent.toString().contains("key1 has been successfully " +
"deleted."));
- outContent.flush();
outContent.reset();
String[] args5 = {"list", "--provider",
"jceks://file" + tmpDir + "/keystore.jceks"};
rc = ks.run(args5);
assertEquals(0, rc);
- assertTrue(outContent.toString().contains("key1"));
assertFalse(outContent.toString(), outContent.toString().contains("key1"));
}
@Test
@ -165,7 +165,7 @@ public void testFullCipher() throws Exception {
assertTrue(outContent.toString().contains("key1 has been successfully " +
"created."));
- outContent.flush();
outContent.reset();
String[] args2 = {"delete", "key1", "--provider",
"jceks://file" + tmpDir + "/keystore.jceks"};
rc = ks.run(args2);
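The switch from flush() to reset() matters because ByteArrayOutputStream.flush() is a no-op, so the captured output of earlier commands was never cleared and later assertions could match stale text. A small self-contained illustration (not the test itself):

import java.io.ByteArrayOutputStream;
import java.io.PrintStream;

// Shows why reset() (which clears the buffer) is the right call for a stream
// used to capture console output between test steps.
public class CaptureResetSketch {
  public static void main(String[] args) throws Exception {
    ByteArrayOutputStream out = new ByteArrayOutputStream();
    PrintStream ps = new PrintStream(out, true, "UTF-8");

    ps.println("key1 has been successfully created.");
    out.flush(); // does nothing for ByteArrayOutputStream
    System.out.println(out.toString("UTF-8").contains("key1")); // true (stale)

    out.reset(); // discard previously captured bytes
    ps.println("no keys listed");
    System.out.println(out.toString("UTF-8").contains("key1")); // false
  }
}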

View File

@ -16,26 +16,20 @@
* limitations under the License.
*/
- package org.apache.hadoop.yarn.util;
package org.apache.hadoop.service;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.service.Service.STATE;
import org.junit.Before;
import org.junit.Test;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
- import org.apache.commons.logging.Log;
- import org.apache.commons.logging.LogFactory;
- import org.apache.hadoop.conf.Configuration;
- import org.apache.hadoop.service.AbstractService;
- import org.apache.hadoop.service.BreakableService;
- import org.apache.hadoop.service.CompositeService;
- import org.apache.hadoop.service.Service;
- import org.apache.hadoop.service.ServiceStateException;
- import org.apache.hadoop.service.Service.STATE;
- import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
- import org.junit.Before;
- import org.junit.Test;
public class TestCompositeService {
private static final int NUM_OF_SERVICES = 5;
@ -156,7 +150,7 @@ public void testServiceStartup() {
try {
serviceManager.start();
fail("Exception should have been thrown due to startup failure of last service");
- } catch (YarnRuntimeException e) {
} catch (ServiceTestRuntimeException e) {
for (int i = 0; i < NUM_OF_SERVICES - 1; i++) {
if (i >= FAILED_SERVICE_SEQ_NUMBER && STOP_ONLY_STARTED_SERVICES) {
// Failed service state should be INITED
@ -197,7 +191,7 @@ public void testServiceStop() {
// Stop the composite service
try {
serviceManager.stop();
- } catch (YarnRuntimeException e) {
} catch (ServiceTestRuntimeException e) {
}
assertInState(STATE.STOPPED, services);
}
@ -338,6 +332,40 @@ public void serviceInit(Configuration conf) {
1, testService.getServices().size());
}
@Test(timeout = 1000)
public void testAddInitedSiblingInInit() throws Throwable {
CompositeService parent = new CompositeService("parent");
BreakableService sibling = new BreakableService();
sibling.init(new Configuration());
parent.addService(new AddSiblingService(parent,
sibling,
STATE.INITED));
parent.init(new Configuration());
parent.start();
parent.stop();
assertEquals("Incorrect number of services",
2, parent.getServices().size());
}
@Test(timeout = 1000)
public void testAddUninitedSiblingInInit() throws Throwable {
CompositeService parent = new CompositeService("parent");
BreakableService sibling = new BreakableService();
parent.addService(new AddSiblingService(parent,
sibling,
STATE.INITED));
parent.init(new Configuration());
try {
parent.start();
fail("Expected an exception, got " + parent);
} catch (ServiceStateException e) {
//expected
}
parent.stop();
assertEquals("Incorrect number of services",
2, parent.getServices().size());
}
@Test
public void testRemoveService() {
CompositeService testService = new CompositeService("TestService") {
@ -365,6 +393,105 @@ public void serviceInit(Configuration conf) {
2, testService.getServices().size());
}
@Test(timeout = 1000)
public void testAddStartedChildBeforeInit() throws Throwable {
CompositeService parent = new CompositeService("parent");
BreakableService child = new BreakableService();
child.init(new Configuration());
child.start();
AddSiblingService.addChildToService(parent, child);
try {
parent.init(new Configuration());
fail("Expected an exception, got " + parent);
} catch (ServiceStateException e) {
//expected
}
parent.stop();
}
@Test(timeout = 1000)
public void testAddStoppedChildBeforeInit() throws Throwable {
CompositeService parent = new CompositeService("parent");
BreakableService child = new BreakableService();
child.init(new Configuration());
child.start();
child.stop();
AddSiblingService.addChildToService(parent, child);
try {
parent.init(new Configuration());
fail("Expected an exception, got " + parent);
} catch (ServiceStateException e) {
//expected
}
parent.stop();
}
@Test(timeout = 1000)
public void testAddStartedSiblingInStart() throws Throwable {
CompositeService parent = new CompositeService("parent");
BreakableService sibling = new BreakableService();
sibling.init(new Configuration());
sibling.start();
parent.addService(new AddSiblingService(parent,
sibling,
STATE.STARTED));
parent.init(new Configuration());
parent.start();
parent.stop();
assertEquals("Incorrect number of services",
2, parent.getServices().size());
}
@Test(timeout = 1000)
public void testAddUninitedSiblingInStart() throws Throwable {
CompositeService parent = new CompositeService("parent");
BreakableService sibling = new BreakableService();
parent.addService(new AddSiblingService(parent,
sibling,
STATE.STARTED));
parent.init(new Configuration());
assertInState(STATE.NOTINITED, sibling);
parent.start();
parent.stop();
assertEquals("Incorrect number of services",
2, parent.getServices().size());
}
@Test(timeout = 1000)
public void testAddStartedSiblingInInit() throws Throwable {
CompositeService parent = new CompositeService("parent");
BreakableService sibling = new BreakableService();
sibling.init(new Configuration());
sibling.start();
parent.addService(new AddSiblingService(parent,
sibling,
STATE.INITED));
parent.init(new Configuration());
assertInState(STATE.STARTED, sibling);
parent.start();
assertInState(STATE.STARTED, sibling);
parent.stop();
assertEquals("Incorrect number of services",
2, parent.getServices().size());
assertInState(STATE.STOPPED, sibling);
}
@Test(timeout = 1000)
public void testAddStartedSiblingInStop() throws Throwable {
CompositeService parent = new CompositeService("parent");
BreakableService sibling = new BreakableService();
sibling.init(new Configuration());
sibling.start();
parent.addService(new AddSiblingService(parent,
sibling,
STATE.STOPPED));
parent.init(new Configuration());
parent.start();
parent.stop();
assertEquals("Incorrect number of services",
2, parent.getServices().size());
}
public static class CompositeServiceAddingAChild extends CompositeService{
Service child;
@ -380,6 +507,17 @@ protected void serviceInit(Configuration conf) throws Exception {
}
}
public static class ServiceTestRuntimeException extends RuntimeException {
public ServiceTestRuntimeException(String message) {
super(message);
}
}
/**
* This is a composite service that keeps a count of the number of lifecycle
* events called, and can be set to throw a {@link ServiceTestRuntimeException }
* during service start or stop
*/
public static class CompositeServiceImpl extends CompositeService {
public static boolean isPolicyToStopOnlyStartedServices() {
@ -408,7 +546,7 @@ protected void serviceInit(Configuration conf) throws Exception {
@Override
protected void serviceStart() throws Exception {
if (throwExceptionOnStart) {
- throw new YarnRuntimeException("Fake service start exception");
throw new ServiceTestRuntimeException("Fake service start exception");
}
counter++;
callSequenceNumber = counter;
@ -420,7 +558,7 @@ protected void serviceStop() throws Exception {
counter++;
callSequenceNumber = counter;
if (throwExceptionOnStop) {
- throw new YarnRuntimeException("Fake service stop exception");
throw new ServiceTestRuntimeException("Fake service stop exception");
}
super.serviceStop();
}
@ -457,6 +595,9 @@ public String toString() {
}
/**
* Composite service that makes the addService method public to all
*/
public static class ServiceManager extends CompositeService {
public void addTestService(CompositeService service) {
@ -468,4 +609,55 @@ public ServiceManager(String name) {
}
}
public static class AddSiblingService extends CompositeService {
private final CompositeService parent;
private final Service serviceToAdd;
private STATE triggerState;
public AddSiblingService(CompositeService parent,
Service serviceToAdd,
STATE triggerState) {
super("ParentStateManipulatorService");
this.parent = parent;
this.serviceToAdd = serviceToAdd;
this.triggerState = triggerState;
}
/**
* Add the serviceToAdd to the parent if this service
* is in the state requested
*/
private void maybeAddSibling() {
if (getServiceState() == triggerState) {
parent.addService(serviceToAdd);
}
}
@Override
protected void serviceInit(Configuration conf) throws Exception {
maybeAddSibling();
super.serviceInit(conf);
}
@Override
protected void serviceStart() throws Exception {
maybeAddSibling();
super.serviceStart();
}
@Override
protected void serviceStop() throws Exception {
maybeAddSibling();
super.serviceStop();
}
/**
* Expose addService method
* @param parent parent service
* @param child child to add
*/
public static void addChildToService(CompositeService parent, Service child) {
parent.addService(child);
}
}
}

View File

@ -51,14 +51,6 @@ public class IdUserGroup {
private long lastUpdateTime = 0; // Last time maps were updated
- static public class DuplicateNameOrIdException extends IOException {
- private static final long serialVersionUID = 1L;
- public DuplicateNameOrIdException(String msg) {
- super(msg);
- }
- }
public IdUserGroup() throws IOException {
updateMaps();
}
@ -80,7 +72,8 @@ private void checkAndUpdateMaps() {
}
}
- private static final String DUPLICATE_NAME_ID_DEBUG_INFO = "NFS gateway can't start with duplicate name or id on the host system.\n"
private static final String DUPLICATE_NAME_ID_DEBUG_INFO =
"NFS gateway could have problem starting with duplicate name or id on the host system.\n"
+ "This is because HDFS (non-kerberos cluster) uses name as the only way to identify a user or group.\n"
+ "The host system with duplicated user/group name or id might work fine most of the time by itself.\n"
+ "However when NFS gateway talks to HDFS, HDFS accepts only user and group name.\n"
@ -88,6 +81,16 @@ private void checkAndUpdateMaps() {
+ "<getent passwd | cut -d: -f1,3> and <getent group | cut -d: -f1,3> on Linux systms,\n" + "<getent passwd | cut -d: -f1,3> and <getent group | cut -d: -f1,3> on Linux systms,\n"
+ "<dscl . -list /Users UniqueID> and <dscl . -list /Groups PrimaryGroupID> on MacOS."; + "<dscl . -list /Users UniqueID> and <dscl . -list /Groups PrimaryGroupID> on MacOS.";
private static void reportDuplicateEntry(final String header,
final Integer key, final String value,
final Integer ekey, final String evalue) {
LOG.warn("\n" + header + String.format(
"new entry (%d, %s), existing entry: (%d, %s).\n%s\n%s",
key, value, ekey, evalue,
"The new entry is to be ignored for the following reason.",
DUPLICATE_NAME_ID_DEBUG_INFO));
}
/**
* Get the whole list of users and groups and save them in the maps.
* @throws IOException
@ -108,22 +111,27 @@ public static void updateMapInternal(BiMap<Integer, String> map, String mapName,
}
LOG.debug("add to " + mapName + "map:" + nameId[0] + " id:" + nameId[1]);
// HDFS can't differentiate duplicate names with simple authentication
- Integer key = Integer.valueOf(nameId[1]);
- String value = nameId[0];
final Integer key = Integer.valueOf(nameId[1]);
final String value = nameId[0];
if (map.containsKey(key)) {
- LOG.error(String.format(
- "Got duplicate id:(%d, %s), existing entry: (%d, %s).\n%s", key,
- value, key, map.get(key), DUPLICATE_NAME_ID_DEBUG_INFO));
- throw new DuplicateNameOrIdException("Got duplicate id.");
final String prevValue = map.get(key);
if (value.equals(prevValue)) {
// silently ignore equivalent entries
continue;
}
reportDuplicateEntry(
"Got multiple names associated with the same id: ",
key, value, key, prevValue);
continue;
}
- if (map.containsValue(nameId[0])) {
- LOG.error(String.format(
- "Got duplicate name:(%d, %s), existing entry: (%d, %s) \n%s", key,
- key, value, map.inverse().get(value), value,
- DUPLICATE_NAME_ID_DEBUG_INFO));
- throw new DuplicateNameOrIdException("Got duplicate name");
- }
- map.put(Integer.valueOf(nameId[1]), nameId[0]);
if (map.containsValue(value)) {
final Integer prevKey = map.inverse().get(value);
reportDuplicateEntry(
"Got multiple ids associated with the same name: ",
key, value, prevKey, value);
continue;
}
map.put(key, value);
}
LOG.info("Updated " + mapName + " map size:" + map.size());

View File

@ -17,11 +17,10 @@
*/
package org.apache.hadoop.nfs.nfs3;
- import static org.junit.Assert.fail;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import java.io.IOException;
- import org.apache.hadoop.nfs.nfs3.IdUserGroup.DuplicateNameOrIdException;
import org.junit.Test;
import com.google.common.collect.BiMap;
@ -33,24 +32,36 @@ public class TestIdUserGroup {
public void testDuplicates() throws IOException {
String GET_ALL_USERS_CMD = "echo \"root:x:0:0:root:/root:/bin/bash\n"
+ "hdfs:x:11501:10787:Grid Distributed File System:/home/hdfs:/bin/bash\n"
- + "hdfs:x:11502:10788:Grid Distributed File System:/home/hdfs:/bin/bash\""
+ "hdfs:x:11502:10788:Grid Distributed File System:/home/hdfs:/bin/bash\n"
+ "hdfs1:x:11501:10787:Grid Distributed File System:/home/hdfs:/bin/bash\n"
+ "hdfs2:x:11502:10787:Grid Distributed File System:/home/hdfs:/bin/bash\n"
+ "bin:x:2:2:bin:/bin:/bin/sh\n"
+ "bin:x:1:1:bin:/bin:/sbin/nologin\n"
+ "daemon:x:1:1:daemon:/usr/sbin:/bin/sh\n"
+ "daemon:x:2:2:daemon:/sbin:/sbin/nologin\""
+ " | cut -d: -f1,3"; + " | cut -d: -f1,3";
String GET_ALL_GROUPS_CMD = "echo \"hdfs:*:11501:hrt_hdfs\n" String GET_ALL_GROUPS_CMD = "echo \"hdfs:*:11501:hrt_hdfs\n"
+ "mapred:x:497\n" + "mapred2:x:497\"" + " | cut -d: -f1,3"; + "mapred:x:497\n"
+ "mapred2:x:497\n"
+ "mapred:x:498\n"
+ "mapred3:x:498\""
+ " | cut -d: -f1,3";
// Maps for id to name map
BiMap<Integer, String> uMap = HashBiMap.create();
BiMap<Integer, String> gMap = HashBiMap.create();
- try {
IdUserGroup.updateMapInternal(uMap, "user", GET_ALL_USERS_CMD, ":");
- fail("didn't detect the duplicate name");
- } catch (DuplicateNameOrIdException e) {
- }
assertTrue(uMap.size() == 5);
assertEquals(uMap.get(0), "root");
assertEquals(uMap.get(11501), "hdfs");
assertEquals(uMap.get(11502), "hdfs2");
assertEquals(uMap.get(2), "bin");
assertEquals(uMap.get(1), "daemon");
- try {
IdUserGroup.updateMapInternal(gMap, "group", GET_ALL_GROUPS_CMD, ":");
- fail("didn't detect the duplicate id");
- } catch (DuplicateNameOrIdException e) {
- }
assertTrue(gMap.size() == 3);
assertEquals(gMap.get(11501), "hdfs");
assertEquals(gMap.get(497), "mapred");
assertEquals(gMap.get(498), "mapred3");
}
}

View File

@ -23,6 +23,7 @@
import static org.junit.Assert.assertTrue;
import java.nio.ByteBuffer;
import java.util.Random;
import org.apache.hadoop.oncrpc.RpcUtil.RpcFrameDecoder;
import org.apache.hadoop.oncrpc.security.CredentialsNone;
@ -31,17 +32,17 @@
import org.jboss.netty.buffer.ChannelBuffer;
import org.jboss.netty.buffer.ChannelBuffers;
import org.jboss.netty.channel.Channel;
import org.jboss.netty.channel.ChannelException;
import org.jboss.netty.channel.ChannelHandlerContext;
import org.junit.Test;
import org.mockito.Mockito;
public class TestFrameDecoder {
- private static int port = 12345; // some random server port
private static int resultSize;
- static void testRequest(XDR request) {
static void testRequest(XDR request, int serverPort) {
- SimpleTcpClient tcpClient = new SimpleTcpClient("localhost", port, request,
SimpleTcpClient tcpClient = new SimpleTcpClient("localhost", serverPort, request,
true);
tcpClient.run();
}
@ -148,10 +149,25 @@ public void testMultipleFrames() {
@Test
public void testFrames() {
Random rand = new Random();
int serverPort = 30000 + rand.nextInt(10000);
int retries = 10; // A few retries in case initial choice is in use.
while (true) {
try {
RpcProgram program = new TestFrameDecoder.TestRpcProgram("TestRpcProgram",
- "localhost", port, 100000, 1, 2);
"localhost", serverPort, 100000, 1, 2);
- SimpleTcpServer tcpServer = new SimpleTcpServer(port, program, 1);
SimpleTcpServer tcpServer = new SimpleTcpServer(serverPort, program, 1);
tcpServer.run();
break; // Successfully bound a port, break out.
} catch (ChannelException ce) {
if (retries-- > 0) {
serverPort += rand.nextInt(20); // Port in use? Try another.
} else {
throw ce; // Out of retries.
}
}
}
XDR xdrOut = createGetportMount();
int headerSize = xdrOut.size();
@ -161,7 +177,7 @@ public void testFrames() {
int requestSize = xdrOut.size() - headerSize;
// Send the request to the server
- testRequest(xdrOut);
testRequest(xdrOut, serverPort);
// Verify the server got the request with right size
assertEquals(requestSize, resultSize);
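The test now binds to a randomly chosen high port and, when the bind fails, nudges the port and retries rather than hard-coding 12345. A generic, self-contained illustration of the same pattern with java.net.ServerSocket (the test itself catches Netty's ChannelException from SimpleTcpServer instead):

import java.io.IOException;
import java.net.ServerSocket;
import java.util.Random;

// Bind-with-retry sketch: pick a random high port, try a nearby one on failure.
public class RandomPortSketch {
  public static void main(String[] args) throws IOException {
    Random rand = new Random();
    int serverPort = 30000 + rand.nextInt(10000);
    int retries = 10; // a few retries in case the initial choice is in use

    ServerSocket server = null;
    while (server == null) {
      try {
        server = new ServerSocket(serverPort);
      } catch (IOException e) {
        if (retries-- > 0) {
          serverPort += rand.nextInt(20); // port in use? try another nearby
        } else {
          throw e; // out of retries
        }
      }
    }
    System.out.println("bound to port " + server.getLocalPort());
    server.close();
  }
}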

View File

@ -308,6 +308,9 @@ Release 2.4.0 - UNRELEASED
HDFS-5746. Add ShortCircuitSharedMemorySegment (cmccabe)
HDFS-4911. Reduce PeerCache timeout to be commensurate with
dfs.datanode.socket.reuse.keepalive (cmccabe)
OPTIMIZATIONS
HDFS-5790. LeaseManager.findPath is very slow when many leases need recovery
@ -324,6 +327,27 @@ Release 2.4.0 - UNRELEASED
HDFS-5856. DataNode.checkDiskError might throw NPE.
(Josh Elser via suresh)
HDFS-5828. BlockPlacementPolicyWithNodeGroup can place multiple replicas on
the same node group when dfs.namenode.avoid.write.stale.datanode is true.
(Buddy via junping_du)
HDFS-5767. NFS implementation assumes userName userId mapping to be unique,
which is not true sometimes (Yongjun Zhang via brandonli)
HDFS-5791. TestHttpsFileSystem should use a random port to avoid binding
error during testing (Haohui Mai via brandonli)
HDFS-5709. Improve NameNode upgrade with existing reserved paths and path
components. (Andrew Wang via atm)
HDFS-5881. Fix skip() of the short-circuit local reader(legacy). (kihwal)
HDFS-5895. HDFS cacheadmin -listPools has exit_code of 1 when the command
returns 0 result. (Tassapol Athiapinya via cnauroth)
HDFS-5807. TestBalancerWithNodeGroup.testBalancerWithNodeGroup fails
intermittently. (Chen He via kihwal)
Release 2.3.0 - UNRELEASED
INCOMPATIBLE CHANGES
@ -853,6 +877,15 @@ Release 2.3.0 - UNRELEASED
HDFS-5842. Cannot create hftp filesystem when using a proxy user ugi and a doAs
on a secure cluster. (jing9)
HDFS-5399. Revisit SafeModeException and corresponding retry policies.
(Jing Zhao via todd)
HDFS-5876. SecureDataNodeStarter does not pick up configuration in
hdfs-site.xml. (Haohui Mai via jing9)
HDFS-5873. dfs.http.policy should have higher precedence over dfs.https.enable.
(Haohui Mai via jing9)
BREAKDOWN OF HDFS-2832 SUBTASKS AND RELATED JIRAS
HDFS-4985. Add storage type to the protocol and expose it in block report

View File

@ -629,7 +629,7 @@ public synchronized long skip(long n) throws IOException {
skipBuf = new byte[bytesPerChecksum];
}
int ret = read(skipBuf, 0, (int)(n - remaining));
- return ret;
return (remaining + ret);
}
// optimize for big gap: discard the current buffer, skip to
@ -660,9 +660,9 @@ public synchronized long skip(long n) throws IOException {
int ret = read(skipBuf, 0, myOffsetFromChunkBoundary);
if (ret == -1) { // EOS
- return toskip;
return (toskip + remaining);
} else {
- return (toskip + ret);
return (toskip + remaining + ret);
}
}
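The fix above changes only the value skip() reports: the bytes already buffered (remaining) are discarded as part of the skip, so they have to be counted in the return value or the caller's position bookkeeping drifts. A tiny worked example of the arithmetic (illustrative numbers only):

// Worked example: 512 bytes were buffered (remaining) and 300 more were read
// and discarded to reach the target offset (ret). The stream really advanced
// by 812 bytes, so returning only ret under-reports the skip.
public class SkipReturnSketch {
  public static void main(String[] args) {
    long remaining = 512; // bytes discarded from the current buffer
    long ret = 300;       // bytes read (and discarded) past the buffer
    System.out.println("old return value: " + ret);                 // 300
    System.out.println("fixed return value: " + (remaining + ret)); // 812
  }
}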

View File

@ -36,6 +36,8 @@
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_FAILOVER_SLEEPTIME_BASE_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_FAILOVER_SLEEPTIME_MAX_DEFAULT;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_FAILOVER_SLEEPTIME_MAX_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_RETRY_MAX_ATTEMPTS_DEFAULT;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_RETRY_MAX_ATTEMPTS_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_MAX_BLOCK_ACQUIRE_FAILURES_DEFAULT;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_MAX_BLOCK_ACQUIRE_FAILURES_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_READ_PREFETCH_SIZE_KEY;
@ -260,6 +262,7 @@ synchronized void unref(ClientMmapManager mmapManager) {
public static class Conf {
final int hdfsTimeout; // timeout value for a DFS operation.
final int maxFailoverAttempts;
final int maxRetryAttempts;
final int failoverSleepBaseMillis;
final int failoverSleepMaxMillis;
final int maxBlockAcquireFailures;
@ -305,6 +308,9 @@ public Conf(Configuration conf) {
maxFailoverAttempts = conf.getInt(
DFS_CLIENT_FAILOVER_MAX_ATTEMPTS_KEY,
DFS_CLIENT_FAILOVER_MAX_ATTEMPTS_DEFAULT);
maxRetryAttempts = conf.getInt(
DFS_CLIENT_RETRY_MAX_ATTEMPTS_KEY,
DFS_CLIENT_RETRY_MAX_ATTEMPTS_DEFAULT);
failoverSleepBaseMillis = conf.getInt(
DFS_CLIENT_FAILOVER_SLEEPTIME_BASE_KEY,
DFS_CLIENT_FAILOVER_SLEEPTIME_BASE_DEFAULT);

View File

@ -82,9 +82,11 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
public static final int DFS_CLIENT_FAILOVER_CONNECTION_RETRIES_DEFAULT = 0;
public static final String DFS_CLIENT_FAILOVER_CONNECTION_RETRIES_ON_SOCKET_TIMEOUTS_KEY = "dfs.client.failover.connection.retries.on.timeouts";
public static final int DFS_CLIENT_FAILOVER_CONNECTION_RETRIES_ON_SOCKET_TIMEOUTS_DEFAULT = 0;
public static final String DFS_CLIENT_RETRY_MAX_ATTEMPTS_KEY = "dfs.client.retry.max.attempts";
public static final int DFS_CLIENT_RETRY_MAX_ATTEMPTS_DEFAULT = 10;
public static final String DFS_CLIENT_SOCKET_CACHE_EXPIRY_MSEC_KEY = "dfs.client.socketcache.expiryMsec";
- public static final long DFS_CLIENT_SOCKET_CACHE_EXPIRY_MSEC_DEFAULT = 2 * 60 * 1000;
public static final long DFS_CLIENT_SOCKET_CACHE_EXPIRY_MSEC_DEFAULT = 3000;
public static final String DFS_CLIENT_WRITE_EXCLUDE_NODES_CACHE_EXPIRY_INTERVAL = "dfs.client.write.exclude.nodes.cache.expiry.interval.millis";
public static final long DFS_CLIENT_WRITE_EXCLUDE_NODES_CACHE_EXPIRY_INTERVAL_DEFAULT = 10 * 60 * 1000; // 10 minutes, in ms
public static final String DFS_NAMENODE_BACKUP_ADDRESS_KEY = "dfs.namenode.backup.address";
@ -213,7 +215,7 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
public static final String DFS_DATANODE_SYNCONCLOSE_KEY = "dfs.datanode.synconclose";
public static final boolean DFS_DATANODE_SYNCONCLOSE_DEFAULT = false;
public static final String DFS_DATANODE_SOCKET_REUSE_KEEPALIVE_KEY = "dfs.datanode.socket.reuse.keepalive";
- public static final int DFS_DATANODE_SOCKET_REUSE_KEEPALIVE_DEFAULT = 1000;
public static final int DFS_DATANODE_SOCKET_REUSE_KEEPALIVE_DEFAULT = 4000;
public static final String DFS_NAMENODE_DATANODE_REGISTRATION_IP_HOSTNAME_CHECK_KEY = "dfs.namenode.datanode.registration.ip-hostname-check";
public static final boolean DFS_NAMENODE_DATANODE_REGISTRATION_IP_HOSTNAME_CHECK_DEFAULT = true;
@ -576,6 +578,8 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
public static final String DFS_HTTP_CLIENT_RETRY_POLICY_SPEC_DEFAULT = "10000,6,60000,10"; //t1,n1,t2,n2,...
public static final String DFS_HTTP_CLIENT_FAILOVER_MAX_ATTEMPTS_KEY = "dfs.http.client.failover.max.attempts";
public static final int DFS_HTTP_CLIENT_FAILOVER_MAX_ATTEMPTS_DEFAULT = 15;
public static final String DFS_HTTP_CLIENT_RETRY_MAX_ATTEMPTS_KEY = "dfs.http.client.retry.max.attempts";
public static final int DFS_HTTP_CLIENT_RETRY_MAX_ATTEMPTS_DEFAULT = 10;
public static final String DFS_HTTP_CLIENT_FAILOVER_SLEEPTIME_BASE_KEY = "dfs.http.client.failover.sleep.base.millis";
public static final int DFS_HTTP_CLIENT_FAILOVER_SLEEPTIME_BASE_DEFAULT = 500;
public static final String DFS_HTTP_CLIENT_FAILOVER_SLEEPTIME_MAX_KEY = "dfs.http.client.failover.sleep.max.millis";

View File

@ -1345,6 +1345,14 @@ public synchronized void seek(long targetPos) throws IOException {
pos += blockReader.skip(diff);
if (pos == targetPos) {
done = true;
} else {
// The range was already checked. If the block reader returns
// something unexpected instead of throwing an exception, it is
// most likely a bug.
String errMsg = "BlockReader failed to seek to " +
targetPos + ". Instead, it seeked to " + pos + ".";
DFSClient.LOG.warn(errMsg);
throw new IOException(errMsg);
}
} catch (IOException e) {//make following read to retry
if(DFSClient.LOG.isDebugEnabled()) {

View File

@ -261,6 +261,47 @@ public static boolean isValidName(String src) {
return true;
}
/**
* Checks if a string is a valid path component. For instance, components
* cannot contain a ":" or "/", and cannot be equal to a reserved component
* like ".snapshot".
* <p>
* The primary use of this method is for validating paths when loading the
* FSImage. During normal NN operation, paths are sometimes allowed to
* contain reserved components.
*
* @return If component is valid
*/
public static boolean isValidNameForComponent(String component) {
if (component.equals(".") ||
component.equals("..") ||
component.indexOf(":") >= 0 ||
component.indexOf("/") >= 0) {
return false;
}
return !isReservedPathComponent(component);
}
/**
* Returns if the component is reserved.
*
* <p>
* Note that some components are only reserved under certain directories, e.g.
* "/.reserved" is reserved, while "/hadoop/.reserved" is not.
*
* @param component
* @return if the component is reserved
*/
public static boolean isReservedPathComponent(String component) {
for (String reserved : HdfsConstants.RESERVED_PATH_COMPONENTS) {
if (component.equals(reserved)) {
return true;
}
}
return false;
}
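A standalone sketch of the component checks introduced above, with the reserved names hard-coded for illustration (the real list lives in HdfsConstants.RESERVED_PATH_COMPONENTS, added later in this commit):

import java.util.Arrays;
import java.util.List;

// Sketch of the validity rules: no "." or "..", no ':' or '/', and no reserved
// component such as ".snapshot" or ".reserved".
public class PathComponentCheckSketch {
  private static final List<String> RESERVED = Arrays.asList(".reserved", ".snapshot");

  static boolean isValidNameForComponent(String component) {
    if (component.equals(".") || component.equals("..")
        || component.indexOf(":") >= 0 || component.indexOf("/") >= 0) {
      return false;
    }
    return !RESERVED.contains(component);
  }

  public static void main(String[] args) {
    System.out.println(isValidNameForComponent("data"));      // true
    System.out.println(isValidNameForComponent(".snapshot")); // false (reserved)
    System.out.println(isValidNameForComponent("a:b"));       // false (':' illegal)
  }
}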
/**
* Converts a byte array to a string using UTF8 encoding.
*/
@ -313,6 +354,24 @@ public static String byteArray2PathString(byte[][] pathComponents) {
return result.toString();
}
/**
* Converts a list of path components into a path using Path.SEPARATOR.
*
* @param components Path components
* @return Combined path as a UTF-8 string
*/
public static String strings2PathString(String[] components) {
if (components.length == 0) {
return "";
}
if (components.length == 1) {
if (components[0] == null || components[0].isEmpty()) {
return Path.SEPARATOR;
}
}
return Joiner.on(Path.SEPARATOR).join(components);
}
/**
* Given a list of path components returns a byte array
*/
@ -1508,31 +1567,34 @@ public static String getSpnegoKeytabKey(Configuration conf, String defaultKey) {
* configuration settings.
*/
public static HttpConfig.Policy getHttpPolicy(Configuration conf) {
- String httpPolicy = conf.get(DFSConfigKeys.DFS_HTTP_POLICY_KEY,
- DFSConfigKeys.DFS_HTTP_POLICY_DEFAULT);
- HttpConfig.Policy policy = HttpConfig.Policy.fromString(httpPolicy);
- if (policy == HttpConfig.Policy.HTTP_ONLY) {
- boolean httpsEnabled = conf.getBoolean(
- DFSConfigKeys.DFS_HTTPS_ENABLE_KEY,
- DFSConfigKeys.DFS_HTTPS_ENABLE_DEFAULT);
- boolean hadoopSslEnabled = conf.getBoolean(
- CommonConfigurationKeys.HADOOP_SSL_ENABLED_KEY,
- CommonConfigurationKeys.HADOOP_SSL_ENABLED_DEFAULT);
- if (hadoopSslEnabled) {
- LOG.warn(CommonConfigurationKeys.HADOOP_SSL_ENABLED_KEY
- + " is deprecated. Please use "
- + DFSConfigKeys.DFS_HTTPS_ENABLE_KEY + ".");
- policy = HttpConfig.Policy.HTTPS_ONLY;
- } else if (httpsEnabled) {
- LOG.warn(DFSConfigKeys.DFS_HTTPS_ENABLE_KEY
- + " is deprecated. Please use "
- + DFSConfigKeys.DFS_HTTPS_ENABLE_KEY + ".");
- policy = HttpConfig.Policy.HTTP_AND_HTTPS;
- }
- }
String policyStr = conf.get(DFSConfigKeys.DFS_HTTP_POLICY_KEY);
if (policyStr == null) {
boolean https = conf.getBoolean(DFSConfigKeys.DFS_HTTPS_ENABLE_KEY,
DFSConfigKeys.DFS_HTTPS_ENABLE_DEFAULT);
boolean hadoopSsl = conf.getBoolean(
CommonConfigurationKeys.HADOOP_SSL_ENABLED_KEY,
CommonConfigurationKeys.HADOOP_SSL_ENABLED_DEFAULT);
if (hadoopSsl) {
LOG.warn(CommonConfigurationKeys.HADOOP_SSL_ENABLED_KEY
+ " is deprecated. Please use " + DFSConfigKeys.DFS_HTTP_POLICY_KEY
+ ".");
}
if (https) {
LOG.warn(DFSConfigKeys.DFS_HTTPS_ENABLE_KEY
+ " is deprecated. Please use " + DFSConfigKeys.DFS_HTTP_POLICY_KEY
+ ".");
}
return (hadoopSsl || https) ? HttpConfig.Policy.HTTP_AND_HTTPS
: HttpConfig.Policy.HTTP_ONLY;
}
HttpConfig.Policy policy = HttpConfig.Policy.fromString(policyStr);
if (policy == null) {
throw new HadoopIllegalArgumentException("Unregonized value '"
+ policyStr + "' for " + DFSConfigKeys.DFS_HTTP_POLICY_KEY);
}
conf.set(DFSConfigKeys.DFS_HTTP_POLICY_KEY, policy.name());
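The rewritten getHttpPolicy() gives an explicit dfs.http.policy priority over the deprecated dfs.https.enable and hadoop.ssl.enabled switches, which can now at most select HTTP_AND_HTTPS. A hypothetical standalone sketch of that precedence (plain arguments instead of a Configuration object):

// Sketch of the precedence only; it does not touch Hadoop classes.
public class HttpPolicyPrecedenceSketch {
  enum Policy { HTTP_ONLY, HTTPS_ONLY, HTTP_AND_HTTPS }

  static Policy resolve(String dfsHttpPolicy, boolean dfsHttpsEnable, boolean hadoopSslEnabled) {
    if (dfsHttpPolicy == null) {
      // fall back to the deprecated boolean switches
      return (dfsHttpsEnable || hadoopSslEnabled) ? Policy.HTTP_AND_HTTPS : Policy.HTTP_ONLY;
    }
    for (Policy p : Policy.values()) {
      if (p.name().equalsIgnoreCase(dfsHttpPolicy)) {
        return p;
      }
    }
    throw new IllegalArgumentException("Unrecognized value '" + dfsHttpPolicy + "' for dfs.http.policy");
  }

  public static void main(String[] args) {
    System.out.println(resolve(null, true, false));         // HTTP_AND_HTTPS (deprecated flag)
    System.out.println(resolve("HTTPS_ONLY", true, false)); // HTTPS_ONLY (explicit policy wins)
    System.out.println(resolve(null, false, false));        // HTTP_ONLY
  }
}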

View File

@ -24,6 +24,8 @@
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_FAILOVER_SLEEPTIME_BASE_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_FAILOVER_SLEEPTIME_MAX_DEFAULT;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_FAILOVER_SLEEPTIME_MAX_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_RETRY_MAX_ATTEMPTS_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_RETRY_MAX_ATTEMPTS_DEFAULT;
import java.io.IOException;
import java.lang.reflect.Constructor;
@ -144,9 +146,10 @@ public static <T> ProxyAndInfo<T> createProxy(Configuration conf,
.createFailoverProxyProvider(conf, failoverProxyProviderClass, xface,
nameNodeUri);
Conf config = new Conf(conf);
- T proxy = (T) RetryProxy.create(xface, failoverProxyProvider, RetryPolicies
- .failoverOnNetworkException(RetryPolicies.TRY_ONCE_THEN_FAIL,
- config.maxFailoverAttempts, config.failoverSleepBaseMillis,
T proxy = (T) RetryProxy.create(xface, failoverProxyProvider,
RetryPolicies.failoverOnNetworkException(
RetryPolicies.TRY_ONCE_THEN_FAIL, config.maxFailoverAttempts,
config.maxRetryAttempts, config.failoverSleepBaseMillis,
config.failoverSleepMaxMillis));
Text dtService = HAUtil.buildTokenServiceForLogicalUri(nameNodeUri);
@ -192,11 +195,14 @@ public static <T> ProxyAndInfo<T> createProxyWithLossyRetryHandler(
int maxFailoverAttempts = config.getInt(
DFS_CLIENT_FAILOVER_MAX_ATTEMPTS_KEY,
DFS_CLIENT_FAILOVER_MAX_ATTEMPTS_DEFAULT);
int maxRetryAttempts = config.getInt(
DFS_CLIENT_RETRY_MAX_ATTEMPTS_KEY,
DFS_CLIENT_RETRY_MAX_ATTEMPTS_DEFAULT);
InvocationHandler dummyHandler = new LossyRetryInvocationHandler<T>( InvocationHandler dummyHandler = new LossyRetryInvocationHandler<T>(
numResponseToDrop, failoverProxyProvider, numResponseToDrop, failoverProxyProvider,
RetryPolicies.failoverOnNetworkException( RetryPolicies.failoverOnNetworkException(
RetryPolicies.TRY_ONCE_THEN_FAIL, RetryPolicies.TRY_ONCE_THEN_FAIL, maxFailoverAttempts,
Math.max(numResponseToDrop + 1, maxFailoverAttempts), delay, Math.max(numResponseToDrop + 1, maxRetryAttempts), delay,
maxCap)); maxCap));
T proxy = (T) Proxy.newProxyInstance( T proxy = (T) Proxy.newProxyInstance(
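The change above threads a separate retry cap (dfs.client.retry.max.attempts) through the failover policy, alongside the existing failover cap. A small sketch of building such a policy with the five-argument RetryPolicies.failoverOnNetworkException overload used in the hunk; the numeric values are illustrative stand-ins for the configured defaults.

    import org.apache.hadoop.io.retry.RetryPolicies;
    import org.apache.hadoop.io.retry.RetryPolicy;

    class RetryPolicyExample {
      // Builds the same shape of policy as the hunk above; values are illustrative.
      static RetryPolicy buildClientRetryPolicy() {
        int maxFailoverAttempts = 15;       // cap on failovers between NameNodes
        int maxRetryAttempts = 10;          // new, separate cap on per-call retries
        int failoverSleepBaseMillis = 500;  // backoff base between attempts
        int failoverSleepMaxMillis = 15000;
        return RetryPolicies.failoverOnNetworkException(
            RetryPolicies.TRY_ONCE_THEN_FAIL, maxFailoverAttempts, maxRetryAttempts,
            failoverSleepBaseMillis, failoverSleepMaxMillis);
      }

      public static void main(String[] args) {
        System.out.println(buildClientRetryPolicy());
      }
    }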

View File
@ -18,6 +18,7 @@
package org.apache.hadoop.hdfs; package org.apache.hadoop.hdfs;
import java.io.IOException;
import java.util.Iterator; import java.util.Iterator;
import java.util.List; import java.util.List;
import java.util.Map.Entry; import java.util.Map.Entry;
@ -25,6 +26,7 @@
import com.google.common.annotations.VisibleForTesting; import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Preconditions; import com.google.common.base.Preconditions;
import com.google.common.collect.LinkedListMultimap; import com.google.common.collect.LinkedListMultimap;
import org.apache.commons.logging.Log; import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory; import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hdfs.protocol.DatanodeID; import org.apache.hadoop.hdfs.protocol.DatanodeID;
@ -118,6 +120,11 @@ public static synchronized PeerCache getInstance(int c, long e) {
return instance; return instance;
} }
@VisibleForTesting
public static synchronized void setInstance(int c, long e) {
instance = new PeerCache(c, e);
}
private boolean isDaemonStarted() { private boolean isDaemonStarted() {
return (daemon == null)? false: true; return (daemon == null)? false: true;
} }
@ -171,8 +178,17 @@ public synchronized Peer get(DatanodeID dnId, boolean isDomain) {
while (iter.hasNext()) { while (iter.hasNext()) {
Value candidate = iter.next(); Value candidate = iter.next();
iter.remove(); iter.remove();
if (!candidate.getPeer().isClosed()) { long ageMs = Time.monotonicNow() - candidate.getTime();
return candidate.getPeer(); Peer peer = candidate.getPeer();
if (ageMs >= expiryPeriod) {
try {
peer.close();
} catch (IOException e) {
LOG.warn("got IOException closing stale peer " + peer +
", which is " + ageMs + " ms old");
}
} else if (!peer.isClosed()) {
return peer;
} }
} }
return null; return null;
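The PeerCache change above stops handing out cached peers that have outlived expiryPeriod; they are closed on the spot instead. A self-contained sketch of that expire-on-read pattern using only the JDK; the class below is illustrative and is not the PeerCache API.

    import java.io.Closeable;
    import java.io.IOException;
    import java.util.ArrayDeque;
    import java.util.Deque;

    // Generic expire-on-read handling, mirroring the PeerCache change:
    // stale entries are closed and skipped rather than returned to the caller.
    class ExpiringPool<T extends Closeable> {
      private static class Entry<T> {
        final T value;
        final long insertTimeMs;
        Entry(T value, long insertTimeMs) { this.value = value; this.insertTimeMs = insertTimeMs; }
      }

      private final Deque<Entry<T>> entries = new ArrayDeque<>();
      private final long expiryPeriodMs;

      ExpiringPool(long expiryPeriodMs) { this.expiryPeriodMs = expiryPeriodMs; }

      synchronized void put(T value) {
        entries.addLast(new Entry<>(value, System.nanoTime() / 1_000_000));
      }

      /** Returns a non-expired value, closing any stale ones found on the way. */
      synchronized T get() {
        long nowMs = System.nanoTime() / 1_000_000;
        while (!entries.isEmpty()) {
          Entry<T> candidate = entries.pollFirst();
          if (nowMs - candidate.insertTimeMs >= expiryPeriodMs) {
            try {
              candidate.value.close();          // stale: close instead of reuse
            } catch (IOException e) {
              // best effort, matching the warning-only handling in PeerCache
            }
          } else {
            return candidate.value;             // still fresh: hand it out
          }
        }
        return null;
      }
    }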

View File
@ -25,10 +25,9 @@
import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSUtil; import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.HdfsConfiguration; import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.server.datanode.DataNode;
import org.apache.hadoop.hdfs.server.datanode.DataNodeLayoutVersion; import org.apache.hadoop.hdfs.server.datanode.DataNodeLayoutVersion;
import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.apache.hadoop.hdfs.server.namenode.NameNodeLayoutVersion; import org.apache.hadoop.hdfs.server.namenode.NameNodeLayoutVersion;
import org.apache.hadoop.hdfs.server.namenode.FSDirectory;
/************************************ /************************************
* Some handy constants * Some handy constants
@ -142,6 +141,16 @@ public static enum DatanodeReportType {
public static final int DATANODE_LAYOUT_VERSION public static final int DATANODE_LAYOUT_VERSION
= DataNodeLayoutVersion.CURRENT_LAYOUT_VERSION; = DataNodeLayoutVersion.CURRENT_LAYOUT_VERSION;
/**
* Path components that are reserved in HDFS.
* <p>
* .reserved is only reserved under root ("/").
*/
public static final String[] RESERVED_PATH_COMPONENTS = new String[] {
HdfsConstants.DOT_SNAPSHOT_DIR,
FSDirectory.DOT_RESERVED_STRING
};
/** /**
* A special path component contained in the path for a snapshot file/dir * A special path component contained in the path for a snapshot file/dir
*/ */
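With the new RESERVED_PATH_COMPONENTS constant, callers can check a path component against every reserved name in one place. A tiny illustrative check, not the DFSUtil.isReservedPathComponent helper referenced elsewhere in this patch:

    import org.apache.hadoop.hdfs.protocol.HdfsConstants;

    class ReservedNameCheck {
      // Illustrative linear scan over the constant introduced above.
      static boolean isReservedComponent(String component) {
        for (String reserved : HdfsConstants.RESERVED_PATH_COMPONENTS) {
          if (reserved.equals(component)) {
            return true;
          }
        }
        return false;
      }

      public static void main(String[] args) {
        System.out.println(isReservedComponent(".snapshot"));   // true
        System.out.println(isReservedComponent("data"));        // false
      }
    }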

View File
@ -317,7 +317,7 @@ private Node chooseTarget(int numOfReplicas,
// We need to additionally exclude the nodes that were added to the // We need to additionally exclude the nodes that were added to the
// result list in the successful calls to choose*() above. // result list in the successful calls to choose*() above.
for (DatanodeStorageInfo resultStorage : results) { for (DatanodeStorageInfo resultStorage : results) {
oldExcludedNodes.add(resultStorage.getDatanodeDescriptor()); addToExcludedNodes(resultStorage.getDatanodeDescriptor(), oldExcludedNodes);
} }
// Set numOfReplicas, since it can get out of sync with the result list // Set numOfReplicas, since it can get out of sync with the result list
// if the NotEnoughReplicasException was thrown in chooseRandom(). // if the NotEnoughReplicasException was thrown in chooseRandom().

View File
@ -79,7 +79,8 @@ static public enum StartupOption{
INITIALIZESHAREDEDITS("-initializeSharedEdits"), INITIALIZESHAREDEDITS("-initializeSharedEdits"),
RECOVER ("-recover"), RECOVER ("-recover"),
FORCE("-force"), FORCE("-force"),
NONINTERACTIVE("-nonInteractive"); NONINTERACTIVE("-nonInteractive"),
RENAMERESERVED("-renameReserved");
private final String name; private final String name;

View File
@ -362,13 +362,13 @@ private void startInfoServer(Configuration conf) throws IOException {
.setConf(conf).setACL(new AccessControlList(conf.get(DFS_ADMIN, " "))); .setConf(conf).setACL(new AccessControlList(conf.get(DFS_ADMIN, " ")));
HttpConfig.Policy policy = DFSUtil.getHttpPolicy(conf); HttpConfig.Policy policy = DFSUtil.getHttpPolicy(conf);
InetSocketAddress infoSocAddr = DataNode.getInfoAddr(conf);
String infoHost = infoSocAddr.getHostName();
if (policy.isHttpEnabled()) { if (policy.isHttpEnabled()) {
if (secureResources == null) { if (secureResources == null) {
InetSocketAddress infoSocAddr = DataNode.getInfoAddr(conf);
int port = infoSocAddr.getPort(); int port = infoSocAddr.getPort();
builder.addEndpoint(URI.create("http://" + infoHost + ":" + port)); builder.addEndpoint(URI.create("http://"
+ NetUtils.getHostPortString(infoSocAddr)));
if (port == 0) { if (port == 0) {
builder.setFindPort(true); builder.setFindPort(true);
} }
@ -381,7 +381,7 @@ private void startInfoServer(Configuration conf) throws IOException {
if (policy.isHttpsEnabled()) { if (policy.isHttpsEnabled()) {
InetSocketAddress secInfoSocAddr = NetUtils.createSocketAddr(conf.get( InetSocketAddress secInfoSocAddr = NetUtils.createSocketAddr(conf.get(
DFS_DATANODE_HTTPS_ADDRESS_KEY, infoHost + ":" + 0)); DFS_DATANODE_HTTPS_ADDRESS_KEY, DFS_DATANODE_HTTPS_ADDRESS_DEFAULT));
Configuration sslConf = DFSUtil.loadSslConfiguration(conf); Configuration sslConf = DFSUtil.loadSslConfiguration(conf);
DFSUtil.loadSslConfToHttpServerBuilder(builder, sslConf); DFSUtil.loadSslConfToHttpServerBuilder(builder, sslConf);
@ -390,7 +390,8 @@ private void startInfoServer(Configuration conf) throws IOException {
if (port == 0) { if (port == 0) {
builder.setFindPort(true); builder.setFindPort(true);
} }
builder.addEndpoint(URI.create("https://" + infoHost + ":" + port)); builder.addEndpoint(URI.create("https://"
+ NetUtils.getHostPortString(secInfoSocAddr)));
} }
this.infoServer = builder.build(); this.infoServer = builder.build();
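The endpoint URIs above are now built from the full resolved socket address via NetUtils.getHostPortString rather than a hand-assembled host:port string. A minimal sketch of that construction; the address and port below are examples.

    import java.net.InetSocketAddress;
    import java.net.URI;
    import org.apache.hadoop.net.NetUtils;

    class EndpointUriExample {
      public static void main(String[] args) {
        InetSocketAddress infoSocAddr = new InetSocketAddress("0.0.0.0", 50075);
        // getHostPortString keeps host and port together, so wildcard binds and
        // configured HTTPS addresses survive intact instead of being rebuilt by hand.
        URI endpoint = URI.create("http://" + NetUtils.getHostPortString(infoSocAddr));
        System.out.println(endpoint);   // typically http://0.0.0.0:50075
      }
    }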

View File
@ -25,6 +25,7 @@
import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSUtil; import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants; import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
import org.apache.hadoop.http.HttpConfig; import org.apache.hadoop.http.HttpConfig;
import org.apache.hadoop.http.HttpServer2; import org.apache.hadoop.http.HttpServer2;
@ -62,7 +63,9 @@ public SecureResources(ServerSocket streamingSocket,
@Override @Override
public void init(DaemonContext context) throws Exception { public void init(DaemonContext context) throws Exception {
System.err.println("Initializing secure datanode resources"); System.err.println("Initializing secure datanode resources");
Configuration conf = new Configuration(); // Create a new HdfsConfiguration object to ensure that the configuration in
// hdfs-site.xml is picked up.
Configuration conf = new HdfsConfiguration();
// Stash command-line arguments for regular datanode // Stash command-line arguments for regular datanode
args = context.getArguments(); args = context.getArguments();
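The one-line fix above matters because of how Hadoop configuration resources are loaded: a plain Configuration knows nothing about hdfs-site.xml. A short sketch of the difference; the key used below is just an example.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.HdfsConfiguration;

    class ConfLoadingExample {
      public static void main(String[] args) {
        // Plain Configuration only loads core-default.xml / core-site.xml,
        // so HDFS-specific overrides in hdfs-site.xml are silently ignored.
        Configuration plain = new Configuration();

        // HdfsConfiguration additionally registers hdfs-default.xml and
        // hdfs-site.xml, which is why the secure starter needs it.
        Configuration hdfs = new HdfsConfiguration();

        System.out.println(plain.get("dfs.datanode.data.dir"));  // likely null
        System.out.println(hdfs.get("dfs.datanode.data.dir"));   // site/default value
      }
    }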

View File
@ -17,6 +17,7 @@
*/ */
package org.apache.hadoop.hdfs.server.namenode; package org.apache.hadoop.hdfs.server.namenode;
import static org.apache.hadoop.hdfs.server.namenode.FSImageFormat.renameReservedPathsOnUpgrade;
import static org.apache.hadoop.util.Time.now; import static org.apache.hadoop.util.Time.now;
import java.io.FilterInputStream; import java.io.FilterInputStream;
@ -320,8 +321,10 @@ private long applyEditLogOp(FSEditLogOp op, FSDirectory fsDir,
switch (op.opCode) { switch (op.opCode) {
case OP_ADD: { case OP_ADD: {
AddCloseOp addCloseOp = (AddCloseOp)op; AddCloseOp addCloseOp = (AddCloseOp)op;
final String path =
renameReservedPathsOnUpgrade(addCloseOp.path, logVersion);
if (FSNamesystem.LOG.isDebugEnabled()) { if (FSNamesystem.LOG.isDebugEnabled()) {
FSNamesystem.LOG.debug(op.opCode + ": " + addCloseOp.path + FSNamesystem.LOG.debug(op.opCode + ": " + path +
" numblocks : " + addCloseOp.blocks.length + " numblocks : " + addCloseOp.blocks.length +
" clientHolder " + addCloseOp.clientName + " clientHolder " + addCloseOp.clientName +
" clientMachine " + addCloseOp.clientMachine); " clientMachine " + addCloseOp.clientMachine);
@ -332,9 +335,9 @@ private long applyEditLogOp(FSEditLogOp op, FSDirectory fsDir,
// 3. OP_ADD to open file for append // 3. OP_ADD to open file for append
// See if the file already exists (persistBlocks call) // See if the file already exists (persistBlocks call)
final INodesInPath iip = fsDir.getLastINodeInPath(addCloseOp.path); final INodesInPath iip = fsDir.getLastINodeInPath(path);
final INodeFile oldFile = INodeFile.valueOf( final INodeFile oldFile = INodeFile.valueOf(
iip.getINode(0), addCloseOp.path, true); iip.getINode(0), path, true);
INodeFile newFile = oldFile; INodeFile newFile = oldFile;
if (oldFile == null) { // this is OP_ADD on a new file (case 1) if (oldFile == null) { // this is OP_ADD on a new file (case 1)
// versions > 0 support per file replication // versions > 0 support per file replication
@ -347,10 +350,10 @@ private long applyEditLogOp(FSEditLogOp op, FSDirectory fsDir,
inodeId = getAndUpdateLastInodeId(addCloseOp.inodeId, logVersion, inodeId = getAndUpdateLastInodeId(addCloseOp.inodeId, logVersion,
lastInodeId); lastInodeId);
newFile = fsDir.unprotectedAddFile(inodeId, newFile = fsDir.unprotectedAddFile(inodeId,
addCloseOp.path, addCloseOp.permissions, replication, path, addCloseOp.permissions, replication,
addCloseOp.mtime, addCloseOp.atime, addCloseOp.blockSize, true, addCloseOp.mtime, addCloseOp.atime, addCloseOp.blockSize, true,
addCloseOp.clientName, addCloseOp.clientMachine); addCloseOp.clientName, addCloseOp.clientMachine);
fsNamesys.leaseManager.addLease(addCloseOp.clientName, addCloseOp.path); fsNamesys.leaseManager.addLease(addCloseOp.clientName, path);
// add the op into retry cache if necessary // add the op into retry cache if necessary
if (toAddRetryCache) { if (toAddRetryCache) {
@ -366,11 +369,11 @@ private long applyEditLogOp(FSEditLogOp op, FSDirectory fsDir,
FSNamesystem.LOG.debug("Reopening an already-closed file " + FSNamesystem.LOG.debug("Reopening an already-closed file " +
"for append"); "for append");
} }
LocatedBlock lb = fsNamesys.prepareFileForWrite(addCloseOp.path, LocatedBlock lb = fsNamesys.prepareFileForWrite(path,
oldFile, addCloseOp.clientName, addCloseOp.clientMachine, null, oldFile, addCloseOp.clientName, addCloseOp.clientMachine, null,
false, iip.getLatestSnapshotId(), false); false, iip.getLatestSnapshotId(), false);
newFile = INodeFile.valueOf(fsDir.getINode(addCloseOp.path), newFile = INodeFile.valueOf(fsDir.getINode(path),
addCloseOp.path, true); path, true);
// add the op into retry cache is necessary // add the op into retry cache is necessary
if (toAddRetryCache) { if (toAddRetryCache) {
@ -391,16 +394,17 @@ private long applyEditLogOp(FSEditLogOp op, FSDirectory fsDir,
} }
case OP_CLOSE: { case OP_CLOSE: {
AddCloseOp addCloseOp = (AddCloseOp)op; AddCloseOp addCloseOp = (AddCloseOp)op;
final String path =
renameReservedPathsOnUpgrade(addCloseOp.path, logVersion);
if (FSNamesystem.LOG.isDebugEnabled()) { if (FSNamesystem.LOG.isDebugEnabled()) {
FSNamesystem.LOG.debug(op.opCode + ": " + addCloseOp.path + FSNamesystem.LOG.debug(op.opCode + ": " + path +
" numblocks : " + addCloseOp.blocks.length + " numblocks : " + addCloseOp.blocks.length +
" clientHolder " + addCloseOp.clientName + " clientHolder " + addCloseOp.clientName +
" clientMachine " + addCloseOp.clientMachine); " clientMachine " + addCloseOp.clientMachine);
} }
final INodesInPath iip = fsDir.getLastINodeInPath(addCloseOp.path); final INodesInPath iip = fsDir.getLastINodeInPath(path);
final INodeFile file = INodeFile.valueOf(iip.getINode(0), addCloseOp.path); final INodeFile file = INodeFile.valueOf(iip.getINode(0), path);
// Update the salient file attributes. // Update the salient file attributes.
file.setAccessTime(addCloseOp.atime, Snapshot.CURRENT_STATE_ID); file.setAccessTime(addCloseOp.atime, Snapshot.CURRENT_STATE_ID);
@ -414,24 +418,26 @@ private long applyEditLogOp(FSEditLogOp op, FSDirectory fsDir,
// could show up twice in a row. But after that version, this // could show up twice in a row. But after that version, this
// should be fixed, so we should treat it as an error. // should be fixed, so we should treat it as an error.
throw new IOException( throw new IOException(
"File is not under construction: " + addCloseOp.path); "File is not under construction: " + path);
} }
// One might expect that you could use removeLease(holder, path) here, // One might expect that you could use removeLease(holder, path) here,
// but OP_CLOSE doesn't serialize the holder. So, remove by path. // but OP_CLOSE doesn't serialize the holder. So, remove by path.
if (file.isUnderConstruction()) { if (file.isUnderConstruction()) {
fsNamesys.leaseManager.removeLeaseWithPrefixPath(addCloseOp.path); fsNamesys.leaseManager.removeLeaseWithPrefixPath(path);
file.toCompleteFile(file.getModificationTime()); file.toCompleteFile(file.getModificationTime());
} }
break; break;
} }
case OP_UPDATE_BLOCKS: { case OP_UPDATE_BLOCKS: {
UpdateBlocksOp updateOp = (UpdateBlocksOp)op; UpdateBlocksOp updateOp = (UpdateBlocksOp)op;
final String path =
renameReservedPathsOnUpgrade(updateOp.path, logVersion);
if (FSNamesystem.LOG.isDebugEnabled()) { if (FSNamesystem.LOG.isDebugEnabled()) {
FSNamesystem.LOG.debug(op.opCode + ": " + updateOp.path + FSNamesystem.LOG.debug(op.opCode + ": " + path +
" numblocks : " + updateOp.blocks.length); " numblocks : " + updateOp.blocks.length);
} }
INodeFile oldFile = INodeFile.valueOf(fsDir.getINode(updateOp.path), INodeFile oldFile = INodeFile.valueOf(fsDir.getINode(path),
updateOp.path); path);
// Update in-memory data structures // Update in-memory data structures
updateBlocks(fsDir, updateOp, oldFile); updateBlocks(fsDir, updateOp, oldFile);
@ -442,7 +448,7 @@ private long applyEditLogOp(FSEditLogOp op, FSDirectory fsDir,
} }
case OP_ADD_BLOCK: { case OP_ADD_BLOCK: {
AddBlockOp addBlockOp = (AddBlockOp) op; AddBlockOp addBlockOp = (AddBlockOp) op;
String path = addBlockOp.getPath(); String path = renameReservedPathsOnUpgrade(addBlockOp.getPath(), logVersion);
if (FSNamesystem.LOG.isDebugEnabled()) { if (FSNamesystem.LOG.isDebugEnabled()) {
FSNamesystem.LOG.debug(op.opCode + ": " + path + FSNamesystem.LOG.debug(op.opCode + ": " + path +
" new block id : " + addBlockOp.getLastBlock().getBlockId()); " new block id : " + addBlockOp.getLastBlock().getBlockId());
@ -456,14 +462,20 @@ private long applyEditLogOp(FSEditLogOp op, FSDirectory fsDir,
SetReplicationOp setReplicationOp = (SetReplicationOp)op; SetReplicationOp setReplicationOp = (SetReplicationOp)op;
short replication = fsNamesys.getBlockManager().adjustReplication( short replication = fsNamesys.getBlockManager().adjustReplication(
setReplicationOp.replication); setReplicationOp.replication);
fsDir.unprotectedSetReplication(setReplicationOp.path, fsDir.unprotectedSetReplication(
renameReservedPathsOnUpgrade(setReplicationOp.path, logVersion),
replication, null); replication, null);
break; break;
} }
case OP_CONCAT_DELETE: { case OP_CONCAT_DELETE: {
ConcatDeleteOp concatDeleteOp = (ConcatDeleteOp)op; ConcatDeleteOp concatDeleteOp = (ConcatDeleteOp)op;
fsDir.unprotectedConcat(concatDeleteOp.trg, concatDeleteOp.srcs, String trg = renameReservedPathsOnUpgrade(concatDeleteOp.trg, logVersion);
concatDeleteOp.timestamp); String[] srcs = new String[concatDeleteOp.srcs.length];
for (int i=0; i<srcs.length; i++) {
srcs[i] =
renameReservedPathsOnUpgrade(concatDeleteOp.srcs[i], logVersion);
}
fsDir.unprotectedConcat(trg, srcs, concatDeleteOp.timestamp);
if (toAddRetryCache) { if (toAddRetryCache) {
fsNamesys.addCacheEntry(concatDeleteOp.rpcClientId, fsNamesys.addCacheEntry(concatDeleteOp.rpcClientId,
@ -473,7 +485,9 @@ private long applyEditLogOp(FSEditLogOp op, FSDirectory fsDir,
} }
case OP_RENAME_OLD: { case OP_RENAME_OLD: {
RenameOldOp renameOp = (RenameOldOp)op; RenameOldOp renameOp = (RenameOldOp)op;
fsDir.unprotectedRenameTo(renameOp.src, renameOp.dst, final String src = renameReservedPathsOnUpgrade(renameOp.src, logVersion);
final String dst = renameReservedPathsOnUpgrade(renameOp.dst, logVersion);
fsDir.unprotectedRenameTo(src, dst,
renameOp.timestamp); renameOp.timestamp);
if (toAddRetryCache) { if (toAddRetryCache) {
@ -483,7 +497,9 @@ private long applyEditLogOp(FSEditLogOp op, FSDirectory fsDir,
} }
case OP_DELETE: { case OP_DELETE: {
DeleteOp deleteOp = (DeleteOp)op; DeleteOp deleteOp = (DeleteOp)op;
fsDir.unprotectedDelete(deleteOp.path, deleteOp.timestamp); fsDir.unprotectedDelete(
renameReservedPathsOnUpgrade(deleteOp.path, logVersion),
deleteOp.timestamp);
if (toAddRetryCache) { if (toAddRetryCache) {
fsNamesys.addCacheEntry(deleteOp.rpcClientId, deleteOp.rpcCallId); fsNamesys.addCacheEntry(deleteOp.rpcClientId, deleteOp.rpcCallId);
@ -494,8 +510,9 @@ private long applyEditLogOp(FSEditLogOp op, FSDirectory fsDir,
MkdirOp mkdirOp = (MkdirOp)op; MkdirOp mkdirOp = (MkdirOp)op;
inodeId = getAndUpdateLastInodeId(mkdirOp.inodeId, logVersion, inodeId = getAndUpdateLastInodeId(mkdirOp.inodeId, logVersion,
lastInodeId); lastInodeId);
fsDir.unprotectedMkdir(inodeId, mkdirOp.path, mkdirOp.permissions, fsDir.unprotectedMkdir(inodeId,
mkdirOp.timestamp); renameReservedPathsOnUpgrade(mkdirOp.path, logVersion),
mkdirOp.permissions, mkdirOp.timestamp);
break; break;
} }
case OP_SET_GENSTAMP_V1: { case OP_SET_GENSTAMP_V1: {
@ -505,53 +522,56 @@ private long applyEditLogOp(FSEditLogOp op, FSDirectory fsDir,
} }
case OP_SET_PERMISSIONS: { case OP_SET_PERMISSIONS: {
SetPermissionsOp setPermissionsOp = (SetPermissionsOp)op; SetPermissionsOp setPermissionsOp = (SetPermissionsOp)op;
fsDir.unprotectedSetPermission(setPermissionsOp.src, fsDir.unprotectedSetPermission(
renameReservedPathsOnUpgrade(setPermissionsOp.src, logVersion),
setPermissionsOp.permissions); setPermissionsOp.permissions);
break; break;
} }
case OP_SET_OWNER: { case OP_SET_OWNER: {
SetOwnerOp setOwnerOp = (SetOwnerOp)op; SetOwnerOp setOwnerOp = (SetOwnerOp)op;
fsDir.unprotectedSetOwner(setOwnerOp.src, setOwnerOp.username, fsDir.unprotectedSetOwner(
setOwnerOp.groupname); renameReservedPathsOnUpgrade(setOwnerOp.src, logVersion),
setOwnerOp.username, setOwnerOp.groupname);
break; break;
} }
case OP_SET_NS_QUOTA: { case OP_SET_NS_QUOTA: {
SetNSQuotaOp setNSQuotaOp = (SetNSQuotaOp)op; SetNSQuotaOp setNSQuotaOp = (SetNSQuotaOp)op;
fsDir.unprotectedSetQuota(setNSQuotaOp.src, fsDir.unprotectedSetQuota(
setNSQuotaOp.nsQuota, renameReservedPathsOnUpgrade(setNSQuotaOp.src, logVersion),
HdfsConstants.QUOTA_DONT_SET); setNSQuotaOp.nsQuota, HdfsConstants.QUOTA_DONT_SET);
break; break;
} }
case OP_CLEAR_NS_QUOTA: { case OP_CLEAR_NS_QUOTA: {
ClearNSQuotaOp clearNSQuotaOp = (ClearNSQuotaOp)op; ClearNSQuotaOp clearNSQuotaOp = (ClearNSQuotaOp)op;
fsDir.unprotectedSetQuota(clearNSQuotaOp.src, fsDir.unprotectedSetQuota(
HdfsConstants.QUOTA_RESET, renameReservedPathsOnUpgrade(clearNSQuotaOp.src, logVersion),
HdfsConstants.QUOTA_DONT_SET); HdfsConstants.QUOTA_RESET, HdfsConstants.QUOTA_DONT_SET);
break; break;
} }
case OP_SET_QUOTA: case OP_SET_QUOTA:
SetQuotaOp setQuotaOp = (SetQuotaOp)op; SetQuotaOp setQuotaOp = (SetQuotaOp)op;
fsDir.unprotectedSetQuota(setQuotaOp.src, fsDir.unprotectedSetQuota(
setQuotaOp.nsQuota, renameReservedPathsOnUpgrade(setQuotaOp.src, logVersion),
setQuotaOp.dsQuota); setQuotaOp.nsQuota, setQuotaOp.dsQuota);
break; break;
case OP_TIMES: { case OP_TIMES: {
TimesOp timesOp = (TimesOp)op; TimesOp timesOp = (TimesOp)op;
fsDir.unprotectedSetTimes(timesOp.path, fsDir.unprotectedSetTimes(
timesOp.mtime, renameReservedPathsOnUpgrade(timesOp.path, logVersion),
timesOp.atime, true); timesOp.mtime, timesOp.atime, true);
break; break;
} }
case OP_SYMLINK: { case OP_SYMLINK: {
SymlinkOp symlinkOp = (SymlinkOp)op; SymlinkOp symlinkOp = (SymlinkOp)op;
inodeId = getAndUpdateLastInodeId(symlinkOp.inodeId, logVersion, inodeId = getAndUpdateLastInodeId(symlinkOp.inodeId, logVersion,
lastInodeId); lastInodeId);
fsDir.unprotectedAddSymlink(inodeId, symlinkOp.path, fsDir.unprotectedAddSymlink(inodeId,
symlinkOp.value, symlinkOp.mtime, renameReservedPathsOnUpgrade(symlinkOp.path, logVersion),
symlinkOp.atime, symlinkOp.permissionStatus); symlinkOp.value, symlinkOp.mtime, symlinkOp.atime,
symlinkOp.permissionStatus);
if (toAddRetryCache) { if (toAddRetryCache) {
fsNamesys.addCacheEntry(symlinkOp.rpcClientId, symlinkOp.rpcCallId); fsNamesys.addCacheEntry(symlinkOp.rpcClientId, symlinkOp.rpcCallId);
@ -560,7 +580,9 @@ private long applyEditLogOp(FSEditLogOp op, FSDirectory fsDir,
} }
case OP_RENAME: { case OP_RENAME: {
RenameOp renameOp = (RenameOp)op; RenameOp renameOp = (RenameOp)op;
fsDir.unprotectedRenameTo(renameOp.src, renameOp.dst, fsDir.unprotectedRenameTo(
renameReservedPathsOnUpgrade(renameOp.src, logVersion),
renameReservedPathsOnUpgrade(renameOp.dst, logVersion),
renameOp.timestamp, renameOp.options); renameOp.timestamp, renameOp.options);
if (toAddRetryCache) { if (toAddRetryCache) {
@ -604,10 +626,12 @@ private long applyEditLogOp(FSEditLogOp op, FSDirectory fsDir,
Lease lease = fsNamesys.leaseManager.getLease( Lease lease = fsNamesys.leaseManager.getLease(
reassignLeaseOp.leaseHolder); reassignLeaseOp.leaseHolder);
INodeFile pendingFile = fsDir.getINode(reassignLeaseOp.path).asFile(); final String path =
renameReservedPathsOnUpgrade(reassignLeaseOp.path, logVersion);
INodeFile pendingFile = fsDir.getINode(path).asFile();
Preconditions.checkState(pendingFile.isUnderConstruction()); Preconditions.checkState(pendingFile.isUnderConstruction());
fsNamesys.reassignLeaseInternal(lease, fsNamesys.reassignLeaseInternal(lease,
reassignLeaseOp.path, reassignLeaseOp.newHolder, pendingFile); path, reassignLeaseOp.newHolder, pendingFile);
break; break;
} }
case OP_START_LOG_SEGMENT: case OP_START_LOG_SEGMENT:
@ -617,8 +641,11 @@ private long applyEditLogOp(FSEditLogOp op, FSDirectory fsDir,
} }
case OP_CREATE_SNAPSHOT: { case OP_CREATE_SNAPSHOT: {
CreateSnapshotOp createSnapshotOp = (CreateSnapshotOp) op; CreateSnapshotOp createSnapshotOp = (CreateSnapshotOp) op;
final String snapshotRoot =
renameReservedPathsOnUpgrade(createSnapshotOp.snapshotRoot,
logVersion);
String path = fsNamesys.getSnapshotManager().createSnapshot( String path = fsNamesys.getSnapshotManager().createSnapshot(
createSnapshotOp.snapshotRoot, createSnapshotOp.snapshotName); snapshotRoot, createSnapshotOp.snapshotName);
if (toAddRetryCache) { if (toAddRetryCache) {
fsNamesys.addCacheEntryWithPayload(createSnapshotOp.rpcClientId, fsNamesys.addCacheEntryWithPayload(createSnapshotOp.rpcClientId,
createSnapshotOp.rpcCallId, path); createSnapshotOp.rpcCallId, path);
@ -629,8 +656,11 @@ private long applyEditLogOp(FSEditLogOp op, FSDirectory fsDir,
DeleteSnapshotOp deleteSnapshotOp = (DeleteSnapshotOp) op; DeleteSnapshotOp deleteSnapshotOp = (DeleteSnapshotOp) op;
BlocksMapUpdateInfo collectedBlocks = new BlocksMapUpdateInfo(); BlocksMapUpdateInfo collectedBlocks = new BlocksMapUpdateInfo();
List<INode> removedINodes = new ChunkedArrayList<INode>(); List<INode> removedINodes = new ChunkedArrayList<INode>();
final String snapshotRoot =
renameReservedPathsOnUpgrade(deleteSnapshotOp.snapshotRoot,
logVersion);
fsNamesys.getSnapshotManager().deleteSnapshot( fsNamesys.getSnapshotManager().deleteSnapshot(
deleteSnapshotOp.snapshotRoot, deleteSnapshotOp.snapshotName, snapshotRoot, deleteSnapshotOp.snapshotName,
collectedBlocks, removedINodes); collectedBlocks, removedINodes);
fsNamesys.removeBlocksAndUpdateSafemodeTotal(collectedBlocks); fsNamesys.removeBlocksAndUpdateSafemodeTotal(collectedBlocks);
collectedBlocks.clear(); collectedBlocks.clear();
@ -645,8 +675,11 @@ private long applyEditLogOp(FSEditLogOp op, FSDirectory fsDir,
} }
case OP_RENAME_SNAPSHOT: { case OP_RENAME_SNAPSHOT: {
RenameSnapshotOp renameSnapshotOp = (RenameSnapshotOp) op; RenameSnapshotOp renameSnapshotOp = (RenameSnapshotOp) op;
final String snapshotRoot =
renameReservedPathsOnUpgrade(renameSnapshotOp.snapshotRoot,
logVersion);
fsNamesys.getSnapshotManager().renameSnapshot( fsNamesys.getSnapshotManager().renameSnapshot(
renameSnapshotOp.snapshotRoot, renameSnapshotOp.snapshotOldName, snapshotRoot, renameSnapshotOp.snapshotOldName,
renameSnapshotOp.snapshotNewName); renameSnapshotOp.snapshotNewName);
if (toAddRetryCache) { if (toAddRetryCache) {
@ -657,14 +690,19 @@ private long applyEditLogOp(FSEditLogOp op, FSDirectory fsDir,
} }
case OP_ALLOW_SNAPSHOT: { case OP_ALLOW_SNAPSHOT: {
AllowSnapshotOp allowSnapshotOp = (AllowSnapshotOp) op; AllowSnapshotOp allowSnapshotOp = (AllowSnapshotOp) op;
final String snapshotRoot =
renameReservedPathsOnUpgrade(allowSnapshotOp.snapshotRoot, logVersion);
fsNamesys.getSnapshotManager().setSnapshottable( fsNamesys.getSnapshotManager().setSnapshottable(
allowSnapshotOp.snapshotRoot, false); snapshotRoot, false);
break; break;
} }
case OP_DISALLOW_SNAPSHOT: { case OP_DISALLOW_SNAPSHOT: {
DisallowSnapshotOp disallowSnapshotOp = (DisallowSnapshotOp) op; DisallowSnapshotOp disallowSnapshotOp = (DisallowSnapshotOp) op;
final String snapshotRoot =
renameReservedPathsOnUpgrade(disallowSnapshotOp.snapshotRoot,
logVersion);
fsNamesys.getSnapshotManager().resetSnapshottable( fsNamesys.getSnapshotManager().resetSnapshottable(
disallowSnapshotOp.snapshotRoot); snapshotRoot);
break; break;
} }
case OP_SET_GENSTAMP_V2: { case OP_SET_GENSTAMP_V2: {
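Every case in the hunk above applies the same transformation: a path taken from an edit-log op is passed through renameReservedPathsOnUpgrade before it is used against the directory tree. A condensed, standalone sketch of that idea; the method below is illustrative and hard-codes one replacement instead of reading the -renameReserved map.

    // Illustrative only; not FSEditLogLoader or FSImageFormat code.
    final class UpgradePathRewriter {
      static String renameReservedOnUpgrade(String path, boolean upgradingFromPreSnapshotLayout) {
        if (!upgradingFromPreSnapshotLayout) {
          return path;                         // current layout: nothing to rewrite
        }
        String[] components = path.split("/");
        StringBuilder rewritten = new StringBuilder();
        for (int i = 0; i < components.length; i++) {
          String c = components[i];
          if (c.equals(".snapshot")) {
            c = ".user-snapshot";              // replacement would come from -renameReserved
          }
          if (i > 0) {
            rewritten.append('/');
          }
          rewritten.append(c);
        }
        return rewritten.toString();
      }

      public static void main(String[] args) {
        String opPath = "/data/.snapshot/file1";
        System.out.println(opPath + " -> " + renameReservedOnUpgrade(opPath, true));
        // prints: /data/.snapshot/file1 -> /data/.user-snapshot/file1
      }
    }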

View File
@ -32,12 +32,13 @@
import java.security.MessageDigest; import java.security.MessageDigest;
import java.util.ArrayList; import java.util.ArrayList;
import java.util.Arrays; import java.util.Arrays;
import java.util.Collection;
import java.util.HashMap; import java.util.HashMap;
import java.util.List; import java.util.List;
import java.util.Map; import java.util.Map;
import java.util.TreeMap;
import org.apache.commons.logging.Log; import org.apache.commons.logging.Log;
import org.apache.hadoop.HadoopIllegalArgumentException;
import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configuration;
@ -45,13 +46,15 @@
import org.apache.hadoop.fs.PathIsNotDirectoryException; import org.apache.hadoop.fs.PathIsNotDirectoryException;
import org.apache.hadoop.fs.UnresolvedLinkException; import org.apache.hadoop.fs.UnresolvedLinkException;
import org.apache.hadoop.fs.permission.PermissionStatus; import org.apache.hadoop.fs.permission.PermissionStatus;
import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.protocol.HdfsConstants; import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.protocol.LayoutFlags;
import org.apache.hadoop.hdfs.protocol.LayoutVersion; import org.apache.hadoop.hdfs.protocol.LayoutVersion;
import org.apache.hadoop.hdfs.protocol.LayoutVersion.Feature; import org.apache.hadoop.hdfs.protocol.LayoutVersion.Feature;
import org.apache.hadoop.hdfs.protocol.LayoutFlags;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo; import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoUnderConstruction; import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoUnderConstruction;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager; import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
import org.apache.hadoop.hdfs.server.common.InconsistentFSStateException; import org.apache.hadoop.hdfs.server.common.InconsistentFSStateException;
import org.apache.hadoop.hdfs.server.namenode.snapshot.DirectoryWithSnapshotFeature; import org.apache.hadoop.hdfs.server.namenode.snapshot.DirectoryWithSnapshotFeature;
import org.apache.hadoop.hdfs.server.namenode.snapshot.FileDiffList; import org.apache.hadoop.hdfs.server.namenode.snapshot.FileDiffList;
@ -67,6 +70,10 @@
import org.apache.hadoop.hdfs.util.ReadOnlyList; import org.apache.hadoop.hdfs.util.ReadOnlyList;
import org.apache.hadoop.io.MD5Hash; import org.apache.hadoop.io.MD5Hash;
import org.apache.hadoop.io.Text; import org.apache.hadoop.io.Text;
import org.apache.hadoop.util.StringUtils;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Preconditions;
/** /**
* Contains inner classes for reading or writing the on-disk format for * Contains inner classes for reading or writing the on-disk format for
@ -411,7 +418,8 @@ private void loadLocalNameINodesWithSnapshot(long numFiles, DataInput in,
} }
/** /**
* load fsimage files assuming only local names are stored * load fsimage files assuming only local names are stored. Used when
* snapshots are not supported by the layout version.
* *
* @param numFiles number of files expected to be read * @param numFiles number of files expected to be read
* @param in image input stream * @param in image input stream
@ -527,6 +535,8 @@ private void loadDirectoryWithSnapshot(DataInput in, Counter counter)
*/ */
private int loadDirectory(DataInput in, Counter counter) throws IOException { private int loadDirectory(DataInput in, Counter counter) throws IOException {
String parentPath = FSImageSerialization.readString(in); String parentPath = FSImageSerialization.readString(in);
// Rename .snapshot paths if we're doing an upgrade
parentPath = renameReservedPathsOnUpgrade(parentPath, getLayoutVersion());
final INodeDirectory parent = INodeDirectory.valueOf( final INodeDirectory parent = INodeDirectory.valueOf(
namesystem.dir.rootDir.getNode(parentPath, true), parentPath); namesystem.dir.rootDir.getNode(parentPath, true), parentPath);
return loadChildren(parent, in, counter); return loadChildren(parent, in, counter);
@ -586,11 +596,9 @@ private INodeDirectory getParentINodeDirectory(byte[][] pathComponents
*/ */
private void addToParent(INodeDirectory parent, INode child) { private void addToParent(INodeDirectory parent, INode child) {
FSDirectory fsDir = namesystem.dir; FSDirectory fsDir = namesystem.dir;
if (parent == fsDir.rootDir && FSDirectory.isReservedName(child)) { if (parent == fsDir.rootDir) {
throw new HadoopIllegalArgumentException("File name \"" child.setLocalName(renameReservedRootComponentOnUpgrade(
+ child.getLocalName() + "\" is reserved. Please " child.getLocalNameBytes(), getLayoutVersion()));
+ " change the name of the existing file or directory to another "
+ "name before upgrading to this release.");
} }
// NOTE: This does not update space counts for parents // NOTE: This does not update space counts for parents
if (!parent.addChild(child)) { if (!parent.addChild(child)) {
@ -627,7 +635,9 @@ public INode loadINodeWithLocalName(boolean isSnapshotINode, DataInput in,
public INode loadINodeWithLocalName(boolean isSnapshotINode, public INode loadINodeWithLocalName(boolean isSnapshotINode,
DataInput in, boolean updateINodeMap, Counter counter) DataInput in, boolean updateINodeMap, Counter counter)
throws IOException { throws IOException {
final byte[] localName = FSImageSerialization.readLocalName(in); byte[] localName = FSImageSerialization.readLocalName(in);
localName =
renameReservedComponentOnUpgrade(localName, getLayoutVersion());
INode inode = loadINode(localName, isSnapshotINode, in, counter); INode inode = loadINode(localName, isSnapshotINode, in, counter);
if (updateINodeMap if (updateINodeMap
&& NameNodeLayoutVersion.supports( && NameNodeLayoutVersion.supports(
@ -945,6 +955,155 @@ public Snapshot getSnapshot(DataInput in) throws IOException {
} }
} }
@VisibleForTesting
public static TreeMap<String, String> renameReservedMap =
new TreeMap<String, String>();
/**
* Use the default key-value pairs that will be used to determine how to
* rename reserved paths on upgrade.
*/
@VisibleForTesting
public static void useDefaultRenameReservedPairs() {
renameReservedMap.clear();
for (String key: HdfsConstants.RESERVED_PATH_COMPONENTS) {
renameReservedMap.put(
key,
key + "." + HdfsConstants.NAMENODE_LAYOUT_VERSION + "."
+ "UPGRADE_RENAMED");
}
}
/**
* Set the key-value pairs that will be used to determine how to rename
* reserved paths on upgrade.
*/
@VisibleForTesting
public static void setRenameReservedPairs(String renameReserved) {
// Clear and set the default values
useDefaultRenameReservedPairs();
// Overwrite with provided values
setRenameReservedMapInternal(renameReserved);
}
private static void setRenameReservedMapInternal(String renameReserved) {
Collection<String> pairs =
StringUtils.getTrimmedStringCollection(renameReserved);
for (String p : pairs) {
String[] pair = StringUtils.split(p, '/', '=');
Preconditions.checkArgument(pair.length == 2,
"Could not parse key-value pair " + p);
String key = pair[0];
String value = pair[1];
Preconditions.checkArgument(DFSUtil.isReservedPathComponent(key),
"Unknown reserved path " + key);
Preconditions.checkArgument(DFSUtil.isValidNameForComponent(value),
"Invalid rename path for " + key + ": " + value);
LOG.info("Will rename reserved path " + key + " to " + value);
renameReservedMap.put(key, value);
}
}
/**
* When upgrading from an old version, the filesystem could contain paths
* that are now reserved in the new version (e.g. .snapshot). This renames
* these new reserved paths to a user-specified value to avoid collisions
* with the reserved name.
*
* @param path Old path potentially containing a reserved path
* @return New path with reserved path components renamed to user value
*/
static String renameReservedPathsOnUpgrade(String path,
final int layoutVersion) {
final String oldPath = path;
// If any known LVs aren't supported, we're doing an upgrade
if (!NameNodeLayoutVersion.supports(Feature.ADD_INODE_ID, layoutVersion)) {
String[] components = INode.getPathNames(path);
// Only need to worry about the root directory
if (components.length > 1) {
components[1] = DFSUtil.bytes2String(
renameReservedRootComponentOnUpgrade(
DFSUtil.string2Bytes(components[1]),
layoutVersion));
path = DFSUtil.strings2PathString(components);
}
}
if (!NameNodeLayoutVersion.supports(Feature.SNAPSHOT, layoutVersion)) {
String[] components = INode.getPathNames(path);
// Special case the root path
if (components.length == 0) {
return path;
}
for (int i=0; i<components.length; i++) {
components[i] = DFSUtil.bytes2String(
renameReservedComponentOnUpgrade(
DFSUtil.string2Bytes(components[i]),
layoutVersion));
}
path = DFSUtil.strings2PathString(components);
}
if (!path.equals(oldPath)) {
LOG.info("Upgrade process renamed reserved path " + oldPath + " to "
+ path);
}
return path;
}
private final static String RESERVED_ERROR_MSG =
FSDirectory.DOT_RESERVED_PATH_PREFIX + " is a reserved path and "
+ HdfsConstants.DOT_SNAPSHOT_DIR + " is a reserved path component in"
+ " this version of HDFS. Please rollback and delete or rename"
+ " this path, or upgrade with the "
+ StartupOption.RENAMERESERVED.getName()
+ " [key-value pairs]"
+ " option to automatically rename these paths during upgrade.";
/**
* Same as {@link #renameReservedPathsOnUpgrade(String)}, but for a single
* byte array path component.
*/
private static byte[] renameReservedComponentOnUpgrade(byte[] component,
final int layoutVersion) {
// If the LV doesn't support snapshots, we're doing an upgrade
if (!NameNodeLayoutVersion.supports(Feature.SNAPSHOT, layoutVersion)) {
if (Arrays.equals(component, HdfsConstants.DOT_SNAPSHOT_DIR_BYTES)) {
Preconditions.checkArgument(
renameReservedMap != null &&
renameReservedMap.containsKey(HdfsConstants.DOT_SNAPSHOT_DIR),
RESERVED_ERROR_MSG);
component =
DFSUtil.string2Bytes(renameReservedMap
.get(HdfsConstants.DOT_SNAPSHOT_DIR));
}
}
return component;
}
/**
* Same as {@link #renameReservedPathsOnUpgrade(String)}, but for a single
* byte array path component.
*/
private static byte[] renameReservedRootComponentOnUpgrade(byte[] component,
final int layoutVersion) {
// If the LV doesn't support inode IDs, we're doing an upgrade
if (!NameNodeLayoutVersion.supports(Feature.ADD_INODE_ID, layoutVersion)) {
if (Arrays.equals(component, FSDirectory.DOT_RESERVED)) {
Preconditions.checkArgument(
renameReservedMap != null &&
renameReservedMap.containsKey(FSDirectory.DOT_RESERVED_STRING),
RESERVED_ERROR_MSG);
final String renameString = renameReservedMap
.get(FSDirectory.DOT_RESERVED_STRING);
component =
DFSUtil.string2Bytes(renameString);
LOG.info("Renamed root path " + FSDirectory.DOT_RESERVED_STRING
+ " to " + renameString);
}
}
return component;
}
/** /**
* A one-shot class responsible for writing an image file. * A one-shot class responsible for writing an image file.
* The write() function should be called once, after which the getter * The write() function should be called once, after which the getter
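The two @VisibleForTesting entry points above are how the rename map gets populated: either from an explicit -renameReserved key=value list or with the default <name>.<LAYOUT-VERSION>.UPGRADE_RENAMED suffix. A usage sketch based on those methods; the pair values are the same ones the new upgrade test uses.

    import org.apache.hadoop.hdfs.server.namenode.FSImageFormat;

    class RenameReservedUsage {
      public static void main(String[] args) {
        // Explicit mappings, exactly as passed on the command line after -renameReserved.
        FSImageFormat.setRenameReservedPairs(
            ".snapshot=.user-snapshot,.reserved=.my-reserved");

        // Or fall back to defaults of the form <name>.<LAYOUT-VERSION>.UPGRADE_RENAMED.
        FSImageFormat.useDefaultRenameReservedPairs();
      }
    }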

View File
@ -1180,7 +1180,8 @@ private void checkNameNodeSafeMode(String errorMsg)
if (isInSafeMode()) { if (isInSafeMode()) {
SafeModeException se = new SafeModeException(errorMsg, safeMode); SafeModeException se = new SafeModeException(errorMsg, safeMode);
if (haEnabled && haContext != null if (haEnabled && haContext != null
&& haContext.getState().getServiceState() == HAServiceState.ACTIVE) { && haContext.getState().getServiceState() == HAServiceState.ACTIVE
&& shouldRetrySafeMode(this.safeMode)) {
throw new RetriableException(se); throw new RetriableException(se);
} else { } else {
throw se; throw se;
@ -1188,6 +1189,18 @@ private void checkNameNodeSafeMode(String errorMsg)
} }
} }
/**
* We already know that the safemode is on. We will throw a RetriableException
* if the safemode is not manual or caused by low resource.
*/
private boolean shouldRetrySafeMode(SafeModeInfo safeMode) {
if (safeMode == null) {
return false;
} else {
return !safeMode.isManual() && !safeMode.areResourcesLow();
}
}
public static Collection<URI> getNamespaceDirs(Configuration conf) { public static Collection<URI> getNamespaceDirs(Configuration conf) {
return getStorageDirs(conf, DFS_NAMENODE_NAME_DIR_KEY); return getStorageDirs(conf, DFS_NAMENODE_NAME_DIR_KEY);
} }
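The effect of shouldRetrySafeMode above is that only automatic safe mode on an active HA NameNode surfaces as a RetriableException; manual or low-resource safe mode still fails fast. A tiny sketch of that decision, with illustrative names that are not FSNamesystem's:

    final class SafeModeRetryDecision {
      static boolean shouldRetry(boolean haEnabled, boolean isActive,
          boolean manualSafeMode, boolean resourcesLow) {
        // RetriableException only when HA is on, this NameNode is active, and
        // safe mode was entered automatically rather than by an admin or low disk.
        return haEnabled && isActive && !manualSafeMode && !resourcesLow;
      }

      public static void main(String[] args) {
        System.out.println(shouldRetry(true, true, false, false)); // true: client retries
        System.out.println(shouldRetry(true, true, true, false));  // false: SafeModeException
      }
    }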

View File
@ -212,7 +212,9 @@ public static enum OperationCategory {
+ StartupOption.CLUSTERID.getName() + " cid ] [" + StartupOption.CLUSTERID.getName() + " cid ] ["
+ StartupOption.FORCE.getName() + "] [" + StartupOption.FORCE.getName() + "] ["
+ StartupOption.NONINTERACTIVE.getName() + "] ] | [" + StartupOption.NONINTERACTIVE.getName() + "] ] | ["
+ StartupOption.UPGRADE.getName() + "] | [" + StartupOption.UPGRADE.getName() +
" [" + StartupOption.CLUSTERID.getName() + " cid]" +
" [" + StartupOption.RENAMERESERVED.getName() + "<k-v pairs>] ] | ["
+ StartupOption.ROLLBACK.getName() + "] | [" + StartupOption.ROLLBACK.getName() + "] | ["
+ StartupOption.FINALIZE.getName() + "] | [" + StartupOption.FINALIZE.getName() + "] | ["
+ StartupOption.IMPORT.getName() + "] | [" + StartupOption.IMPORT.getName() + "] | ["
@ -1056,7 +1058,8 @@ private static void printUsage(PrintStream out) {
out.println(USAGE + "\n"); out.println(USAGE + "\n");
} }
private static StartupOption parseArguments(String args[]) { @VisibleForTesting
static StartupOption parseArguments(String args[]) {
int argsLen = (args == null) ? 0 : args.length; int argsLen = (args == null) ? 0 : args.length;
StartupOption startOpt = StartupOption.REGULAR; StartupOption startOpt = StartupOption.REGULAR;
for(int i=0; i < argsLen; i++) { for(int i=0; i < argsLen; i++) {
@ -1103,11 +1106,33 @@ private static StartupOption parseArguments(String args[]) {
startOpt = StartupOption.CHECKPOINT; startOpt = StartupOption.CHECKPOINT;
} else if (StartupOption.UPGRADE.getName().equalsIgnoreCase(cmd)) { } else if (StartupOption.UPGRADE.getName().equalsIgnoreCase(cmd)) {
startOpt = StartupOption.UPGRADE; startOpt = StartupOption.UPGRADE;
// might be followed by two args /* Can be followed by CLUSTERID with a required parameter or
if (i + 2 < argsLen * RENAMERESERVED with an optional parameter
&& args[i + 1].equalsIgnoreCase(StartupOption.CLUSTERID.getName())) { */
while (i + 1 < argsLen) {
String flag = args[i + 1];
if (flag.equalsIgnoreCase(StartupOption.CLUSTERID.getName())) {
if (i + 2 < argsLen) {
i += 2; i += 2;
startOpt.setClusterId(args[i]); startOpt.setClusterId(args[i]);
} else {
LOG.fatal("Must specify a valid cluster ID after the "
+ StartupOption.CLUSTERID.getName() + " flag");
return null;
}
} else if (flag.equalsIgnoreCase(StartupOption.RENAMERESERVED
.getName())) {
if (i + 2 < argsLen) {
FSImageFormat.setRenameReservedPairs(args[i + 2]);
i += 2;
} else {
FSImageFormat.useDefaultRenameReservedPairs();
i += 1;
}
} else {
LOG.fatal("Unknown upgrade flag " + flag);
return null;
}
} }
} else if (StartupOption.ROLLINGUPGRADE.getName().equalsIgnoreCase(cmd)) { } else if (StartupOption.ROLLINGUPGRADE.getName().equalsIgnoreCase(cmd)) {
startOpt = StartupOption.ROLLINGUPGRADE; startOpt = StartupOption.ROLLINGUPGRADE;
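The new argument loop accepts, after -upgrade, an optional -clusterid <cid> and an optional -renameReserved [key=value pairs], in either order, and rejects anything else. A standalone sketch of that parsing; the flag names match, everything else is illustrative and not the NameNode code.

    final class UpgradeArgsSketch {
      public static void main(String[] args) {
        args = new String[] {"-upgrade", "-renameReserved", ".snapshot=.my-snapshot"};
        String clusterId = null;
        String renamePairs = null;
        boolean useDefaultRenames = false;
        for (int i = 1; i < args.length; i++) {
          if (args[i].equalsIgnoreCase("-clusterid") && i + 1 < args.length) {
            clusterId = args[++i];
          } else if (args[i].equalsIgnoreCase("-renameReserved")) {
            if (i + 1 < args.length && !args[i + 1].startsWith("-")) {
              renamePairs = args[++i];        // explicit key=value pairs
            } else {
              useDefaultRenames = true;       // <name>.<LAYOUT-VERSION>.UPGRADE_RENAMED
            }
          } else {
            throw new IllegalArgumentException("Unknown upgrade flag " + args[i]);
          }
        }
        System.out.println("clusterId=" + clusterId + ", pairs=" + renamePairs
            + ", defaults=" + useDefaultRenames);
      }
    }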

View File
@ -962,9 +962,8 @@ public int run(Configuration conf, List<String> args) throws IOException {
if (numResults > 0) { if (numResults > 0) {
System.out.print(listing); System.out.print(listing);
} }
// If there are no results, we return 1 (failure exit code); // If list pools succeed, we return 0 (success exit code)
// otherwise we return 0 (success exit code). return 0;
return (numResults == 0) ? 1 : 0;
} }
} }

View File
@ -188,6 +188,9 @@ public synchronized void initialize(URI uri, Configuration conf
int maxFailoverAttempts = conf.getInt( int maxFailoverAttempts = conf.getInt(
DFSConfigKeys.DFS_HTTP_CLIENT_FAILOVER_MAX_ATTEMPTS_KEY, DFSConfigKeys.DFS_HTTP_CLIENT_FAILOVER_MAX_ATTEMPTS_KEY,
DFSConfigKeys.DFS_HTTP_CLIENT_FAILOVER_MAX_ATTEMPTS_DEFAULT); DFSConfigKeys.DFS_HTTP_CLIENT_FAILOVER_MAX_ATTEMPTS_DEFAULT);
int maxRetryAttempts = conf.getInt(
DFSConfigKeys.DFS_HTTP_CLIENT_RETRY_MAX_ATTEMPTS_KEY,
DFSConfigKeys.DFS_HTTP_CLIENT_RETRY_MAX_ATTEMPTS_DEFAULT);
int failoverSleepBaseMillis = conf.getInt( int failoverSleepBaseMillis = conf.getInt(
DFSConfigKeys.DFS_HTTP_CLIENT_FAILOVER_SLEEPTIME_BASE_KEY, DFSConfigKeys.DFS_HTTP_CLIENT_FAILOVER_SLEEPTIME_BASE_KEY,
DFSConfigKeys.DFS_HTTP_CLIENT_FAILOVER_SLEEPTIME_BASE_DEFAULT); DFSConfigKeys.DFS_HTTP_CLIENT_FAILOVER_SLEEPTIME_BASE_DEFAULT);
@ -197,7 +200,7 @@ public synchronized void initialize(URI uri, Configuration conf
this.retryPolicy = RetryPolicies this.retryPolicy = RetryPolicies
.failoverOnNetworkException(RetryPolicies.TRY_ONCE_THEN_FAIL, .failoverOnNetworkException(RetryPolicies.TRY_ONCE_THEN_FAIL,
maxFailoverAttempts, failoverSleepBaseMillis, maxFailoverAttempts, maxRetryAttempts, failoverSleepBaseMillis,
failoverSleepMaxMillis); failoverSleepMaxMillis);
} }

View File
@ -459,6 +459,33 @@ HDFS Users Guide
* start the cluster with rollback option. (<<<bin/start-dfs.sh -rollback>>>).
When upgrading to a new version of HDFS, it is necessary to rename or
delete any paths that are reserved in the new version of HDFS. If the
NameNode encounters a reserved path during upgrade, it will print an
error like the following:
<<< /.reserved is a reserved path and .snapshot is a
reserved path component in this version of HDFS. Please rollback and delete
or rename this path, or upgrade with the -renameReserved [key-value pairs]
option to automatically rename these paths during upgrade.>>>
Specifying <<<-upgrade -renameReserved [optional key-value pairs]>>> causes
the NameNode to automatically rename any reserved paths found during
startup. For example, to rename all paths named <<<.snapshot>>> to
<<<.my-snapshot>>> and <<<.reserved>>> to <<<.my-reserved>>>, a user would
specify <<<-upgrade -renameReserved
.snapshot=.my-snapshot,.reserved=.my-reserved>>>.
If no key-value pairs are specified with <<<-renameReserved>>>, the
NameNode will then suffix reserved paths with
<<<.<LAYOUT-VERSION>.UPGRADE_RENAMED>>>, e.g.
<<<.snapshot.-51.UPGRADE_RENAMED>>>.
There are some caveats to this renaming process. It's recommended,
if possible, to first run <<<hdfs dfsadmin -saveNamespace>>> before upgrading.
This is because data inconsistency can result if an edit log operation
refers to the destination of an automatically renamed file.
* File Permissions and Security
The file permissions are designed to be similar to file permissions on

View File
@ -20,7 +20,7 @@
xsi:schemaLocation="http://maven.apache.org/XDOC/2.0 http://maven.apache.org/xsd/xdoc-2.0.xsd">
<properties>
<title>HFDS Snapshots</title> <title>HDFS Snapshots</title>
</properties>
<body>
@ -99,15 +99,22 @@
<li>Copying a file from snapshot <code>s0</code>:
<source>hdfs dfs -cp /foo/.snapshot/s0/bar /tmp</source></li>
</ul>
<p>
<b>Note</b> that the name ".snapshot" is now a reserved file name in HDFS
so that users cannot create a file/directory with ".snapshot" as the name.
If ".snapshot" is used in a previous version of HDFS, it must be renamed before upgrade;
otherwise, upgrade will fail.
</p>
</subsection>
</section>
<section name="Upgrading to a version of HDFS with snapshots" id="Upgrade">
<p>
The HDFS snapshot feature introduces a new reserved path name used to
interact with snapshots: <tt>.snapshot</tt>. When upgrading from an
older version of HDFS, existing paths named <tt>.snapshot</tt> need
to first be renamed or deleted to avoid conflicting with the reserved path.
See the upgrade section in
<a href="HdfsUserGuide.html#Upgrade_and_Rollback">the HDFS user guide</a>
for more information. </p>
</section>
<section name="Snapshot Operations" id="SnapshotOperations">
<subsection name="Administrator Operations" id="AdministratorOperations">
<p>

View File
@ -27,6 +27,7 @@
import java.io.FileOutputStream; import java.io.FileOutputStream;
import java.io.FileReader; import java.io.FileReader;
import java.io.IOException; import java.io.IOException;
import java.util.ArrayList;
import java.util.Iterator; import java.util.Iterator;
import java.util.LinkedList; import java.util.LinkedList;
import java.util.TreeMap; import java.util.TreeMap;
@ -43,7 +44,9 @@
import org.apache.hadoop.hdfs.protocol.HdfsConstants; import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.protocol.HdfsFileStatus; import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption; import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
import org.apache.hadoop.hdfs.server.namenode.FSImageFormat;
import org.apache.hadoop.hdfs.server.namenode.FSImageTestUtil; import org.apache.hadoop.hdfs.server.namenode.FSImageTestUtil;
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.hadoop.util.StringUtils; import org.apache.hadoop.util.StringUtils;
import org.apache.log4j.Logger; import org.apache.log4j.Logger;
import org.junit.Test; import org.junit.Test;
@ -67,6 +70,7 @@ public class TestDFSUpgradeFromImage {
private static final String HADOOP_DFS_DIR_TXT = "hadoop-dfs-dir.txt"; private static final String HADOOP_DFS_DIR_TXT = "hadoop-dfs-dir.txt";
private static final String HADOOP22_IMAGE = "hadoop-22-dfs-dir.tgz"; private static final String HADOOP22_IMAGE = "hadoop-22-dfs-dir.tgz";
private static final String HADOOP1_BBW_IMAGE = "hadoop1-bbw.tgz"; private static final String HADOOP1_BBW_IMAGE = "hadoop1-bbw.tgz";
private static final String HADOOP2_RESERVED_IMAGE = "hadoop-2-reserved.tgz";
private static class ReferenceFileInfo { private static class ReferenceFileInfo {
String path; String path;
@ -321,6 +325,87 @@ public void testUpgradeFromCorruptRel22Image() throws IOException {
} }
} }
/**
* Test upgrade from 2.0 image with a variety of .snapshot and .reserved
* paths to test renaming on upgrade
*/
@Test
public void testUpgradeFromRel2ReservedImage() throws IOException {
unpackStorage(HADOOP2_RESERVED_IMAGE);
MiniDFSCluster cluster = null;
// Try it once without setting the upgrade flag to ensure it fails
try {
cluster =
new MiniDFSCluster.Builder(new Configuration())
.format(false)
.startupOption(StartupOption.UPGRADE)
.numDataNodes(0).build();
} catch (IllegalArgumentException e) {
GenericTestUtils.assertExceptionContains(
"reserved path component in this version",
e);
} finally {
if (cluster != null) {
cluster.shutdown();
}
}
// Try it again with a custom rename string
try {
FSImageFormat.setRenameReservedPairs(
".snapshot=.user-snapshot," +
".reserved=.my-reserved");
cluster =
new MiniDFSCluster.Builder(new Configuration())
.format(false)
.startupOption(StartupOption.UPGRADE)
.numDataNodes(0).build();
// Make sure the paths were renamed as expected
DistributedFileSystem dfs = cluster.getFileSystem();
ArrayList<Path> toList = new ArrayList<Path>();
ArrayList<String> found = new ArrayList<String>();
toList.add(new Path("/"));
while (!toList.isEmpty()) {
Path p = toList.remove(0);
FileStatus[] statuses = dfs.listStatus(p);
for (FileStatus status: statuses) {
final String path = status.getPath().toUri().getPath();
System.out.println("Found path " + path);
found.add(path);
if (status.isDirectory()) {
toList.add(status.getPath());
}
}
}
String[] expected = new String[] {
"/edits",
"/edits/.reserved",
"/edits/.user-snapshot",
"/edits/.user-snapshot/editsdir",
"/edits/.user-snapshot/editsdir/editscontents",
"/edits/.user-snapshot/editsdir/editsdir2",
"/image",
"/image/.reserved",
"/image/.user-snapshot",
"/image/.user-snapshot/imagedir",
"/image/.user-snapshot/imagedir/imagecontents",
"/image/.user-snapshot/imagedir/imagedir2",
"/.my-reserved",
"/.my-reserved/edits-touch",
"/.my-reserved/image-touch"
};
for (String s: expected) {
assertTrue("Did not find expected path " + s, found.contains(s));
}
assertEquals("Found an unexpected path while listing filesystem",
found.size(), expected.length);
} finally {
if (cluster != null) {
cluster.shutdown();
}
}
}
static void recoverAllLeases(DFSClient dfs, static void recoverAllLeases(DFSClient dfs,
Path path) throws IOException { Path path) throws IOException {
String pathStr = path.toString(); String pathStr = path.toString();

View File
@ -19,16 +19,19 @@
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_MAX_BLOCK_ACQUIRE_FAILURES_KEY; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_MAX_BLOCK_ACQUIRE_FAILURES_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_SOCKET_REUSE_KEEPALIVE_KEY; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_SOCKET_REUSE_KEEPALIVE_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_SOCKET_REUSE_KEEPALIVE_DEFAULT;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_SOCKET_WRITE_TIMEOUT_KEY; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_SOCKET_WRITE_TIMEOUT_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_SOCKET_CACHE_EXPIRY_MSEC_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_SOCKET_CACHE_CAPACITY_DEFAULT;
import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotNull; import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertTrue; import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail; import static org.junit.Assert.fail;
import com.google.common.base.Supplier;
import java.io.InputStream; import java.io.InputStream;
import java.io.PrintWriter; import java.io.PrintWriter;
import java.net.InetSocketAddress;
import java.net.Socket;
import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream; import org.apache.hadoop.fs.FSDataInputStream;
@ -37,10 +40,8 @@
import org.apache.hadoop.hdfs.MiniDFSCluster.DataNodeProperties; import org.apache.hadoop.hdfs.MiniDFSCluster.DataNodeProperties;
import org.apache.hadoop.hdfs.net.Peer; import org.apache.hadoop.hdfs.net.Peer;
import org.apache.hadoop.hdfs.server.datanode.DataNode; import org.apache.hadoop.hdfs.server.datanode.DataNode;
import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
import org.apache.hadoop.io.IOUtils; import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.net.NetUtils; import org.apache.hadoop.test.GenericTestUtils;
import org.apache.hadoop.util.ReflectionUtils; import org.apache.hadoop.util.ReflectionUtils;
import org.junit.After; import org.junit.After;
import org.junit.Before; import org.junit.Before;
@ -51,10 +52,7 @@
public class TestDataTransferKeepalive { public class TestDataTransferKeepalive {
Configuration conf = new HdfsConfiguration(); Configuration conf = new HdfsConfiguration();
private MiniDFSCluster cluster; private MiniDFSCluster cluster;
private FileSystem fs;
private InetSocketAddress dnAddr;
private DataNode dn; private DataNode dn;
private DFSClient dfsClient;
private static Path TEST_FILE = new Path("/test"); private static Path TEST_FILE = new Path("/test");
private static final int KEEPALIVE_TIMEOUT = 1000; private static final int KEEPALIVE_TIMEOUT = 1000;
@ -69,15 +67,7 @@ public void setup() throws Exception {
cluster = new MiniDFSCluster.Builder(conf) cluster = new MiniDFSCluster.Builder(conf)
.numDataNodes(1).build(); .numDataNodes(1).build();
fs = cluster.getFileSystem();
dfsClient = ((DistributedFileSystem)fs).dfs;
dfsClient.peerCache.clear();
String poolId = cluster.getNamesystem().getBlockPoolId();
dn = cluster.getDataNodes().get(0); dn = cluster.getDataNodes().get(0);
DatanodeRegistration dnReg = DataNodeTestUtils.getDNRegistrationForBP(
dn, poolId);
dnAddr = NetUtils.createSocketAddr(dnReg.getXferAddr());
} }
@After @After
@ -90,34 +80,86 @@ public void teardown() {
* its configured keepalive timeout. * its configured keepalive timeout.
*/ */
@Test(timeout=30000) @Test(timeout=30000)
public void testKeepaliveTimeouts() throws Exception { public void testDatanodeRespectsKeepAliveTimeout() throws Exception {
Configuration clientConf = new Configuration(conf);
// Set a client socket cache expiry time much longer than
// the datanode-side expiration time.
final long CLIENT_EXPIRY_MS = 60000L;
clientConf.setLong(DFS_CLIENT_SOCKET_CACHE_EXPIRY_MSEC_KEY, CLIENT_EXPIRY_MS);
PeerCache.setInstance(DFS_CLIENT_SOCKET_CACHE_CAPACITY_DEFAULT, CLIENT_EXPIRY_MS);
DistributedFileSystem fs =
(DistributedFileSystem)FileSystem.get(cluster.getURI(),
clientConf);
DFSTestUtil.createFile(fs, TEST_FILE, 1L, (short)1, 0L); DFSTestUtil.createFile(fs, TEST_FILE, 1L, (short)1, 0L);
// Clients that write aren't currently re-used. // Clients that write aren't currently re-used.
assertEquals(0, dfsClient.peerCache.size()); assertEquals(0, fs.dfs.peerCache.size());
assertXceiverCount(0); assertXceiverCount(0);
// Reads the file, so we should get a // Reads the file, so we should get a
// cached socket, and should have an xceiver on the other side. // cached socket, and should have an xceiver on the other side.
DFSTestUtil.readFile(fs, TEST_FILE); DFSTestUtil.readFile(fs, TEST_FILE);
assertEquals(1, dfsClient.peerCache.size()); assertEquals(1, fs.dfs.peerCache.size());
assertXceiverCount(1); assertXceiverCount(1);
// Sleep for a bit longer than the keepalive timeout // Sleep for a bit longer than the keepalive timeout
// and make sure the xceiver died. // and make sure the xceiver died.
Thread.sleep(KEEPALIVE_TIMEOUT * 2); Thread.sleep(DFS_DATANODE_SOCKET_REUSE_KEEPALIVE_DEFAULT + 1);
assertXceiverCount(0); assertXceiverCount(0);
// The socket is still in the cache, because we don't // The socket is still in the cache, because we don't
// notice that it's closed until we try to read // notice that it's closed until we try to read
// from it again. // from it again.
assertEquals(1, dfsClient.peerCache.size()); assertEquals(1, fs.dfs.peerCache.size());
// Take it out of the cache - reading should // Take it out of the cache - reading should
// give an EOF. // give an EOF.
Peer peer = dfsClient.peerCache.get(dn.getDatanodeId(), false); Peer peer = fs.dfs.peerCache.get(dn.getDatanodeId(), false);
assertNotNull(peer); assertNotNull(peer);
assertEquals(-1, peer.getInputStream().read()); assertEquals(-1, peer.getInputStream().read());
PeerCache.setInstance(DFS_CLIENT_SOCKET_CACHE_CAPACITY_DEFAULT,
DFS_DATANODE_SOCKET_REUSE_KEEPALIVE_DEFAULT);
}
/**
* Test that the client respects its keepalive timeout.
*/
@Test(timeout=30000)
public void testClientResponsesKeepAliveTimeout() throws Exception {
Configuration clientConf = new Configuration(conf);
// Set a client socket cache expiry time much shorter than
// the datanode-side expiration time.
final long CLIENT_EXPIRY_MS = 10L;
clientConf.setLong(DFS_CLIENT_SOCKET_CACHE_EXPIRY_MSEC_KEY, CLIENT_EXPIRY_MS);
PeerCache.setInstance(DFS_CLIENT_SOCKET_CACHE_CAPACITY_DEFAULT, CLIENT_EXPIRY_MS);
DistributedFileSystem fs =
(DistributedFileSystem)FileSystem.get(cluster.getURI(),
clientConf);
DFSTestUtil.createFile(fs, TEST_FILE, 1L, (short)1, 0L);
// Clients that write aren't currently re-used.
assertEquals(0, fs.dfs.peerCache.size());
assertXceiverCount(0);
// Reads the file, so we should get a
// cached socket, and should have an xceiver on the other side.
DFSTestUtil.readFile(fs, TEST_FILE);
assertEquals(1, fs.dfs.peerCache.size());
assertXceiverCount(1);
// Sleep for a bit longer than the client keepalive timeout.
Thread.sleep(CLIENT_EXPIRY_MS + 1);
// Taking out a peer which is expired should give a null.
Peer peer = fs.dfs.peerCache.get(dn.getDatanodeId(), false);
assertTrue(peer == null);
// The socket cache is now empty.
assertEquals(0, fs.dfs.peerCache.size());
PeerCache.setInstance(DFS_CLIENT_SOCKET_CACHE_CAPACITY_DEFAULT,
DFS_DATANODE_SOCKET_REUSE_KEEPALIVE_DEFAULT);
} }
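The two tests above exercise opposite ends of the same mechanism: the DataNode closes idle xceivers after DFS_DATANODE_SOCKET_REUSE_KEEPALIVE, while the client evicts cached peers after DFS_CLIENT_SOCKET_CACHE_EXPIRY_MSEC. A minimal sketch of shrinking both windows in a test configuration, assuming the usual DFSConfigKeys constants (the values here are arbitrary):

    // Assumes: org.apache.hadoop.conf.Configuration, org.apache.hadoop.hdfs.HdfsConfiguration,
    // org.apache.hadoop.hdfs.DFSConfigKeys.
    Configuration conf = new HdfsConfiguration();
    conf.setInt(DFSConfigKeys.DFS_DATANODE_SOCKET_REUSE_KEEPALIVE_KEY, 1000);   // DN drops idle xceivers after 1s
    conf.setLong(DFSConfigKeys.DFS_CLIENT_SOCKET_CACHE_EXPIRY_MSEC_KEY, 500L);  // client expires cached peers after 0.5s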
/** /**
@ -125,8 +167,17 @@ public void testKeepaliveTimeouts() throws Exception {
* read bytes off the stream quickly. The datanode should time out sending the * read bytes off the stream quickly. The datanode should time out sending the
* chunks and the transceiver should die, even if it has a long keepalive. * chunks and the transceiver should die, even if it has a long keepalive.
*/ */
@Test(timeout=30000) @Test(timeout=300000)
public void testSlowReader() throws Exception { public void testSlowReader() throws Exception {
// Set a client socket cache expiry time much longer than
// the datanode-side expiration time.
final long CLIENT_EXPIRY_MS = 600000L;
Configuration clientConf = new Configuration(conf);
clientConf.setLong(DFS_CLIENT_SOCKET_CACHE_EXPIRY_MSEC_KEY, CLIENT_EXPIRY_MS);
PeerCache.setInstance(DFS_CLIENT_SOCKET_CACHE_CAPACITY_DEFAULT, CLIENT_EXPIRY_MS);
DistributedFileSystem fs =
(DistributedFileSystem)FileSystem.get(cluster.getURI(),
clientConf);
// Restart the DN with a shorter write timeout. // Restart the DN with a shorter write timeout.
DataNodeProperties props = cluster.stopDataNode(0); DataNodeProperties props = cluster.stopDataNode(0);
props.conf.setInt(DFS_DATANODE_SOCKET_WRITE_TIMEOUT_KEY, props.conf.setInt(DFS_DATANODE_SOCKET_WRITE_TIMEOUT_KEY,
@ -134,38 +185,31 @@ public void testSlowReader() throws Exception {
props.conf.setInt(DFS_DATANODE_SOCKET_REUSE_KEEPALIVE_KEY, props.conf.setInt(DFS_DATANODE_SOCKET_REUSE_KEEPALIVE_KEY,
120000); 120000);
assertTrue(cluster.restartDataNode(props, true)); assertTrue(cluster.restartDataNode(props, true));
dn = cluster.getDataNodes().get(0);
// Wait for heartbeats to avoid a startup race where we // Wait for heartbeats to avoid a startup race where we
// try to write the block while the DN is still starting. // try to write the block while the DN is still starting.
cluster.triggerHeartbeats(); cluster.triggerHeartbeats();
dn = cluster.getDataNodes().get(0);
DFSTestUtil.createFile(fs, TEST_FILE, 1024*1024*8L, (short)1, 0L); DFSTestUtil.createFile(fs, TEST_FILE, 1024*1024*8L, (short)1, 0L);
FSDataInputStream stm = fs.open(TEST_FILE); FSDataInputStream stm = fs.open(TEST_FILE);
try {
stm.read(); stm.read();
assertXceiverCount(1); assertXceiverCount(1);
// Poll for 0 running xceivers. Allow up to 5 seconds for some slack. GenericTestUtils.waitFor(new Supplier<Boolean>() {
long totalSleepTime = 0; public Boolean get() {
long sleepTime = WRITE_TIMEOUT + 100;
while (getXceiverCountWithoutServer() > 0 && totalSleepTime < 5000) {
Thread.sleep(sleepTime);
totalSleepTime += sleepTime;
sleepTime = 100;
}
// DN should time out in sendChunks, and this should force // DN should time out in sendChunks, and this should force
// the xceiver to exit. // the xceiver to exit.
assertXceiverCount(0); return getXceiverCountWithoutServer() == 0;
} finally {
IOUtils.closeStream(stm);
} }
}, 500, 50000);
IOUtils.closeStream(stm);
} }
@Test(timeout=30000) @Test(timeout=30000)
public void testManyClosedSocketsInCache() throws Exception { public void testManyClosedSocketsInCache() throws Exception {
// Make a small file // Make a small file
DistributedFileSystem fs = cluster.getFileSystem();
DFSTestUtil.createFile(fs, TEST_FILE, 1L, (short)1, 0L); DFSTestUtil.createFile(fs, TEST_FILE, 1L, (short)1, 0L);
// Insert a bunch of dead sockets in the cache, by opening // Insert a bunch of dead sockets in the cache, by opening

View File

@ -0,0 +1,54 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs;
import static org.apache.hadoop.http.HttpConfig.Policy.HTTP_AND_HTTPS;
import static org.apache.hadoop.http.HttpConfig.Policy.HTTP_ONLY;
import org.apache.hadoop.HadoopIllegalArgumentException;
import org.apache.hadoop.conf.Configuration;
import org.junit.Assert;
import org.junit.Test;
public final class TestHttpPolicy {
@Test(expected = HadoopIllegalArgumentException.class)
public void testInvalidPolicyValue() {
Configuration conf = new Configuration();
conf.set(DFSConfigKeys.DFS_HTTP_POLICY_KEY, "invalid");
DFSUtil.getHttpPolicy(conf);
}
@Test
public void testDeprecatedConfiguration() {
Configuration conf = new Configuration(false);
Assert.assertSame(HTTP_ONLY, DFSUtil.getHttpPolicy(conf));
conf.setBoolean(DFSConfigKeys.DFS_HTTPS_ENABLE_KEY, true);
Assert.assertSame(HTTP_AND_HTTPS, DFSUtil.getHttpPolicy(conf));
conf = new Configuration(false);
conf.setBoolean(DFSConfigKeys.HADOOP_SSL_ENABLED_KEY, true);
Assert.assertSame(HTTP_AND_HTTPS, DFSUtil.getHttpPolicy(conf));
conf = new Configuration(false);
conf.set(DFSConfigKeys.DFS_HTTP_POLICY_KEY, HTTP_ONLY.name());
conf.setBoolean(DFSConfigKeys.DFS_HTTPS_ENABLE_KEY, true);
Assert.assertSame(HTTP_ONLY, DFSUtil.getHttpPolicy(conf));
}
}
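For callers that want to avoid the deprecated booleans entirely, the unambiguous path is to set the policy key directly; as the test shows, dfs.https.enable and hadoop.ssl.enabled are only consulted when dfs.http.policy is absent. A short sketch using the same names as the test above:

    // Assumes: org.apache.hadoop.conf.Configuration, org.apache.hadoop.http.HttpConfig,
    // and the HDFS DFSConfigKeys / DFSUtil classes exercised in the test.
    Configuration conf = new Configuration();
    conf.set(DFSConfigKeys.DFS_HTTP_POLICY_KEY, HttpConfig.Policy.HTTPS_ONLY.name());
    HttpConfig.Policy policy = DFSUtil.getHttpPolicy(conf);   // HTTPS_ONLY; legacy keys are ignored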

View File

@ -65,7 +65,7 @@ public class TestBalancerWithNodeGroup {
ClientProtocol client; ClientProtocol client;
static final long TIMEOUT = 20000L; //msec static final long TIMEOUT = 40000L; //msec
static final double CAPACITY_ALLOWED_VARIANCE = 0.005; // 0.5% static final double CAPACITY_ALLOWED_VARIANCE = 0.005; // 0.5%
static final double BALANCE_ALLOWED_VARIANCE = 0.11; // 10%+delta static final double BALANCE_ALLOWED_VARIANCE = 0.11; // 10%+delta
static final int DEFAULT_BLOCK_SIZE = 10; static final int DEFAULT_BLOCK_SIZE = 10;

View File

@ -124,6 +124,8 @@ public void setUp() throws Exception {
CONF.set(CommonConfigurationKeysPublic.NET_TOPOLOGY_IMPL_KEY, CONF.set(CommonConfigurationKeysPublic.NET_TOPOLOGY_IMPL_KEY,
NetworkTopologyWithNodeGroup.class.getName()); NetworkTopologyWithNodeGroup.class.getName());
CONF.setBoolean(DFSConfigKeys.DFS_NAMENODE_AVOID_STALE_DATANODE_FOR_WRITE_KEY, true);
File baseDir = PathUtils.getTestDir(TestReplicationPolicyWithNodeGroup.class); File baseDir = PathUtils.getTestDir(TestReplicationPolicyWithNodeGroup.class);
CONF.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY, CONF.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,

View File

@ -85,6 +85,7 @@ public static void tearDown() throws Exception {
@Test @Test
public void testHttpPolicy() throws Exception { public void testHttpPolicy() throws Exception {
conf.set(DFSConfigKeys.DFS_HTTP_POLICY_KEY, policy.name()); conf.set(DFSConfigKeys.DFS_HTTP_POLICY_KEY, policy.name());
conf.set(DFSConfigKeys.DFS_NAMENODE_HTTPS_ADDRESS_KEY, "localhost:0");
InetSocketAddress addr = InetSocketAddress.createUnresolved("localhost", 0); InetSocketAddress addr = InetSocketAddress.createUnresolved("localhost", 0);
NameNodeHttpServer server = null; NameNodeHttpServer server = null;
@ -103,9 +104,11 @@ public void testHttpPolicy() throws Exception {
server.getHttpsAddress() == null)); server.getHttpsAddress() == null));
} finally { } finally {
if (server != null) {
server.stop(); server.stop();
} }
} }
}
private static boolean canAccess(String scheme, InetSocketAddress addr) { private static boolean canAccess(String scheme, InetSocketAddress addr) {
if (addr == null) if (addr == null)

View File

@ -0,0 +1,105 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode;
import static org.apache.hadoop.test.GenericTestUtils.assertExceptionContains;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNull;
import static org.junit.Assert.assertTrue;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
import org.junit.Test;
public class TestNameNodeOptionParsing {
@Test(timeout = 10000)
public void testUpgrade() {
StartupOption opt = null;
// UPGRADE is set, but nothing else
opt = NameNode.parseArguments(new String[] {"-upgrade"});
assertEquals(opt, StartupOption.UPGRADE);
assertNull(opt.getClusterId());
assertTrue(FSImageFormat.renameReservedMap.isEmpty());
// cluster ID is set
opt = NameNode.parseArguments(new String[] { "-upgrade", "-clusterid",
"mycid" });
assertEquals(StartupOption.UPGRADE, opt);
assertEquals("mycid", opt.getClusterId());
assertTrue(FSImageFormat.renameReservedMap.isEmpty());
// Everything is set
opt = NameNode.parseArguments(new String[] { "-upgrade", "-clusterid",
"mycid", "-renameReserved",
".snapshot=.my-snapshot,.reserved=.my-reserved" });
assertEquals(StartupOption.UPGRADE, opt);
assertEquals("mycid", opt.getClusterId());
assertEquals(".my-snapshot",
FSImageFormat.renameReservedMap.get(".snapshot"));
assertEquals(".my-reserved",
FSImageFormat.renameReservedMap.get(".reserved"));
// Reset the map
FSImageFormat.renameReservedMap.clear();
// Everything is set, but in a different order
opt = NameNode.parseArguments(new String[] { "-upgrade", "-renameReserved",
".reserved=.my-reserved,.snapshot=.my-snapshot", "-clusterid",
"mycid"});
assertEquals(StartupOption.UPGRADE, opt);
assertEquals("mycid", opt.getClusterId());
assertEquals(".my-snapshot",
FSImageFormat.renameReservedMap.get(".snapshot"));
assertEquals(".my-reserved",
FSImageFormat.renameReservedMap.get(".reserved"));
// Try the default renameReserved
opt = NameNode.parseArguments(new String[] { "-upgrade", "-renameReserved"});
assertEquals(StartupOption.UPGRADE, opt);
assertEquals(
".snapshot." + HdfsConstants.NAMENODE_LAYOUT_VERSION
+ ".UPGRADE_RENAMED",
FSImageFormat.renameReservedMap.get(".snapshot"));
assertEquals(
".reserved." + HdfsConstants.NAMENODE_LAYOUT_VERSION
+ ".UPGRADE_RENAMED",
FSImageFormat.renameReservedMap.get(".reserved"));
// Try some error conditions
try {
opt =
NameNode.parseArguments(new String[] { "-upgrade", "-renameReserved",
".reserved=.my-reserved,.not-reserved=.my-not-reserved" });
} catch (IllegalArgumentException e) {
assertExceptionContains("Unknown reserved path", e);
}
try {
opt =
NameNode.parseArguments(new String[] { "-upgrade", "-renameReserved",
".reserved=.my-reserved,.snapshot=.snapshot" });
} catch (IllegalArgumentException e) {
assertExceptionContains("Invalid rename path", e);
}
try {
opt =
NameNode.parseArguments(new String[] { "-upgrade", "-renameReserved",
".snapshot=.reserved" });
} catch (IllegalArgumentException e) {
assertExceptionContains("Invalid rename path", e);
}
opt = NameNode.parseArguments(new String[] { "-upgrade", "-cid"});
assertNull(opt);
}
}

View File

@ -55,6 +55,7 @@
import org.apache.hadoop.hdfs.server.blockmanagement.BlockManagerTestUtil; import org.apache.hadoop.hdfs.server.blockmanagement.BlockManagerTestUtil;
import org.apache.hadoop.hdfs.server.namenode.FSImage; import org.apache.hadoop.hdfs.server.namenode.FSImage;
import org.apache.hadoop.hdfs.server.namenode.FSNamesystem; import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
import org.apache.hadoop.hdfs.server.namenode.FSNamesystem.SafeModeInfo;
import org.apache.hadoop.hdfs.server.namenode.NameNode; import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter; import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
import org.apache.hadoop.io.IOUtils; import org.apache.hadoop.io.IOUtils;
@ -65,6 +66,7 @@
import org.junit.After; import org.junit.After;
import org.junit.Before; import org.junit.Before;
import org.junit.Test; import org.junit.Test;
import org.mockito.internal.util.reflection.Whitebox;
import com.google.common.base.Supplier; import com.google.common.base.Supplier;
import com.google.common.collect.Lists; import com.google.common.collect.Lists;
@ -124,6 +126,9 @@ public void testClientRetrySafeMode() throws Exception {
final Path test = new Path("/test"); final Path test = new Path("/test");
// let nn0 enter safemode // let nn0 enter safemode
NameNodeAdapter.enterSafeMode(nn0, false); NameNodeAdapter.enterSafeMode(nn0, false);
SafeModeInfo safeMode = (SafeModeInfo) Whitebox.getInternalState(
nn0.getNamesystem(), "safeMode");
Whitebox.setInternalState(safeMode, "extension", Integer.valueOf(30000));
LOG.info("enter safemode"); LOG.info("enter safemode");
new Thread() { new Thread() {
@Override @Override

View File

@ -52,6 +52,7 @@ public static void setUp() throws Exception {
conf.setBoolean(DFSConfigKeys.DFS_WEBHDFS_ENABLED_KEY, true); conf.setBoolean(DFSConfigKeys.DFS_WEBHDFS_ENABLED_KEY, true);
conf.set(DFSConfigKeys.DFS_HTTP_POLICY_KEY, HttpConfig.Policy.HTTPS_ONLY.name()); conf.set(DFSConfigKeys.DFS_HTTP_POLICY_KEY, HttpConfig.Policy.HTTPS_ONLY.name());
conf.set(DFSConfigKeys.DFS_NAMENODE_HTTPS_ADDRESS_KEY, "localhost:0"); conf.set(DFSConfigKeys.DFS_NAMENODE_HTTPS_ADDRESS_KEY, "localhost:0");
conf.set(DFSConfigKeys.DFS_DATANODE_HTTPS_ADDRESS_KEY, "localhost:0");
File base = new File(BASEDIR); File base = new File(BASEDIR);
FileUtil.fullyDelete(base); FileUtil.fullyDelete(base);

View File

@ -153,6 +153,8 @@ Release 2.4.0 - UNRELEASED
MAPREDUCE-5732. Report proper queue when job has been automatically placed MAPREDUCE-5732. Report proper queue when job has been automatically placed
(Sandy Ryza) (Sandy Ryza)
MAPREDUCE-5699. Allow setting tags on MR jobs (kasha)
OPTIMIZATIONS OPTIMIZATIONS
BUG FIXES BUG FIXES
@ -228,6 +230,10 @@ Release 2.3.0 - UNRELEASED
MAPREDUCE-5725. Make explicit that TestNetworkedJob relies on the Capacity MAPREDUCE-5725. Make explicit that TestNetworkedJob relies on the Capacity
Scheduler (Sandy Ryza) Scheduler (Sandy Ryza)
MAPREDUCE-5744. Job hangs because
RMContainerAllocator$AssignedRequests.preemptReduce() violates the
comparator contract (Gera Shegalov via kasha)
OPTIMIZATIONS OPTIMIZATIONS
MAPREDUCE-4680. Job history cleaner should only check timestamps of files in MAPREDUCE-4680. Job history cleaner should only check timestamps of files in
@ -347,6 +353,9 @@ Release 2.3.0 - UNRELEASED
MAPREDUCE-5723. MR AM container log can be truncated or empty. MAPREDUCE-5723. MR AM container log can be truncated or empty.
(Mohammad Kamrul Islam via kasha) (Mohammad Kamrul Islam via kasha)
MAPREDUCE-5743. Fixed the test failure in TestRMContainerAllocator.
(Ted Yu and Vinod Kumar Vavilapalli via zjshen)
Release 2.2.0 - 2013-10-13 Release 2.2.0 - 2013-10-13
INCOMPATIBLE CHANGES INCOMPATIBLE CHANGES

View File

@ -1143,9 +1143,9 @@ void preemptReduce(int toPreempt) {
new Comparator<TaskAttemptId>() { new Comparator<TaskAttemptId>() {
@Override @Override
public int compare(TaskAttemptId o1, TaskAttemptId o2) { public int compare(TaskAttemptId o1, TaskAttemptId o2) {
float p = getJob().getTask(o1.getTaskId()).getAttempt(o1).getProgress() - return Float.compare(
getJob().getTask(o2.getTaskId()).getAttempt(o2).getProgress(); getJob().getTask(o1.getTaskId()).getAttempt(o1).getProgress(),
return p >= 0 ? 1 : -1; getJob().getTask(o2.getTaskId()).getAttempt(o2).getProgress());
} }
}); });

View File

@ -1652,8 +1652,16 @@ public void testCompletedTasksRecalculateSchedule() throws Exception {
RMApp app = rm.submitApp(1024); RMApp app = rm.submitApp(1024);
dispatcher.await(); dispatcher.await();
// Make a node to register so as to launch the AM.
MockNM amNodeManager = rm.registerNode("amNM:1234", 2048);
amNodeManager.nodeHeartbeat(true);
dispatcher.await();
ApplicationAttemptId appAttemptId = app.getCurrentAppAttempt() ApplicationAttemptId appAttemptId = app.getCurrentAppAttempt()
.getAppAttemptId(); .getAppAttemptId();
rm.sendAMLaunched(appAttemptId);
dispatcher.await();
JobId jobId = MRBuilderUtils.newJobId(appAttemptId.getApplicationId(), 0); JobId jobId = MRBuilderUtils.newJobId(appAttemptId.getApplicationId(), 0);
Job job = mock(Job.class); Job job = mock(Job.class);
when(job.getReport()).thenReturn( when(job.getReport()).thenReturn(

View File

@ -60,6 +60,8 @@ public interface MRJobConfig {
public static final String QUEUE_NAME = "mapreduce.job.queuename"; public static final String QUEUE_NAME = "mapreduce.job.queuename";
public static final String JOB_TAGS = "mapreduce.job.tags";
public static final String JVM_NUMTASKS_TORUN = "mapreduce.job.jvm.numtasks"; public static final String JVM_NUMTASKS_TORUN = "mapreduce.job.jvm.numtasks";
public static final String SPLIT_FILE = "mapreduce.job.splitfile"; public static final String SPLIT_FILE = "mapreduce.job.splitfile";

View File

@ -1,4 +1,5 @@
<?xml version="1.0"?> <?xml version="1.0"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<!-- <!--
Licensed to the Apache Software Foundation (ASF) under one or more Licensed to the Apache Software Foundation (ASF) under one or more
contributor license agreements. See the NOTICE file distributed with contributor license agreements. See the NOTICE file distributed with
@ -15,7 +16,6 @@
See the License for the specific language governing permissions and See the License for the specific language governing permissions and
limitations under the License. limitations under the License.
--> -->
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<!-- Do not modify this file directly. Instead, copy entries that you --> <!-- Do not modify this file directly. Instead, copy entries that you -->
<!-- wish to modify from this file into mapred-site.xml and change them --> <!-- wish to modify from this file into mapred-site.xml and change them -->
@ -727,6 +727,14 @@
</description> </description>
</property> </property>
<property>
<name>mapreduce.job.tags</name>
<value></value>
<description> Tags for the job that will be passed to YARN at submission
time. Queries to YARN for applications can filter on these tags.
</description>
</property>
<property> <property>
<name>mapreduce.cluster.local.dir</name> <name>mapreduce.cluster.local.dir</name>
<value>${hadoop.tmp.dir}/mapred/local</value> <value>${hadoop.tmp.dir}/mapred/local</value>

View File

@ -21,7 +21,9 @@
import java.io.IOException; import java.io.IOException;
import java.nio.ByteBuffer; import java.nio.ByteBuffer;
import java.util.ArrayList; import java.util.ArrayList;
import java.util.Collection;
import java.util.HashMap; import java.util.HashMap;
import java.util.HashSet;
import java.util.List; import java.util.List;
import java.util.Map; import java.util.Map;
import java.util.Vector; import java.util.Vector;
@ -467,6 +469,8 @@ public ApplicationSubmissionContext createApplicationSubmissionContext(
ContainerLaunchContext.newInstance(localResources, environment, ContainerLaunchContext.newInstance(localResources, environment,
vargsFinal, null, securityTokens, acls); vargsFinal, null, securityTokens, acls);
Collection<String> tagsFromConf =
jobConf.getTrimmedStringCollection(MRJobConfig.JOB_TAGS);
// Set up the ApplicationSubmissionContext // Set up the ApplicationSubmissionContext
ApplicationSubmissionContext appContext = ApplicationSubmissionContext appContext =
@ -486,6 +490,9 @@ public ApplicationSubmissionContext createApplicationSubmissionContext(
MRJobConfig.DEFAULT_MR_AM_MAX_ATTEMPTS)); MRJobConfig.DEFAULT_MR_AM_MAX_ATTEMPTS));
appContext.setResource(capability); appContext.setResource(capability);
appContext.setApplicationType(MRJobConfig.MR_APPLICATION_TYPE); appContext.setApplicationType(MRJobConfig.MR_APPLICATION_TYPE);
if (tagsFromConf != null && !tagsFromConf.isEmpty()) {
appContext.setApplicationTags(new HashSet<String>(tagsFromConf));
}
return appContext; return appContext;
} }
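Together with the mapred-default.xml entry above, tagging a job from client code reduces to one comma-separated property set before submission; YARNRunner reads it with getTrimmedStringCollection() and copies the tags onto the ApplicationSubmissionContext. A hedged sketch (the tag values are made up):

    // Assumes: org.apache.hadoop.conf.Configuration, org.apache.hadoop.mapreduce.Job,
    // org.apache.hadoop.mapreduce.MRJobConfig.
    Configuration conf = new Configuration();
    conf.set(MRJobConfig.JOB_TAGS, "nightly, team-a");   // trimmed and split on commas
    Job job = Job.getInstance(conf, "tagged-wordcount");
    // ... configure mapper/reducer/paths as usual, then job.submit();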

View File

@ -14,6 +14,9 @@ Trunk - Unreleased
YARN-1504. RM changes for moving apps between queues (Sandy Ryza) YARN-1504. RM changes for moving apps between queues (Sandy Ryza)
YARN-1499. Fair Scheduler changes for moving apps between queues (Sandy
Ryza)
IMPROVEMENTS IMPROVEMENTS
OPTIMIZATIONS OPTIMIZATIONS
@ -82,6 +85,24 @@ Release 2.4.0 - UNRELEASED
YARN-1633. Defined user-facing entity, entity-info and event objects related YARN-1633. Defined user-facing entity, entity-info and event objects related
to Application Timeline feature. (Zhijie Shen via vinodkv) to Application Timeline feature. (Zhijie Shen via vinodkv)
YARN-1611. Introduced the concept of a configuration provider which can be
used by ResourceManager to read configuration locally or from remote systems
so as to help RM failover. (Xuan Gong via vinodkv)
YARN-1659. Defined the ApplicationTimelineStore store as an abstraction for
implementing different storage impls for storing timeline information.
(Billie Rinaldi via vinodkv)
YARN-1634. Added a testable in-memory implementation of
ApplicationTimelineStore. (Zhijie Shen via vinodkv)
YARN-1461. Added tags for YARN applications and changed RM to handle them.
(Karthik Kambatla via zjshen)
YARN-1636. Augmented Application-history server's web-services to also expose
new APIs for retrieving and storing timeline information. (Zhijie Shen via
vinodkv)
IMPROVEMENTS IMPROVEMENTS
YARN-1007. Enhance History Reader interface for Containers. (Mayank Bansal via YARN-1007. Enhance History Reader interface for Containers. (Mayank Bansal via
@ -99,6 +120,25 @@ Release 2.4.0 - UNRELEASED
YARN-1617. Remove ancient comment and surround LOG.debug in YARN-1617. Remove ancient comment and surround LOG.debug in
AppSchedulingInfo.allocate (Sandy Ryza) AppSchedulingInfo.allocate (Sandy Ryza)
YARN-1639. Modified RM HA configuration handling to have a way of not
requiring separate configuration files for each RM. (Xuan Gong via vinodkv)
YARN-1668. Modified RM HA handling of admin-acls to be available across RM
failover by making use of a remote configuration-provider. (Xuan Gong via
vinodkv)
YARN-1667. Modified RM HA handling of super users (with proxying ability) to
be available across RM failover by making use of a remote
configuration-provider. (Xuan Gong via vinodkv)
YARN-1285. Changed the default value of yarn.acl.enable in yarn-default.xml
to be consistent with what exists (false) in the code and documentation.
(Kenji Kikushima via vinodkv)
YARN-1669. Modified RM HA handling of protocol level service-ACLS to
be available across RM failover by making use of a remote
configuration-provider. (Xuan Gong via vinodkv)
OPTIMIZATIONS OPTIMIZATIONS
BUG FIXES BUG FIXES
@ -148,6 +188,12 @@ Release 2.4.0 - UNRELEASED
YARN-1632. TestApplicationMasterServices should be under YARN-1632. TestApplicationMasterServices should be under
org.apache.hadoop.yarn.server.resourcemanager package (Chen He via jeagles) org.apache.hadoop.yarn.server.resourcemanager package (Chen He via jeagles)
YARN-1673. Fix option parsing in YARN's application CLI after it is broken
by YARN-967. (Mayank Bansal via vinodkv)
YARN-1684. Fixed history server heap size in yarn script. (Billie Rinaldi
via zjshen)
Release 2.3.0 - UNRELEASED Release 2.3.0 - UNRELEASED
INCOMPATIBLE CHANGES INCOMPATIBLE CHANGES
@ -581,6 +627,15 @@ Release 2.3.0 - UNRELEASED
YARN-1629. IndexOutOfBoundsException in MaxRunningAppsEnforcer (Sandy Ryza) YARN-1629. IndexOutOfBoundsException in MaxRunningAppsEnforcer (Sandy Ryza)
YARN-1628. Fixed the test failure in TestContainerManagerSecurity. (Vinod
Kumar Vavilapalli via zjshen)
YARN-1661. Fixed DS ApplicationMaster to write the correct exit log. (Vinod
Kumar Vavilapalli via zjshen)
YARN-1689. Made RMAppAttempt get killed when RMApp is at ACCEPTED. (Vinod
Kumar Vavilapalli via zjshen)
Release 2.2.0 - 2013-10-13 Release 2.2.0 - 2013-10-13
INCOMPATIBLE CHANGES INCOMPATIBLE CHANGES

View File

@ -204,7 +204,7 @@ elif [ "$COMMAND" = "historyserver" ] ; then
CLASSPATH=${CLASSPATH}:$YARN_CONF_DIR/ahs-config/log4j.properties CLASSPATH=${CLASSPATH}:$YARN_CONF_DIR/ahs-config/log4j.properties
CLASS='org.apache.hadoop.yarn.server.applicationhistoryservice.ApplicationHistoryServer' CLASS='org.apache.hadoop.yarn.server.applicationhistoryservice.ApplicationHistoryServer'
YARN_OPTS="$YARN_OPTS $YARN_HISTORYSERVER_OPTS" YARN_OPTS="$YARN_OPTS $YARN_HISTORYSERVER_OPTS"
if [ "$YARN_RESOURCEMANAGER_HEAPSIZE" != "" ]; then if [ "$YARN_HISTORYSERVER_HEAPSIZE" != "" ]; then
JAVA_HEAP_MAX="-Xmx""$YARN_HISTORYSERVER_HEAPSIZE""m" JAVA_HEAP_MAX="-Xmx""$YARN_HISTORYSERVER_HEAPSIZE""m"
fi fi
elif [ "$COMMAND" = "nodemanager" ] ; then elif [ "$COMMAND" = "nodemanager" ] ; then

View File

@ -207,7 +207,7 @@ goto :eof
set CLASSPATH=%CLASSPATH%;%YARN_CONF_DIR%\ahs-config\log4j.properties set CLASSPATH=%CLASSPATH%;%YARN_CONF_DIR%\ahs-config\log4j.properties
set CLASS=org.apache.hadoop.yarn.server.applicationhistoryservice.ApplicationHistoryServer set CLASS=org.apache.hadoop.yarn.server.applicationhistoryservice.ApplicationHistoryServer
set YARN_OPTS=%YARN_OPTS% %HADOOP_HISTORYSERVER_OPTS% set YARN_OPTS=%YARN_OPTS% %HADOOP_HISTORYSERVER_OPTS%
if defined YARN_RESOURCEMANAGER_HEAPSIZE ( if defined YARN_HISTORYSERVER_HEAPSIZE (
set JAVA_HEAP_MAX=-Xmx%YARN_HISTORYSERVER_HEAPSIZE%m set JAVA_HEAP_MAX=-Xmx%YARN_HISTORYSERVER_HEAPSIZE%m
) )
goto :eof goto :eof

View File

@ -0,0 +1,38 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.api.protocolrecords;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
/**
* Enumeration that controls the scope of applications fetched
*/
@InterfaceAudience.Public
@InterfaceStability.Unstable
public enum ApplicationsRequestScope {
/** All jobs */
ALL,
/** Jobs viewable by current user */
VIEWABLE,
/** Jobs owned by current user */
OWN
}

View File

@ -21,7 +21,6 @@
import java.util.EnumSet; import java.util.EnumSet;
import java.util.Set; import java.util.Set;
import org.apache.commons.collections.buffer.UnboundedFifoBuffer;
import org.apache.commons.lang.math.LongRange; import org.apache.commons.lang.math.LongRange;
import org.apache.hadoop.classification.InterfaceAudience.Private; import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.classification.InterfaceAudience.Public; import org.apache.hadoop.classification.InterfaceAudience.Public;
@ -49,6 +48,86 @@ public static GetApplicationsRequest newInstance() {
return request; return request;
} }
/**
* <p>
* The request from clients to get a report of Applications matching the
* given application types in the cluster from the
* <code>ResourceManager</code>.
* </p>
*
* @see ApplicationClientProtocol#getApplications(GetApplicationsRequest)
*
* <p>Setting any of the parameters to null would just disable that
* filter</p>
*
* @param scope {@link ApplicationsRequestScope} to filter by
* @param users list of users to filter by
* @param queues list of scheduler queues to filter by
* @param applicationTypes types of applications
* @param applicationTags application tags to filter by
* @param applicationStates application states to filter by
* @param startRange range of application start times to filter by
* @param finishRange range of application finish times to filter by
* @param limit number of applications to limit to
* @return {@link GetApplicationsRequest} to be used with
* {@link ApplicationClientProtocol#getApplications(GetApplicationsRequest)}
*/
@Public
@Stable
public static GetApplicationsRequest newInstance(
ApplicationsRequestScope scope,
Set<String> users,
Set<String> queues,
Set<String> applicationTypes,
Set<String> applicationTags,
EnumSet<YarnApplicationState> applicationStates,
LongRange startRange,
LongRange finishRange,
Long limit) {
GetApplicationsRequest request =
Records.newRecord(GetApplicationsRequest.class);
if (scope != null) {
request.setScope(scope);
}
request.setUsers(users);
request.setQueues(queues);
request.setApplicationTypes(applicationTypes);
request.setApplicationTags(applicationTags);
request.setApplicationStates(applicationStates);
if (startRange != null) {
request.setStartRange(
startRange.getMinimumLong(), startRange.getMaximumLong());
}
if (finishRange != null) {
request.setFinishRange(
finishRange.getMinimumLong(), finishRange.getMaximumLong());
}
if (limit != null) {
request.setLimit(limit);
}
return request;
}
/**
* <p>
* The request from clients to get a report of Applications matching the
* given application types in the cluster from the
* <code>ResourceManager</code>.
* </p>
*
* @param scope {@link ApplicationsRequestScope} to filter by
* @see ApplicationClientProtocol#getApplications(GetApplicationsRequest)
*/
@Public
@Stable
public static GetApplicationsRequest newInstance(
ApplicationsRequestScope scope) {
GetApplicationsRequest request =
Records.newRecord(GetApplicationsRequest.class);
request.setScope(scope);
return request;
}
/** /**
* <p> * <p>
* The request from clients to get a report of Applications matching the * The request from clients to get a report of Applications matching the
@ -257,4 +336,40 @@ public abstract void setStartRange(long begin, long end)
@Private @Private
@Unstable @Unstable
public abstract void setFinishRange(long begin, long end); public abstract void setFinishRange(long begin, long end);
/**
* Get the tags to filter applications on
*
* @return list of tags to filter on
*/
@Private
@Unstable
public abstract Set<String> getApplicationTags();
/**
* Set the list of tags to filter applications on
*
* @param tags list of tags to filter on
*/
@Private
@Unstable
public abstract void setApplicationTags(Set<String> tags);
/**
* Get the {@link ApplicationsRequestScope} of applications to be filtered.
*
* @return {@link ApplicationsRequestScope} of applications to return.
*/
@Private
@Unstable
public abstract ApplicationsRequestScope getScope();
/**
* Set the {@link ApplicationsRequestScope} of applications to filter.
*
* @param scope scope to use for filtering applications
*/
@Private
@Unstable
public abstract void setScope(ApplicationsRequestScope scope);
} }
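A sketch of the new factory in use, asking for the caller's own RUNNING applications that carry a given tag (the filter values are illustrative; per the javadoc above, passing null disables a filter):

    // Assumes: java.util.{Arrays, EnumSet, HashSet} and
    // org.apache.hadoop.yarn.api.records.YarnApplicationState.
    GetApplicationsRequest request = GetApplicationsRequest.newInstance(
        ApplicationsRequestScope.OWN,                        // only the caller's apps
        null, null,                                          // users, queues: no filter
        new HashSet<String>(Arrays.asList("MAPREDUCE")),     // application types
        new HashSet<String>(Arrays.asList("nightly")),       // application tags
        EnumSet.of(YarnApplicationState.RUNNING),
        null, null,                                          // start/finish time ranges
        10L);                                                // cap the result list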

View File

@ -25,6 +25,8 @@
import org.apache.hadoop.yarn.api.ApplicationClientProtocol; import org.apache.hadoop.yarn.api.ApplicationClientProtocol;
import org.apache.hadoop.yarn.util.Records; import org.apache.hadoop.yarn.util.Records;
import java.util.Set;
/** /**
* <p><code>ApplicationReport</code> is a report of an application.</p> * <p><code>ApplicationReport</code> is a report of an application.</p>
* *
@ -321,6 +323,18 @@ public static ApplicationReport newInstance(ApplicationId applicationId,
@Unstable @Unstable
public abstract void setApplicationType(String applicationType); public abstract void setApplicationType(String applicationType);
/**
* Get all tags corresponding to the application
* @return Application's tags
*/
@Public
@Stable
public abstract Set<String> getApplicationTags();
@Private
@Unstable
public abstract void setApplicationTags(Set<String> tags);
@Private @Private
@Stable @Stable
public abstract void setAMRMToken(Token amRmToken); public abstract void setAMRMToken(Token amRmToken);

View File

@ -25,8 +25,11 @@
import org.apache.hadoop.classification.InterfaceStability.Unstable; import org.apache.hadoop.classification.InterfaceStability.Unstable;
import org.apache.hadoop.yarn.api.ApplicationClientProtocol; import org.apache.hadoop.yarn.api.ApplicationClientProtocol;
import org.apache.hadoop.yarn.api.ApplicationMasterProtocol; import org.apache.hadoop.yarn.api.ApplicationMasterProtocol;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.util.Records; import org.apache.hadoop.yarn.util.Records;
import java.util.Set;
/** /**
* <p><code>ApplicationSubmissionContext</code> represents all of the * <p><code>ApplicationSubmissionContext</code> represents all of the
* information needed by the <code>ResourceManager</code> to launch * information needed by the <code>ResourceManager</code> to launch
@ -284,7 +287,6 @@ public static ApplicationSubmissionContext newInstance(
@Stable @Stable
public abstract void setApplicationType(String applicationType); public abstract void setApplicationType(String applicationType);
/** /**
* Get the flag which indicates whether to keep containers across application * Get the flag which indicates whether to keep containers across application
* attempts or not. * attempts or not.
@ -314,4 +316,26 @@ public static ApplicationSubmissionContext newInstance(
@Stable @Stable
public abstract void setKeepContainersAcrossApplicationAttempts( public abstract void setKeepContainersAcrossApplicationAttempts(
boolean keepContainers); boolean keepContainers);
/**
* Get tags for the application
*
* @return the application tags
*/
@Public
@Stable
public abstract Set<String> getApplicationTags();
/**
* Set tags for the application. A maximum of
* {@link YarnConfiguration#APPLICATION_MAX_TAGS} are allowed
* per application. Each tag can be at most
* {@link YarnConfiguration#APPLICATION_MAX_TAG_LENGTH}
* characters, and can contain only ASCII characters.
*
* @param tags tags to set
*/
@Public
@Stable
public abstract void setApplicationTags(Set<String> tags);
} }
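On the submission side the tags land on the ApplicationSubmissionContext itself; a minimal sketch, assuming the record is built through Records.newRecord as elsewhere in this API:

    // Assumes: java.util.{Arrays, HashSet} and org.apache.hadoop.yarn.util.Records.
    ApplicationSubmissionContext context =
        Records.newRecord(ApplicationSubmissionContext.class);
    // At most APPLICATION_MAX_TAGS tags, each up to APPLICATION_MAX_TAG_LENGTH
    // ASCII characters, per the javadoc above.
    context.setApplicationTags(
        new HashSet<String>(Arrays.asList("nightly", "team-a")));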

View File

@ -22,6 +22,7 @@
import java.util.HashMap; import java.util.HashMap;
import java.util.List; import java.util.List;
import java.util.Map; import java.util.Map;
import java.util.Map.Entry;
import javax.xml.bind.annotation.XmlAccessType; import javax.xml.bind.annotation.XmlAccessType;
import javax.xml.bind.annotation.XmlAccessorType; import javax.xml.bind.annotation.XmlAccessorType;
@ -49,14 +50,14 @@
@XmlAccessorType(XmlAccessType.NONE) @XmlAccessorType(XmlAccessType.NONE)
@Public @Public
@Unstable @Unstable
public class ATSEntity { public class ATSEntity implements Comparable<ATSEntity> {
private String entityType; private String entityType;
private String entityId; private String entityId;
private long startTime; private Long startTime;
private List<ATSEvent> events = new ArrayList<ATSEvent>(); private List<ATSEvent> events = new ArrayList<ATSEvent>();
private Map<String, List<Object>> relatedEntities = private Map<String, List<String>> relatedEntities =
new HashMap<String, List<Object>>(); new HashMap<String, List<String>>();
private Map<String, Object> primaryFilters = private Map<String, Object> primaryFilters =
new HashMap<String, Object>(); new HashMap<String, Object>();
private Map<String, Object> otherInfo = private Map<String, Object> otherInfo =
@ -112,7 +113,7 @@ public void setEntityId(String entityId) {
* @return the start time of the entity * @return the start time of the entity
*/ */
@XmlElement(name = "starttime") @XmlElement(name = "starttime")
public long getStartTime() { public Long getStartTime() {
return startTime; return startTime;
} }
@ -122,7 +123,7 @@ public long getStartTime() {
* @param startTime * @param startTime
* the start time of the entity * the start time of the entity
*/ */
public void setStartTime(long startTime) { public void setStartTime(Long startTime) {
this.startTime = startTime; this.startTime = startTime;
} }
@ -172,26 +173,25 @@ public void setEvents(List<ATSEvent> events) {
* @return the related entities * @return the related entities
*/ */
@XmlElement(name = "relatedentities") @XmlElement(name = "relatedentities")
public Map<String, List<Object>> getRelatedEntities() { public Map<String, List<String>> getRelatedEntities() {
return relatedEntities; return relatedEntities;
} }
/** /**
* Add a list of entity of the same type to the existing related entity map * Add an entity to the existing related entity map
* *
* @param entityType * @param entityType
* the entity type * the entity type
* @param entityIds * @param entityId
* a list of entity Ids * the entity Id
*/ */
public void addRelatedEntity(String entityType, List<Object> entityIds) { public void addRelatedEntity(String entityType, String entityId) {
List<Object> thisRelatedEntity = relatedEntities.get(entityType); List<String> thisRelatedEntity = relatedEntities.get(entityType);
relatedEntities.put(entityType, entityIds);
if (thisRelatedEntity == null) { if (thisRelatedEntity == null) {
relatedEntities.put(entityType, entityIds); thisRelatedEntity = new ArrayList<String>();
} else { relatedEntities.put(entityType, thisRelatedEntity);
thisRelatedEntity.addAll(entityIds);
} }
thisRelatedEntity.add(entityId);
} }
/** /**
@ -200,11 +200,10 @@ public void addRelatedEntity(String entityType, List<Object> entityIds) {
* @param relatedEntities * @param relatedEntities
* a map of related entities * a map of related entities
*/ */
public void addRelatedEntities( public void addRelatedEntities(Map<String, List<String>> relatedEntities) {
Map<String, List<Object>> relatedEntities) { for (Entry<String, List<String>> relatedEntity :
for (Map.Entry<String, List<Object>> relatedEntity : relatedEntities relatedEntities.entrySet()) {
.entrySet()) { List<String> thisRelatedEntity =
List<Object> thisRelatedEntity =
this.relatedEntities.get(relatedEntity.getKey()); this.relatedEntities.get(relatedEntity.getKey());
if (thisRelatedEntity == null) { if (thisRelatedEntity == null) {
this.relatedEntities.put( this.relatedEntities.put(
@ -222,7 +221,7 @@ public void addRelatedEntities(
* a map of related entities * a map of related entities
*/ */
public void setRelatedEntities( public void setRelatedEntities(
Map<String, List<Object>> relatedEntities) { Map<String, List<String>> relatedEntities) {
this.relatedEntities = relatedEntities; this.relatedEntities = relatedEntities;
} }
@ -311,4 +310,92 @@ public void setOtherInfo(Map<String, Object> otherInfo) {
this.otherInfo = otherInfo; this.otherInfo = otherInfo;
} }
@Override
public int hashCode() {
// generated by eclipse
final int prime = 31;
int result = 1;
result = prime * result + ((entityId == null) ? 0 : entityId.hashCode());
result =
prime * result + ((entityType == null) ? 0 : entityType.hashCode());
result = prime * result + ((events == null) ? 0 : events.hashCode());
result = prime * result + ((otherInfo == null) ? 0 : otherInfo.hashCode());
result =
prime * result
+ ((primaryFilters == null) ? 0 : primaryFilters.hashCode());
result =
prime * result
+ ((relatedEntities == null) ? 0 : relatedEntities.hashCode());
result = prime * result + ((startTime == null) ? 0 : startTime.hashCode());
return result;
}
@Override
public boolean equals(Object obj) {
// generated by eclipse
if (this == obj)
return true;
if (obj == null)
return false;
if (getClass() != obj.getClass())
return false;
ATSEntity other = (ATSEntity) obj;
if (entityId == null) {
if (other.entityId != null)
return false;
} else if (!entityId.equals(other.entityId))
return false;
if (entityType == null) {
if (other.entityType != null)
return false;
} else if (!entityType.equals(other.entityType))
return false;
if (events == null) {
if (other.events != null)
return false;
} else if (!events.equals(other.events))
return false;
if (otherInfo == null) {
if (other.otherInfo != null)
return false;
} else if (!otherInfo.equals(other.otherInfo))
return false;
if (primaryFilters == null) {
if (other.primaryFilters != null)
return false;
} else if (!primaryFilters.equals(other.primaryFilters))
return false;
if (relatedEntities == null) {
if (other.relatedEntities != null)
return false;
} else if (!relatedEntities.equals(other.relatedEntities))
return false;
if (startTime == null) {
if (other.startTime != null)
return false;
} else if (!startTime.equals(other.startTime))
return false;
return true;
}
@Override
public int compareTo(ATSEntity other) {
int comparison = entityType.compareTo(other.entityType);
if (comparison == 0) {
long thisStartTime =
startTime == null ? Long.MIN_VALUE : startTime;
long otherStartTime =
other.startTime == null ? Long.MIN_VALUE : other.startTime;
if (thisStartTime > otherStartTime) {
return -1;
} else if (thisStartTime < otherStartTime) {
return 1;
} else {
return entityId.compareTo(other.entityId);
}
} else {
return comparison;
}
}
} }
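The compareTo above orders entities by type, then by descending start time (a null start time is treated as Long.MIN_VALUE and sorts last within its type), then by entity id, so a store can keep results newest-first per type. A small illustration, assuming the usual bean setters (the type and ids are hypothetical):

    ATSEntity newer = new ATSEntity();
    newer.setEntityType("YARN_APPLICATION");
    newer.setEntityId("application_1_0002");
    newer.setStartTime(200L);

    ATSEntity older = new ATSEntity();
    older.setEntityType("YARN_APPLICATION");
    older.setEntityId("application_1_0001");
    older.setStartTime(100L);

    List<ATSEntity> entities = new ArrayList<ATSEntity>(Arrays.asList(older, newer));
    Collections.sort(entities);   // "newer" first: the larger start time compares as smaller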

View File

@ -39,7 +39,7 @@
@XmlAccessorType(XmlAccessType.NONE) @XmlAccessorType(XmlAccessType.NONE)
@Public @Public
@Unstable @Unstable
public class ATSEvent { public class ATSEvent implements Comparable<ATSEvent> {
private long timestamp; private long timestamp;
private String eventType; private String eventType;
@ -131,4 +131,42 @@ public void setEventInfo(Map<String, Object> eventInfo) {
this.eventInfo = eventInfo; this.eventInfo = eventInfo;
} }
@Override
public int compareTo(ATSEvent other) {
if (timestamp > other.timestamp) {
return -1;
} else if (timestamp < other.timestamp) {
return 1;
} else {
return eventType.compareTo(other.eventType);
}
}
@Override
public boolean equals(Object o) {
if (this == o)
return true;
if (o == null || getClass() != o.getClass())
return false;
ATSEvent atsEvent = (ATSEvent) o;
if (timestamp != atsEvent.timestamp)
return false;
if (!eventType.equals(atsEvent.eventType))
return false;
if (eventInfo != null ? !eventInfo.equals(atsEvent.eventInfo) :
atsEvent.eventInfo != null)
return false;
return true;
}
@Override
public int hashCode() {
int result = (int) (timestamp ^ (timestamp >>> 32));
result = 31 * result + eventType.hashCode();
result = 31 * result + (eventInfo != null ? eventInfo.hashCode() : 0);
return result;
}
} }

View File

@ -160,7 +160,7 @@ public List<ATSEvent> getEvents() {
* @param event * @param event
* a single event * a single event
*/ */
public void addEntity(ATSEvent event) { public void addEvent(ATSEvent event) {
events.add(event); events.add(event);
} }

View File

@ -0,0 +1,163 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.api.records.apptimeline;
import org.apache.hadoop.classification.InterfaceAudience.Public;
import org.apache.hadoop.classification.InterfaceStability.Unstable;
import javax.xml.bind.annotation.XmlAccessType;
import javax.xml.bind.annotation.XmlAccessorType;
import javax.xml.bind.annotation.XmlElement;
import javax.xml.bind.annotation.XmlRootElement;
import java.util.ArrayList;
import java.util.List;
/**
* A class that holds a list of put errors. This is the response returned
* when a list of {@link ATSEntity} objects is added to the application
* timeline. If there are errors in storing individual entity objects,
* they will be indicated in the list of errors.
*/
@XmlRootElement(name = "errors")
@XmlAccessorType(XmlAccessType.NONE)
@Public
@Unstable
public class ATSPutErrors {
private List<ATSPutError> errors = new ArrayList<ATSPutError>();
public ATSPutErrors() {
}
/**
* Get a list of {@link ATSPutError} instances
*
* @return a list of {@link ATSPutError} instances
*/
@XmlElement(name = "errors")
public List<ATSPutError> getErrors() {
return errors;
}
/**
* Add a single {@link ATSPutError} instance into the existing list
*
* @param error
* a single {@link ATSPutError} instance
*/
public void addError(ATSPutError error) {
errors.add(error);
}
/**
* Add a list of {@link ATSPutError} instances into the existing list
*
* @param errors
* a list of {@link ATSPutError} instances
*/
public void addErrors(List<ATSPutError> errors) {
this.errors.addAll(errors);
}
/**
* Set the list to the given list of {@link ATSPutError} instances
*
* @param errors
* a list of {@link ATSPutError} instances
*/
public void setErrors(List<ATSPutError> errors) {
this.errors.clear();
this.errors.addAll(errors);
}
/**
* A class that holds the error code for one entity.
*/
@XmlRootElement(name = "error")
@XmlAccessorType(XmlAccessType.NONE)
@Public
@Unstable
public static class ATSPutError {
private String entityId;
private String entityType;
private Integer errorCode;
/**
* Get the entity Id
*
* @return the entity Id
*/
@XmlElement(name = "entity")
public String getEntityId() {
return entityId;
}
/**
* Set the entity Id
*
* @param entityId
* the entity Id
*/
public void setEntityId(String entityId) {
this.entityId = entityId;
}
/**
* Get the entity type
*
* @return the entity type
*/
@XmlElement(name = "entitytype")
public String getEntityType() {
return entityType;
}
/**
* Set the entity type
*
* @param entityType
* the entity type
*/
public void setEntityType(String entityType) {
this.entityType = entityType;
}
/**
* Get the error code
*
* @return an error code
*/
@XmlElement(name = "errorcode")
public Integer getErrorCode() {
return errorCode;
}
/**
* Set the error code to the given error code
*
* @param errorCode
* an error code
*/
public void setErrorCode(Integer errorCode) {
this.errorCode = errorCode;
}
}
}
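A sketch of how a store implementation might fill in this response when an entity cannot be persisted (the entity names and the numeric error code are made up; real codes are defined by the store, not by this record class):

    ATSPutErrors response = new ATSPutErrors();
    ATSPutErrors.ATSPutError error = new ATSPutErrors.ATSPutError();
    error.setEntityId("application_1_0001");     // hypothetical entity
    error.setEntityType("YARN_APPLICATION");
    error.setErrorCode(1);                       // hypothetical error code
    response.addError(error);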

View File

@ -0,0 +1,64 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.conf;
import java.io.IOException;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.classification.InterfaceStability.Unstable;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.yarn.exceptions.YarnException;
@Private
@Unstable
/**
* Base class to implement ConfigurationProvider.
* Real ConfigurationProvider implementations need to derive from it and
* implement load methods to actually load the configuration.
*/
public abstract class ConfigurationProvider {
public void init(Configuration conf) throws Exception {
initInternal(conf);
}
public void close() throws Exception {
closeInternal();
}
/**
* Get the configuration.
* @param name The configuration file name
* @return configuration
* @throws YarnException
* @throws IOException
*/
public abstract Configuration getConfiguration(String name)
throws YarnException, IOException;
/**
* Derived classes initialize themselves using this method.
*/
public abstract void initInternal(Configuration conf) throws Exception;
/**
* Derived classes close themselves using this method.
*/
public abstract void closeInternal() throws Exception;
}
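A minimal subclass sketch, purely illustrative (the shipped default is the LocalConfigurationProvider named in the factory below): it loads each requested file from the classpath and has nothing to set up or tear down.

    // Assumes: java.io.IOException, org.apache.hadoop.conf.Configuration,
    // org.apache.hadoop.yarn.exceptions.YarnException.
    public class ClasspathConfigurationProvider extends ConfigurationProvider {
      @Override
      public void initInternal(Configuration bootstrapConf) throws Exception {
        // nothing to initialize for classpath loading
      }

      @Override
      public Configuration getConfiguration(String name)
          throws YarnException, IOException {
        Configuration conf = new Configuration(false);
        conf.addResource(name);   // e.g. "capacity-scheduler.xml"
        return conf;
      }

      @Override
      public void closeInternal() throws Exception {
        // nothing to release
      }
    }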

View File

@ -0,0 +1,57 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.conf;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.classification.InterfaceStability.Unstable;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.util.ReflectionUtils;
import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
@Private
@Unstable
/**
* Factory for {@link ConfigurationProvider} implementations.
*/
public class ConfigurationProviderFactory {
/**
* Creates an instance of {@link ConfigurationProvider} using given
* configuration.
* @param conf
* @return configurationProvider
*/
@SuppressWarnings("unchecked")
public static ConfigurationProvider
getConfigurationProvider(Configuration conf) {
Class<? extends ConfigurationProvider> defaultProviderClass;
try {
defaultProviderClass = (Class<? extends ConfigurationProvider>)
Class.forName(
YarnConfiguration.DEFAULT_RM_CONFIGURATION_PROVIDER_CLASS);
} catch (Exception e) {
throw new YarnRuntimeException(
"Invalid default configuration provider class"
+ YarnConfiguration.DEFAULT_RM_CONFIGURATION_PROVIDER_CLASS, e);
}
ConfigurationProvider configurationProvider = ReflectionUtils.newInstance(
conf.getClass(YarnConfiguration.RM_CONFIGURATION_PROVIDER_CLASS,
defaultProviderClass, ConfigurationProvider.class), conf);
return configurationProvider;
}
}
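Wiring a provider up is then one factory lookup plus init, driven entirely by configuration; a short sketch (run inside a method declaring throws Exception):

    Configuration conf = new YarnConfiguration();
    conf.set(YarnConfiguration.RM_CONFIGURATION_PROVIDER_CLASS,
        YarnConfiguration.DEFAULT_RM_CONFIGURATION_PROVIDER_CLASS);
    ConfigurationProvider provider =
        ConfigurationProviderFactory.getConfigurationProvider(conf);
    provider.init(conf);
    Configuration policy =
        provider.getConfiguration(YarnConfiguration.HADOOP_POLICY_CONFIGURATION_FILE);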

View File

@ -21,10 +21,13 @@
import com.google.common.annotations.VisibleForTesting; import com.google.common.annotations.VisibleForTesting;
import org.apache.commons.logging.Log; import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory; import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.HadoopIllegalArgumentException;
import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.yarn.exceptions.YarnRuntimeException; import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
import java.net.InetSocketAddress;
import java.util.Collection; import java.util.Collection;
@InterfaceAudience.Private @InterfaceAudience.Private
@ -108,8 +111,7 @@ private static void verifyAndSetRMHAIdsList(Configuration conf) {
String errmsg = iae.getMessage(); String errmsg = iae.getMessage();
if (confKey == null) { if (confKey == null) {
// Error at addSuffix // Error at addSuffix
errmsg = getInvalidValueMessage(YarnConfiguration.RM_HA_ID, errmsg = getInvalidValueMessage(YarnConfiguration.RM_HA_ID, id);
getRMHAId(conf));
} }
throwBadConfigurationException(errmsg); throwBadConfigurationException(errmsg);
} }
@ -122,10 +124,18 @@ private static void verifyAndSetRMHAIdsList(Configuration conf) {
} }
private static void verifyAndSetCurrentRMHAId(Configuration conf) { private static void verifyAndSetCurrentRMHAId(Configuration conf) {
String rmId = conf.getTrimmed(YarnConfiguration.RM_HA_ID); String rmId = getRMHAId(conf);
if (rmId == null) { if (rmId == null) {
throwBadConfigurationException( StringBuilder msg = new StringBuilder();
getNeedToSetValueMessage(YarnConfiguration.RM_HA_ID)); msg.append("Can not find valid RM_HA_ID. None of ");
for (String id : conf
.getTrimmedStringCollection(YarnConfiguration.RM_HA_IDS)) {
msg.append(addSuffix(YarnConfiguration.RM_ADDRESS, id) + " ");
}
msg.append(" are matching" +
" the local address OR " + YarnConfiguration.RM_HA_ID + " is not" +
" specified in HA Configuration");
throwBadConfigurationException(msg.toString());
} else { } else {
Collection<String> ids = getRMHAIds(conf); Collection<String> ids = getRMHAIds(conf);
if (!ids.contains(rmId)) { if (!ids.contains(rmId)) {
@ -179,7 +189,34 @@ public static Collection<String> getRMHAIds(Configuration conf) {
* @return RM Id on success * @return RM Id on success
*/ */
public static String getRMHAId(Configuration conf) { public static String getRMHAId(Configuration conf) {
return conf.get(YarnConfiguration.RM_HA_ID); int found = 0;
String currentRMId = conf.getTrimmed(YarnConfiguration.RM_HA_ID);
if(currentRMId == null) {
for(String rmId : getRMHAIds(conf)) {
String key = addSuffix(YarnConfiguration.RM_ADDRESS, rmId);
String addr = conf.get(key);
if (addr == null) {
continue;
}
InetSocketAddress s;
try {
s = NetUtils.createSocketAddr(addr);
} catch (Exception e) {
LOG.warn("Exception in creating socket address " + addr, e);
continue;
}
if (!s.isUnresolved() && NetUtils.isLocalAddress(s.getAddress())) {
currentRMId = rmId.trim();
found++;
}
}
}
if (found > 1) { // Only one address must match the local address
String msg = "The HA Configuration has multiple addresses that match "
+ "local node's address.";
throw new HadoopIllegalArgumentException(msg);
}
return currentRMId;
} }
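With this change yarn.resourcemanager.ha.id becomes optional: when it is unset, getRMHAId walks the ids in yarn.resourcemanager.ha.rm-ids, resolves the per-id RM address, and picks the single id whose address is local, throwing if more than one matches. A sketch of a configuration that relies on the auto-detection (hostnames are made up; assume this runs on rm1.example.com):

    Configuration conf = new YarnConfiguration();
    conf.set(YarnConfiguration.RM_HA_IDS, "rm1,rm2");
    conf.set(YarnConfiguration.RM_ADDRESS + ".rm1", "rm1.example.com:8032");
    conf.set(YarnConfiguration.RM_ADDRESS + ".rm2", "rm2.example.com:8032");
    // No yarn.resourcemanager.ha.id set: on rm1.example.com this resolves to "rm1".
    String rmId = HAUtil.getRMHAId(conf);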
@VisibleForTesting @VisibleForTesting

View File

@ -25,6 +25,7 @@
import org.apache.hadoop.classification.InterfaceAudience.Private; import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.classification.InterfaceAudience.Public; import org.apache.hadoop.classification.InterfaceAudience.Public;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.classification.InterfaceStability.Evolving;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
@@ -37,8 +38,26 @@
@Evolving
public class YarnConfiguration extends Configuration {
@Private
public static final String CS_CONFIGURATION_FILE= "capacity-scheduler.xml";
@Private
public static final String HADOOP_POLICY_CONFIGURATION_FILE =
"hadoop-policy.xml";
@Private
public static final String YARN_SITE_XML_FILE = "yarn-site.xml";
@Private
public static final String CORE_SITE_CONFIGURATION_FILE = "core-site.xml";
@Evolving
public static final int APPLICATION_MAX_TAGS = 10;
@Evolving
public static final int APPLICATION_MAX_TAG_LENGTH = 100;
private static final String YARN_DEFAULT_XML_FILE = "yarn-default.xml";
- private static final String YARN_SITE_XML_FILE = "yarn-site.xml";
static {
Configuration.addDefaultResource(YARN_DEFAULT_XML_FILE);
@@ -329,6 +348,16 @@ public class YarnConfiguration extends Configuration {
public static final String RM_HA_IDS = RM_HA_PREFIX + "rm-ids";
public static final String RM_HA_ID = RM_HA_PREFIX + "id";
/** Store the related configuration files in File System */
public static final String FS_BASED_RM_CONF_STORE = RM_PREFIX
+ "configuration.file-system-based-store";
public static final String DEFAULT_FS_BASED_RM_CONF_STORE = "/yarn/conf";
public static final String RM_CONFIGURATION_PROVIDER_CLASS = RM_PREFIX
+ "configuration.provider-class";
public static final String DEFAULT_RM_CONFIGURATION_PROVIDER_CLASS =
"org.apache.hadoop.yarn.LocalConfigurationProvider";
@Private
public static final List<String> RM_SERVICES_ADDRESS_CONF_KEYS =
Collections.unmodifiableList(Arrays.asList(
@@ -999,6 +1028,15 @@ public class YarnConfiguration extends Configuration {
public static final String AHS_WEBAPP_SPNEGO_KEYTAB_FILE_KEY =
AHS_PREFIX + "webapp.spnego-keytab-file";
////////////////////////////////
// ATS Configs
////////////////////////////////
public static final String ATS_PREFIX = YARN_PREFIX + "ats.";
/** ATS store class */
public static final String ATS_STORE = ATS_PREFIX + "store.class";
////////////////////////////////
// Other Configs
////////////////////////////////


@@ -190,6 +190,7 @@ message ApplicationReportProto {
optional float progress = 17;
optional string applicationType = 18;
optional hadoop.common.TokenProto am_rm_token = 19;
repeated string applicationTags = 20;
}
message ApplicationAttemptReportProto {
@@ -287,6 +288,7 @@ message ApplicationSubmissionContextProto {
optional ResourceProto resource = 9;
optional string applicationType = 10 [default = "YARN"];
optional bool keep_containers_across_application_attempts = 11 [default = false];
repeated string applicationTags = 12;
}
enum ApplicationAccessTypeProto {


@@ -136,6 +136,12 @@ message MoveApplicationAcrossQueuesRequestProto {
message MoveApplicationAcrossQueuesResponseProto {
}
enum ApplicationsRequestScopeProto {
ALL = 0;
VIEWABLE = 1;
OWN = 2;
}
message GetApplicationsRequestProto {
repeated string application_types = 1;
repeated YarnApplicationStateProto application_states = 2;
@@ -146,6 +152,8 @@ message GetApplicationsRequestProto {
optional int64 start_end = 7;
optional int64 finish_begin = 8;
optional int64 finish_end = 9;
repeated string applicationTags = 10;
optional ApplicationsRequestScopeProto scope = 11 [default = ALL];
}
message GetApplicationsResponseProto {


@@ -232,7 +232,6 @@ public class ApplicationMaster {
private static final String shellArgsPath = "shellArgs";
private volatile boolean done;
- private volatile boolean success;
private ByteBuffer allTokens;
@@ -254,8 +253,8 @@ public static void main(String[] args) {
if (!doRun) {
System.exit(0);
}
- result = appMaster.run();
- appMaster.finish();
appMaster.run();
result = appMaster.finish();
} catch (Throwable t) {
LOG.fatal("Error running ApplicationMaster", t);
System.exit(1);
@@ -480,7 +479,7 @@ private void printUsage(Options opts) {
* @throws IOException
*/
@SuppressWarnings({ "unchecked" })
- public boolean run() throws YarnException, IOException {
public void run() throws YarnException, IOException {
LOG.info("Starting ApplicationMaster");
Credentials credentials =
@@ -561,7 +560,6 @@ public boolean run() throws YarnException, IOException {
amRMClient.addContainerRequest(containerAsk);
}
numRequestedContainers.set(numTotalContainersToRequest);
- return success;
}
@VisibleForTesting
@@ -569,7 +567,8 @@ NMCallbackHandler createNMCallbackHandler() {
return new NMCallbackHandler(this);
}
- protected void finish() {
@VisibleForTesting
protected boolean finish() {
// wait for completion.
while (!done
&& (numCompletedContainers.get() != numTotalContainers)) {
@@ -600,7 +599,7 @@ protected void finish() {
FinalApplicationStatus appStatus;
String appMessage = null;
- success = true;
boolean success = true;
if (numFailedContainers.get() == 0 &&
numCompletedContainers.get() == numTotalContainers) {
appStatus = FinalApplicationStatus.SUCCEEDED;
@@ -621,6 +620,8 @@ protected void finish() {
}
amRMClient.stop();
return success;
}
private class RMCallbackHandler implements AMRMClientAsync.CallbackHandler {


@@ -18,13 +18,13 @@
package org.apache.hadoop.yarn.applications.distributedshell;
import java.nio.ByteBuffer;
import java.util.Map;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.yarn.api.records.ContainerId;
- import java.nio.ByteBuffer;
- import java.util.Map;
public class ContainerLaunchFailAppMaster extends ApplicationMaster {
private static final Log LOG =
@@ -66,8 +66,8 @@ public static void main(String[] args) {
if (!doRun) {
System.exit(0);
}
- result = appMaster.run();
- appMaster.finish();
appMaster.run();
result = appMaster.finish();
} catch (Throwable t) {
LOG.fatal("Error running ApplicationMaster", t);
System.exit(1);


@@ -29,8 +29,8 @@ public class TestDSFailedAppMaster extends ApplicationMaster {
private static final Log LOG = LogFactory.getLog(TestDSFailedAppMaster.class);
@Override
- public boolean run() throws YarnException, IOException {
- boolean res = super.run();
public void run() throws YarnException, IOException {
super.run();
// for the 2nd attempt.
if (appAttemptID.getAttemptId() == 2) {
@@ -39,11 +39,12 @@ public boolean run() throws YarnException, IOException {
// numRequestedContainers should be set to 0.
if (numAllocatedContainers.get() != 1
|| numRequestedContainers.get() != 0) {
- LOG.info("Application Master failed. exiting");
LOG.info("NumAllocatedContainers is " + numAllocatedContainers.get()
+ " and NumRequestedContainers is " + numRequestedContainers.get()
+ ". Application Master failed. exiting");
System.exit(200);
}
}
- return res;
}
public static void main(String[] args) {
@@ -54,7 +55,7 @@ public static void main(String[] args) {
if (!doRun) {
System.exit(0);
}
- result = appMaster.run();
appMaster.run();
if (appMaster.appAttemptID.getAttemptId() == 1) {
try {
// sleep some time, wait for the AM to launch a container.
@@ -63,7 +64,7 @@ public static void main(String[] args) {
// fail the first am.
System.exit(100);
}
- appMaster.finish();
result = appMaster.finish();
} catch (Throwable t) {
System.exit(1);
}


@@ -197,7 +197,7 @@ public int run(String[] args) throws Exception {
listApplications(appTypes, appStates);
}
} else if (cliParser.hasOption(KILL_CMD)) {
- if (args.length != 2) {
if (args.length != 3) {
printUsage(opts);
return exitCode;
}


@@ -681,7 +681,7 @@ public void testAppsHelpCommand() throws Exception {
sysOutStream.reset();
ApplicationId applicationId = ApplicationId.newInstance(1234, 5);
result =
- cli.run(new String[] { "-kill", applicationId.toString(), "args" });
cli.run(new String[] {"application", "-kill", applicationId.toString(), "args" });
verify(spyCli).printUsage(any(Options.class));
Assert.assertEquals(createApplicationCLIHelpMessage(),
sysOutStream.toString());
@@ -717,7 +717,7 @@ public void testKillApplication() throws Exception {
FinalApplicationStatus.SUCCEEDED, null, "N/A", 0.53789f, "YARN", null);
when(client.getApplicationReport(any(ApplicationId.class))).thenReturn(
newApplicationReport2);
- int result = cli.run(new String[] { "-kill", applicationId.toString() });
int result = cli.run(new String[] { "application","-kill", applicationId.toString() });
assertEquals(0, result);
verify(client, times(0)).killApplication(any(ApplicationId.class));
verify(sysOut).println(
@@ -730,7 +730,7 @@ public void testKillApplication() throws Exception {
FinalApplicationStatus.SUCCEEDED, null, "N/A", 0.53789f, "YARN", null);
when(client.getApplicationReport(any(ApplicationId.class))).thenReturn(
newApplicationReport);
- result = cli.run(new String[] { "-kill", applicationId.toString() });
result = cli.run(new String[] { "application","-kill", applicationId.toString() });
assertEquals(0, result);
verify(client).killApplication(any(ApplicationId.class));
verify(sysOut).println("Killing application application_1234_0005");
@@ -740,7 +740,7 @@ public void testKillApplication() throws Exception {
.getApplicationReport(applicationId);
cli = createAndGetAppCLI();
try {
- cli.run(new String[] { "-kill", applicationId.toString() });
cli.run(new String[] { "application","-kill", applicationId.toString() });
Assert.fail();
} catch (Exception ex) {
Assert.assertTrue(ex instanceof ApplicationNotFoundException);


@@ -0,0 +1,72 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn;
import java.io.IOException;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.classification.InterfaceStability.Unstable;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.yarn.conf.ConfigurationProvider;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.exceptions.YarnException;
@Private
@Unstable
public class FileSystemBasedConfigurationProvider
extends ConfigurationProvider {
private static final Log LOG = LogFactory
.getLog(FileSystemBasedConfigurationProvider.class);
private FileSystem fs;
private Path configDir;
@Override
public synchronized Configuration getConfiguration(String name)
throws IOException, YarnException {
Path configPath = new Path(this.configDir, name);
if (!fs.exists(configPath)) {
throw new YarnException("Can not find Configuration: " + name + " in "
+ configDir);
}
Configuration conf = new Configuration(false);
conf.addResource(fs.open(configPath));
return conf;
}
@Override
public synchronized void initInternal(Configuration conf) throws Exception {
configDir =
new Path(conf.get(YarnConfiguration.FS_BASED_RM_CONF_STORE,
YarnConfiguration.DEFAULT_FS_BASED_RM_CONF_STORE));
fs = configDir.getFileSystem(conf);
if (!fs.exists(configDir)) {
fs.mkdirs(configDir);
}
}
@Override
public synchronized void closeInternal() throws Exception {
fs.close();
}
}
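In the ResourceManager this provider is meant to be selected through yarn.resourcemanager.configuration.provider-class rather than constructed by hand; the sketch below, which is not part of the patch, drives the class directly only to make the call sequence visible. It assumes the default store path and that an administrator has already uploaded capacity-scheduler.xml there.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.yarn.FileSystemBasedConfigurationProvider;
import org.apache.hadoop.yarn.conf.YarnConfiguration;

public class FileSystemConfProviderSketch {
  public static void main(String[] args) throws Exception {
    Configuration bootstrap = new Configuration();
    // Same as the default value, set here only to make the location explicit.
    bootstrap.set(YarnConfiguration.FS_BASED_RM_CONF_STORE,
        YarnConfiguration.DEFAULT_FS_BASED_RM_CONF_STORE);

    FileSystemBasedConfigurationProvider provider =
        new FileSystemBasedConfigurationProvider();
    provider.initInternal(bootstrap);  // opens the FileSystem, creates the dir
    // Loads {store}/capacity-scheduler.xml, which must have been uploaded
    // beforehand; otherwise getConfiguration() throws YarnException.
    Configuration schedulerConf =
        provider.getConfiguration(YarnConfiguration.CS_CONFIGURATION_FILE);
    System.out.println("loaded " + schedulerConf.size() + " properties");
    provider.closeInternal();
  }
}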


@@ -0,0 +1,48 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn;
import java.io.IOException;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.classification.InterfaceStability.Unstable;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.yarn.conf.ConfigurationProvider;
import org.apache.hadoop.yarn.exceptions.YarnException;
@Private
@Unstable
public class LocalConfigurationProvider extends ConfigurationProvider {
@Override
public Configuration getConfiguration(String name)
throws IOException, YarnException {
return new Configuration();
}
@Override
public void initInternal(Configuration conf) throws Exception {
// Do nothing
}
@Override
public void closeInternal() throws Exception {
// Do nothing
}
}


@@ -27,6 +27,7 @@
import org.apache.commons.lang.math.LongRange;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.classification.InterfaceStability.Unstable;
import org.apache.hadoop.yarn.api.protocolrecords.ApplicationsRequestScope;
import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationsRequest;
import org.apache.hadoop.yarn.api.records.YarnApplicationState;
import org.apache.hadoop.yarn.api.records.impl.pb.ProtoUtils;
@@ -49,6 +50,8 @@ public class GetApplicationsRequestPBImpl extends GetApplicationsRequest {
Set<String> queues = null;
long limit = Long.MAX_VALUE;
LongRange start = null, finish = null;
private Set<String> applicationTags;
private ApplicationsRequestScope scope;
public GetApplicationsRequestPBImpl() {
builder = GetApplicationsRequestProto.newBuilder();
@@ -112,6 +115,12 @@ public void remove() {
};
builder.addAllApplicationStates(iterable);
}
if (this.applicationTags != null && !this.applicationTags.isEmpty()) {
builder.addAllApplicationTags(this.applicationTags);
}
if (this.scope != null) {
builder.setScope(ProtoUtils.convertToProtoFormat(scope));
}
}
private void addLocalApplicationTypesToProto() {
@@ -187,12 +196,64 @@ public void setApplicationTypes(Set<String> applicationTypes) {
this.applicationTypes = applicationTypes;
}
private void initApplicationTags() {
if (this.applicationTags != null) {
return;
}
GetApplicationsRequestProtoOrBuilder p = viaProto ? proto : builder;
this.applicationTags = new HashSet<String>();
this.applicationTags.addAll(p.getApplicationTagsList());
}
@Override
public Set<String> getApplicationTags() {
initApplicationTags();
return this.applicationTags;
}
@Override
public void setApplicationTags(Set<String> tags) {
maybeInitBuilder();
if (tags == null || tags.isEmpty()) {
builder.clearApplicationTags();
this.applicationTags = null;
return;
}
// Convert applicationTags to lower case and add
this.applicationTags = new HashSet<String>();
for (String tag : tags) {
this.applicationTags.add(tag.toLowerCase());
}
}
@Override
public EnumSet<YarnApplicationState> getApplicationStates() {
initApplicationStates();
return this.applicationStates;
}
private void initScope() {
if (this.scope != null) {
return;
}
GetApplicationsRequestProtoOrBuilder p = viaProto ? proto : builder;
this.scope = ProtoUtils.convertFromProtoFormat(p.getScope());
}
@Override
public ApplicationsRequestScope getScope() {
initScope();
return this.scope;
}
public void setScope(ApplicationsRequestScope scope) {
maybeInitBuilder();
if (scope == null) {
builder.clearScope();
}
this.scope = scope;
}
@Override
public void setApplicationStates(EnumSet<YarnApplicationState> applicationStates) {
maybeInitBuilder();
@@ -223,7 +284,6 @@ public Set<String> getUsers() {
return this.users;
}
- @Override
public void setUsers(Set<String> users) {
maybeInitBuilder();
if (users == null) {
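Together with the proto changes earlier in this diff, the new tag and scope fields let a caller narrow a GetApplications query. The sketch below is not taken from the patch; it builds the request directly against the PB implementation, the tag value is invented, and a real client would usually obtain the request via GetApplicationsRequest and send it through ApplicationClientProtocol.

import java.util.Collections;
import java.util.EnumSet;

import org.apache.hadoop.yarn.api.protocolrecords.ApplicationsRequestScope;
import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetApplicationsRequestPBImpl;
import org.apache.hadoop.yarn.api.records.YarnApplicationState;

public class GetApplicationsByTagSketch {
  public static void main(String[] args) {
    GetApplicationsRequestPBImpl request = new GetApplicationsRequestPBImpl();
    // setApplicationTags() lower-cases what it stores, so tag matching is
    // effectively case-insensitive; "Nightly-Import" is just an example tag.
    request.setApplicationTags(Collections.singleton("Nightly-Import"));
    request.setScope(ApplicationsRequestScope.OWN);
    request.setApplicationStates(EnumSet.of(YarnApplicationState.RUNNING));
    System.out.println("tags: " + request.getApplicationTags());   // [nightly-import]
    System.out.println("scope: " + request.getScope());            // OWN
  }
}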


@@ -38,6 +38,9 @@
import com.google.protobuf.TextFormat;
import java.util.HashSet;
import java.util.Set;
@Private
@Unstable
public class ApplicationReportPBImpl extends ApplicationReport {
@@ -49,6 +52,7 @@ public class ApplicationReportPBImpl extends ApplicationReport {
private ApplicationAttemptId currentApplicationAttemptId;
private Token clientToAMToken = null;
private Token amRmToken = null;
private Set<String> applicationTags = null;
public ApplicationReportPBImpl() {
builder = ApplicationReportProto.newBuilder();
@@ -245,6 +249,21 @@ public Token getAMRMToken() {
return amRmToken;
}
private void initApplicationTags() {
if (this.applicationTags != null) {
return;
}
ApplicationReportProtoOrBuilder p = viaProto ? proto : builder;
this.applicationTags = new HashSet<String>();
this.applicationTags.addAll(p.getApplicationTagsList());
}
@Override
public Set<String> getApplicationTags() {
initApplicationTags();
return this.applicationTags;
}
@Override
public void setApplicationId(ApplicationId applicationId) {
maybeInitBuilder();
@@ -355,6 +374,15 @@ public void setApplicationType(String applicationType) {
builder.setApplicationType((applicationType));
}
@Override
public void setApplicationTags(Set<String> tags) {
maybeInitBuilder();
if (tags == null || tags.isEmpty()) {
builder.clearApplicationTags();
}
this.applicationTags = tags;
}
@Override
public void setDiagnostics(String diagnostics) {
maybeInitBuilder();
@@ -450,6 +478,9 @@ private void mergeLocalToBuilder() {
builder.getAmRmToken())) {
builder.setAmRmToken(convertToProtoFormat(this.amRmToken));
}
if (this.applicationTags != null && !this.applicationTags.isEmpty()) {
builder.addAllApplicationTags(this.applicationTags);
}
}
private void mergeLocalToProto() {


@@ -18,6 +18,7 @@
package org.apache.hadoop.yarn.api.records.impl.pb;
import com.google.common.base.CharMatcher;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.classification.InterfaceStability.Unstable;
import org.apache.hadoop.yarn.api.records.ApplicationId;
@@ -25,6 +26,7 @@
import org.apache.hadoop.yarn.api.records.ContainerLaunchContext;
import org.apache.hadoop.yarn.api.records.Priority;
import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.proto.YarnProtos.ApplicationIdProto;
import org.apache.hadoop.yarn.proto.YarnProtos.ApplicationSubmissionContextProto;
import org.apache.hadoop.yarn.proto.YarnProtos.ApplicationSubmissionContextProtoOrBuilder;
@@ -34,6 +36,9 @@
import com.google.protobuf.TextFormat;
import java.util.HashSet;
import java.util.Set;
@Private
@Unstable
public class ApplicationSubmissionContextPBImpl
@@ -47,6 +52,7 @@ public class ApplicationSubmissionContextPBImpl
private Priority priority = null;
private ContainerLaunchContext amContainer = null;
private Resource resource = null;
private Set<String> applicationTags = null;
public ApplicationSubmissionContextPBImpl() {
builder = ApplicationSubmissionContextProto.newBuilder();
@@ -100,6 +106,9 @@ private void mergeLocalToBuilder() {
builder.getResource())) {
builder.setResource(convertToProtoFormat(this.resource));
}
if (this.applicationTags != null && !this.applicationTags.isEmpty()) {
builder.addAllApplicationTags(this.applicationTags);
}
}
private void mergeLocalToProto() {
@@ -197,6 +206,21 @@ public String getApplicationType() {
return (p.getApplicationType());
}
private void initApplicationTags() {
if (this.applicationTags != null) {
return;
}
ApplicationSubmissionContextProtoOrBuilder p = viaProto ? proto : builder;
this.applicationTags = new HashSet<String>();
this.applicationTags.addAll(p.getApplicationTagsList());
}
@Override
public Set<String> getApplicationTags() {
initApplicationTags();
return this.applicationTags;
}
@Override
public void setQueue(String queue) {
maybeInitBuilder();
@@ -217,6 +241,40 @@ public void setApplicationType(String applicationType) {
builder.setApplicationType((applicationType));
}
private void checkTags(Set<String> tags) {
if (tags.size() > YarnConfiguration.APPLICATION_MAX_TAGS) {
throw new IllegalArgumentException("Too many applicationTags, a maximum of only "
+ YarnConfiguration.APPLICATION_MAX_TAGS + " are allowed!");
}
for (String tag : tags) {
if (tag.length() > YarnConfiguration.APPLICATION_MAX_TAG_LENGTH) {
throw new IllegalArgumentException("Tag " + tag + " is too long, " +
"maximum allowed length of a tag is " +
YarnConfiguration.APPLICATION_MAX_TAG_LENGTH);
}
if (!CharMatcher.ASCII.matchesAllOf(tag)) {
throw new IllegalArgumentException("A tag can only have ASCII " +
"characters! Invalid tag - " + tag);
}
}
}
@Override
public void setApplicationTags(Set<String> tags) {
maybeInitBuilder();
if (tags == null || tags.isEmpty()) {
builder.clearApplicationTags();
this.applicationTags = null;
return;
}
checkTags(tags);
// Convert applicationTags to lower case and add
this.applicationTags = new HashSet<String>();
for (String tag : tags) {
this.applicationTags.add(tag.toLowerCase());
}
}
@Override
public ContainerLaunchContext getAMContainerSpec() {
ApplicationSubmissionContextProtoOrBuilder p = viaProto ? proto : builder;
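checkTags() above caps a submission at YarnConfiguration.APPLICATION_MAX_TAGS (10) tags of at most APPLICATION_MAX_TAG_LENGTH (100) ASCII characters each, and setApplicationTags() stores them lower-cased. The following small sketch of that observable behaviour is not part of the patch, and the tag values are made up.

import java.util.Arrays;
import java.util.HashSet;

import org.apache.hadoop.yarn.api.records.impl.pb.ApplicationSubmissionContextPBImpl;

public class ApplicationTagsSketch {
  public static void main(String[] args) {
    ApplicationSubmissionContextPBImpl context =
        new ApplicationSubmissionContextPBImpl();

    // Valid tags are accepted and lower-cased, so "ETL" and "etl" collapse
    // into the same tag.
    context.setApplicationTags(new HashSet<String>(Arrays.asList("ETL", "daily")));
    System.out.println(context.getApplicationTags());  // [daily, etl] in some order

    try {
      // A 101-character tag exceeds APPLICATION_MAX_TAG_LENGTH and is rejected.
      char[] tooLong = new char[101];
      Arrays.fill(tooLong, 'x');
      context.setApplicationTags(
          new HashSet<String>(Arrays.asList(new String(tooLong))));
    } catch (IllegalArgumentException expected) {
      System.out.println("rejected: " + expected.getMessage());
    }
  }
}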


@@ -22,6 +22,7 @@
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.classification.InterfaceStability.Unstable;
import org.apache.hadoop.yarn.api.protocolrecords.ApplicationsRequestScope;
import org.apache.hadoop.yarn.api.records.AMCommand;
import org.apache.hadoop.yarn.api.records.ApplicationAccessType;
import org.apache.hadoop.yarn.api.records.ApplicationResourceUsageReport;
@@ -50,6 +51,7 @@
import org.apache.hadoop.yarn.proto.YarnProtos.YarnApplicationStateProto;
import com.google.protobuf.ByteString;
import org.apache.hadoop.yarn.proto.YarnServiceProtos;
@Private
@Unstable
@@ -113,6 +115,18 @@ public static YarnApplicationAttemptState convertFromProtoFormat(
YARN_APPLICATION_ATTEMPT_STATE_PREFIX, ""));
}
/*
* ApplicationsRequestScope
*/
public static YarnServiceProtos.ApplicationsRequestScopeProto
convertToProtoFormat(ApplicationsRequestScope e) {
return YarnServiceProtos.ApplicationsRequestScopeProto.valueOf(e.name());
}
public static ApplicationsRequestScope convertFromProtoFormat
(YarnServiceProtos.ApplicationsRequestScopeProto e) {
return ApplicationsRequestScope.valueOf(e.name());
}
/*
* ApplicationResourceUsageReport
*/


@@ -0,0 +1,58 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.webapp;
import javax.ws.rs.core.MediaType;
import javax.ws.rs.ext.Provider;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.classification.InterfaceStability.Unstable;
import org.codehaus.jackson.jaxrs.JacksonJaxbJsonProvider;
import org.codehaus.jackson.map.AnnotationIntrospector;
import org.codehaus.jackson.map.ObjectMapper;
import org.codehaus.jackson.map.annotate.JsonSerialize.Inclusion;
import org.codehaus.jackson.xc.JaxbAnnotationIntrospector;
import com.google.inject.Singleton;
/**
* YARN's implementation of JAX-RS abstractions based on
* {@link JacksonJaxbJsonProvider}, needed to deserialize JSON content into
* POJO objects and to serialize POJOs back into JSON.
*/
@Singleton
@Provider
@Unstable
@Private
public class YarnJacksonJaxbJsonProvider extends JacksonJaxbJsonProvider {
public YarnJacksonJaxbJsonProvider() {
super();
}
@Override
public ObjectMapper locateMapper(Class<?> type, MediaType mediaType) {
ObjectMapper mapper = super.locateMapper(type, mediaType);
AnnotationIntrospector introspector = new JaxbAnnotationIntrospector();
mapper.setAnnotationIntrospector(introspector);
mapper.getSerializationConfig()
.setSerializationInclusion(Inclusion.NON_NULL);
return mapper;
}
}

View File

@@ -130,7 +130,7 @@
<property>
<description>Are acls enabled.</description>
<name>yarn.acl.enable</name>
- <value>true</value>
<value>false</value>
</property>
<property>
@@ -395,7 +395,9 @@
the Active mode when prompted to.
(2) The nodes in the RM ensemble are listed in
yarn.resourcemanager.ha.rm-ids
- (3) The id of each RM comes from yarn.resourcemanager.ha.id
(3) The id of each RM either comes from yarn.resourcemanager.ha.id
if yarn.resourcemanager.ha.id is explicitly specified or can be
figured out by matching yarn.resourcemanager.address.{id} with local address
(4) The actual physical addresses come from the configs of the pattern
- {rpc-config}.{id}</description>
<name>yarn.resourcemanager.ha.enabled</name>
@@ -442,7 +444,10 @@
<property>
<description>The id (string) of the current RM. When HA is enabled, this
- is a required config. See description of yarn.resourcemanager.ha.enabled
is an optional config. The id of current RM can be set by explicitly
specifying yarn.resourcemanager.ha.id or figured out by matching
yarn.resourcemanager.address.{id} with local address
See description of yarn.resourcemanager.ha.enabled
for full details on how this is used.</description>
<name>yarn.resourcemanager.ha.id</name>
<!--value>rm1</value-->
@@ -588,6 +593,18 @@
<value>org.apache.hadoop.yarn.server.applicationhistoryservice.NullApplicationHistoryStore</value>
</property>
<property>
<description>The class to use as the configuration provider.
If org.apache.hadoop.yarn.LocalConfigurationProvider is used,
the local configuration will be loaded.
If org.apache.hadoop.yarn.FileSystemBasedConfigurationProvider is used,
the configuration which will be loaded should be uploaded to remote File system first.
</description>
<name>yarn.resourcemanager.configuration.provider-class</name>
<value>org.apache.hadoop.yarn.LocalConfigurationProvider</value>
<!-- <value>org.apache.hadoop.yarn.FileSystemBasedConfigurationProvider</value> -->
</property>
<!-- Node Manager Configs -->
<property>
<description>The hostname of the NM.</description>
@@ -1120,6 +1137,14 @@
<value>org.apache.hadoop.yarn.server.applicationhistoryservice.FileSystemApplicationHistoryStore</value>
</property>
<!-- Application Timeline Service's Configuration-->
<property>
<description>Store class name for application timeline store</description>
<name>yarn.ats.store.class</name>
<value>org.apache.hadoop.yarn.server.applicationhistoryservice.apptimeline.MemoryApplicationTimelineStore</value>
</property>
<!-- Other configuration -->
<property>
<description>The interval that the yarn client library uses to poll the


@@ -18,10 +18,13 @@
package org.apache.hadoop.yarn.api.records.apptimeline;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import junit.framework.Assert;
import org.apache.hadoop.yarn.api.records.apptimeline.ATSPutErrors.ATSPutError;
import org.junit.Test;
public class TestApplicationTimelineRecords {
@@ -42,10 +45,8 @@ public void testATSEntities() {
event.addEventInfo("key2", "val2");
entity.addEvent(event);
}
- entity.addRelatedEntity(
-     "test ref type 1", Arrays.asList((Object) "test ref id 1"));
- entity.addRelatedEntity(
-     "test ref type 2", Arrays.asList((Object) "test ref id 2"));
entity.addRelatedEntity("test ref type 1", "test ref id 1");
entity.addRelatedEntity("test ref type 2", "test ref id 2");
entity.addPrimaryFilter("pkey1", "pval1");
entity.addPrimaryFilter("pkey2", "pval2");
entity.addOtherInfo("okey1", "oval1");
@@ -83,7 +84,7 @@ public void testATSEvents() {
event.setEventType("event type " + i);
event.addEventInfo("key1", "val1");
event.addEventInfo("key2", "val2");
- partEvents.addEntity(event);
partEvents.addEvent(event);
}
events.addEvent(partEvents);
}
@@ -110,4 +111,36 @@ public void testATSEvents() {
Assert.assertEquals(2, event22.getEventInfo().size());
}
@Test
public void testATSPutErrors() {
ATSPutErrors atsPutErrors = new ATSPutErrors();
ATSPutError error1 = new ATSPutError();
error1.setEntityId("entity id 1");
error1.setEntityType("entity type 1");
error1.setErrorCode(1);
atsPutErrors.addError(error1);
List<ATSPutError> errors = new ArrayList<ATSPutError>();
errors.add(error1);
ATSPutError error2 = new ATSPutError();
error2.setEntityId("entity id 2");
error2.setEntityType("entity type 2");
error2.setErrorCode(2);
errors.add(error2);
atsPutErrors.addErrors(errors);
Assert.assertEquals(3, atsPutErrors.getErrors().size());
ATSPutError e = atsPutErrors.getErrors().get(0);
Assert.assertEquals(error1.getEntityId(), e.getEntityId());
Assert.assertEquals(error1.getEntityType(), e.getEntityType());
Assert.assertEquals(error1.getErrorCode(), e.getErrorCode());
e = atsPutErrors.getErrors().get(1);
Assert.assertEquals(error1.getEntityId(), e.getEntityId());
Assert.assertEquals(error1.getEntityType(), e.getEntityType());
Assert.assertEquals(error1.getErrorCode(), e.getErrorCode());
e = atsPutErrors.getErrors().get(2);
Assert.assertEquals(error2.getEntityId(), e.getEntityId());
Assert.assertEquals(error2.getEntityType(), e.getEntityType());
Assert.assertEquals(error2.getErrorCode(), e.getErrorCode());
}
}


@@ -27,11 +27,14 @@
import org.apache.hadoop.service.CompositeService;
import org.apache.hadoop.service.Service;
import org.apache.hadoop.util.ExitUtil;
import org.apache.hadoop.util.ReflectionUtils;
import org.apache.hadoop.util.ShutdownHookManager;
import org.apache.hadoop.util.StringUtils;
import org.apache.hadoop.yarn.YarnUncaughtExceptionHandler;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
import org.apache.hadoop.yarn.server.applicationhistoryservice.apptimeline.ApplicationTimelineStore;
import org.apache.hadoop.yarn.server.applicationhistoryservice.apptimeline.MemoryApplicationTimelineStore;
import org.apache.hadoop.yarn.server.applicationhistoryservice.webapp.AHSWebApp;
import org.apache.hadoop.yarn.webapp.WebApp;
import org.apache.hadoop.yarn.webapp.WebApps;
@@ -51,6 +54,7 @@ public class ApplicationHistoryServer extends CompositeService {
ApplicationHistoryClientService ahsClientService;
ApplicationHistoryManager historyManager;
ApplicationTimelineStore timelineStore;
private WebApp webApp;
public ApplicationHistoryServer() {
@@ -63,6 +67,8 @@ protected void serviceInit(Configuration conf) throws Exception {
ahsClientService = createApplicationHistoryClientService(historyManager);
addService(ahsClientService);
addService((Service) historyManager);
timelineStore = createApplicationTimelineStore(conf);
addIfService(timelineStore);
super.serviceInit(conf);
}
@@ -135,6 +141,15 @@ protected ApplicationHistoryManager createApplicationHistoryManager(
return new ApplicationHistoryManagerImpl();
}
protected ApplicationTimelineStore createApplicationTimelineStore(
Configuration conf) {
// TODO: need to replace the MemoryApplicationTimelineStore.class with the
// LevelDB implementation
return ReflectionUtils.newInstance(conf.getClass(
YarnConfiguration.ATS_STORE, MemoryApplicationTimelineStore.class,
ApplicationTimelineStore.class), conf);
}
protected void startWebApp() {
String bindAddress = WebAppUtils.getAHSWebAppURLWithoutScheme(getConfig());
LOG.info("Instantiating AHSWebApp at " + bindAddress);
@@ -148,7 +163,8 @@ protected void startWebApp() {
YarnConfiguration.AHS_WEBAPP_SPNEGO_USER_NAME_KEY)
.withHttpSpnegoKeytabKey(
YarnConfiguration.AHS_WEBAPP_SPNEGO_KEYTAB_FILE_KEY)
- .at(bindAddress).start(new AHSWebApp(historyManager));
.at(bindAddress)
.start(new AHSWebApp(historyManager, timelineStore));
} catch (Exception e) {
String msg = "AHSWebApp failed to start.";
LOG.error(msg, e);
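createApplicationTimelineStore() above resolves the store implementation from yarn.ats.store.class (YarnConfiguration.ATS_STORE), defaulting to the in-memory store added later in this patch. The stand-alone sketch below is not part of the patch; it only mirrors that lookup outside the server to show how the setting is consumed.

import org.apache.hadoop.util.ReflectionUtils;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.server.applicationhistoryservice.apptimeline.ApplicationTimelineStore;
import org.apache.hadoop.yarn.server.applicationhistoryservice.apptimeline.MemoryApplicationTimelineStore;

public class TimelineStoreLookupSketch {
  public static void main(String[] args) {
    YarnConfiguration conf = new YarnConfiguration();
    // yarn.ats.store.class is left unset, so the in-memory default is used,
    // just as in createApplicationTimelineStore() above.
    ApplicationTimelineStore store = ReflectionUtils.newInstance(
        conf.getClass(YarnConfiguration.ATS_STORE,
            MemoryApplicationTimelineStore.class,
            ApplicationTimelineStore.class), conf);
    store.init(conf);
    store.start();
    System.out.println(store.getName() + ": " + store.getServiceState());
    store.stop();
  }
}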


@@ -0,0 +1,125 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.applicationhistoryservice.apptimeline;
import java.util.Collection;
import java.util.EnumSet;
import java.util.Set;
import java.util.SortedSet;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.yarn.api.records.apptimeline.ATSEntities;
import org.apache.hadoop.yarn.api.records.apptimeline.ATSEntity;
import org.apache.hadoop.yarn.api.records.apptimeline.ATSEvents;
/**
* This interface is for retrieving application timeline information.
*/
@InterfaceAudience.Private
@InterfaceStability.Unstable
public interface ApplicationTimelineReader {
/**
* Possible fields to retrieve for {@link #getEntities} and {@link
* #getEntity}.
*/
enum Field {
EVENTS,
RELATED_ENTITIES,
PRIMARY_FILTERS,
OTHER_INFO,
LAST_EVENT_ONLY
}
/**
* Default limit for {@link #getEntities} and {@link #getEntityTimelines}.
*/
final long DEFAULT_LIMIT = 100;
/**
* This method retrieves a list of entity information, {@link ATSEntity},
* sorted by the starting timestamp for the entity, descending.
*
* @param entityType The type of entities to return (required).
* @param limit A limit on the number of entities to return. If null,
* defaults to {@link #DEFAULT_LIMIT}.
* @param windowStart The earliest start timestamp to retrieve (exclusive).
* If null, defaults to retrieving all entities until the
* limit is reached.
* @param windowEnd The latest start timestamp to retrieve (inclusive).
* If null, defaults to {@link Long#MAX_VALUE}
* @param primaryFilter Retrieves only entities that have the specified
* primary filter. If null, retrieves all entities.
* This is an indexed retrieval, and no entities that
* do not match the filter are scanned.
* @param secondaryFilters Retrieves only entities that have exact matches
* for all the specified filters in their primary
* filters or other info. This is not an indexed
* retrieval, so all entities are scanned but only
* those matching the filters are returned.
* @param fieldsToRetrieve Specifies which fields of the entity object to
* retrieve (see {@link Field}). If the set of fields
* contains {@link Field#LAST_EVENT_ONLY} and not
* {@link Field#EVENTS}, the most recent event for
* each entity is retrieved.
* @return An {@link ATSEntities} object.
*/
ATSEntities getEntities(String entityType,
Long limit, Long windowStart, Long windowEnd,
NameValuePair primaryFilter, Collection<NameValuePair> secondaryFilters,
EnumSet<Field> fieldsToRetrieve);
/**
* This method retrieves the entity information for a given entity.
*
* @param entity The entity whose information will be retrieved.
* @param entityType The type of the entity.
* @param fieldsToRetrieve Specifies which fields of the entity object to
* retrieve (see {@link Field}). If the set of
* fields contains {@link Field#LAST_EVENT_ONLY} and
* not {@link Field#EVENTS}, the most recent event
* for each entity is retrieved.
* @return An {@link ATSEntity} object.
*/
ATSEntity getEntity(String entity, String entityType, EnumSet<Field>
fieldsToRetrieve);
/**
* This method retrieves the events for a list of entities all of the same
* entity type. The events for each entity are sorted in order of their
* timestamps, descending.
*
* @param entityType The type of entities to retrieve events for.
* @param entityIds The entity IDs to retrieve events for.
* @param limit A limit on the number of events to return for each entity.
* If null, defaults to {@link #DEFAULT_LIMIT} events per
* entity.
* @param windowStart If not null, retrieves only events later than the
* given time (exclusive)
* @param windowEnd If not null, retrieves only events earlier than the
* given time (inclusive)
* @param eventTypes Restricts the events returned to the given types. If
* null, events of all types will be returned.
* @return An {@link ATSEvents} object.
*/
ATSEvents getEntityTimelines(String entityType,
SortedSet<String> entityIds, Long limit, Long windowStart,
Long windowEnd, Set<String> eventTypes);
}
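Paired with ApplicationTimelineWriter.put() and the in-memory store later in this diff, this reader supports a simple put-then-query round trip. The sketch below is not from the patch; the entity id, type and event type are invented, and it assumes the plain POJO setters on ATSEntity and ATSEvent that the rest of this patch uses.

import java.util.Collections;
import java.util.EnumSet;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.yarn.api.records.apptimeline.ATSEntities;
import org.apache.hadoop.yarn.api.records.apptimeline.ATSEntity;
import org.apache.hadoop.yarn.api.records.apptimeline.ATSEvent;
import org.apache.hadoop.yarn.server.applicationhistoryservice.apptimeline.ApplicationTimelineReader.Field;
import org.apache.hadoop.yarn.server.applicationhistoryservice.apptimeline.ApplicationTimelineStore;
import org.apache.hadoop.yarn.server.applicationhistoryservice.apptimeline.MemoryApplicationTimelineStore;

public class TimelineStoreRoundTripSketch {
  public static void main(String[] args) {
    ApplicationTimelineStore store = new MemoryApplicationTimelineStore();
    store.init(new Configuration());
    store.start();

    // One entity with a single event; ids and types are arbitrary examples.
    ATSEvent event = new ATSEvent();
    event.setEventType("EXAMPLE_EVENT");
    event.setTimestamp(System.currentTimeMillis());

    ATSEntity entity = new ATSEntity();
    entity.setEntityId("example_entity_1");
    entity.setEntityType("EXAMPLE_TYPE");
    entity.addEvent(event);

    ATSEntities toPut = new ATSEntities();
    toPut.setEntities(Collections.singletonList(entity));
    System.out.println("put errors: " + store.put(toPut).getErrors().size());

    // Query the entity back with its events included.
    ATSEntity stored = store.getEntity("example_entity_1", "EXAMPLE_TYPE",
        EnumSet.of(Field.EVENTS));
    System.out.println("events stored: " + stored.getEvents().size());

    store.stop();
  }
}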


@@ -0,0 +1,29 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.applicationhistoryservice.apptimeline;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.service.Service;
@InterfaceAudience.Private
@InterfaceStability.Unstable
public interface ApplicationTimelineStore extends
Service, ApplicationTimelineReader, ApplicationTimelineWriter {
}


@@ -0,0 +1,43 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.applicationhistoryservice.apptimeline;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.yarn.api.records.apptimeline.ATSEntities;
import org.apache.hadoop.yarn.api.records.apptimeline.ATSPutErrors;
/**
* This interface is for storing application timeline information.
*/
@InterfaceAudience.Private
@InterfaceStability.Unstable
public interface ApplicationTimelineWriter {
/**
* Stores entity information to the application timeline store. Any errors
* occurring for individual put request objects will be reported in the
* response.
*
* @param data An {@link ATSEntities} object.
* @return An {@link ATSPutErrors} object.
*/
ATSPutErrors put(ATSEntities data);
}


@@ -0,0 +1,100 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.applicationhistoryservice.apptimeline;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.classification.InterfaceStability.Unstable;
/**
* The unique identifier for an entity
*/
@Private
@Unstable
public class EntityId implements Comparable<EntityId> {
private String id;
private String type;
public EntityId(String id, String type) {
this.id = id;
this.type = type;
}
/**
* Get the entity Id.
* @return The entity Id.
*/
public String getId() {
return id;
}
/**
* Get the entity type.
* @return The entity type.
*/
public String getType() {
return type;
}
@Override
public int compareTo(EntityId other) {
int c = type.compareTo(other.type);
if (c != 0) return c;
return id.compareTo(other.id);
}
@Override
public int hashCode() {
// generated by eclipse
final int prime = 31;
int result = 1;
result = prime * result + ((id == null) ? 0 : id.hashCode());
result = prime * result + ((type == null) ? 0 : type.hashCode());
return result;
}
@Override
public boolean equals(Object obj) {
// generated by eclipse
if (this == obj)
return true;
if (obj == null)
return false;
if (getClass() != obj.getClass())
return false;
EntityId other = (EntityId) obj;
if (id == null) {
if (other.id != null)
return false;
} else if (!id.equals(other.id))
return false;
if (type == null) {
if (other.type != null)
return false;
} else if (!type.equals(other.type))
return false;
return true;
}
@Override
public String toString() {
return "{ id: " + id + ", type: "+ type + " }";
}
}


@@ -0,0 +1,288 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.applicationhistoryservice.apptimeline;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.EnumSet;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.PriorityQueue;
import java.util.Set;
import java.util.SortedSet;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.classification.InterfaceStability.Unstable;
import org.apache.hadoop.service.AbstractService;
import org.apache.hadoop.yarn.api.records.apptimeline.ATSEntities;
import org.apache.hadoop.yarn.api.records.apptimeline.ATSEntity;
import org.apache.hadoop.yarn.api.records.apptimeline.ATSEvent;
import org.apache.hadoop.yarn.api.records.apptimeline.ATSEvents;
import org.apache.hadoop.yarn.api.records.apptimeline.ATSEvents.ATSEventsOfOneEntity;
import org.apache.hadoop.yarn.api.records.apptimeline.ATSPutErrors;
import org.apache.hadoop.yarn.api.records.apptimeline.ATSPutErrors.ATSPutError;
/**
* In-memory implementation of {@link ApplicationTimelineStore}. This
* implementation is for test purposes only. If it is instantiated more than
* once, reads and writes may go to different in-memory stores, so history
* data can appear to be missing.
*
*/
@Private
@Unstable
public class MemoryApplicationTimelineStore
extends AbstractService implements ApplicationTimelineStore {
private Map<EntityId, ATSEntity> entities =
new HashMap<EntityId, ATSEntity>();
public MemoryApplicationTimelineStore() {
super(MemoryApplicationTimelineStore.class.getName());
}
@Override
public ATSEntities getEntities(String entityType, Long limit,
Long windowStart, Long windowEnd, NameValuePair primaryFilter,
Collection<NameValuePair> secondaryFilters, EnumSet<Field> fields) {
if (limit == null) {
limit = DEFAULT_LIMIT;
}
if (windowStart == null) {
windowStart = Long.MIN_VALUE;
}
if (windowEnd == null) {
windowEnd = Long.MAX_VALUE;
}
if (fields == null) {
fields = EnumSet.allOf(Field.class);
}
List<ATSEntity> entitiesSelected = new ArrayList<ATSEntity>();
for (ATSEntity entity : new PriorityQueue<ATSEntity>(entities.values())) {
if (entitiesSelected.size() >= limit) {
break;
}
if (!entity.getEntityType().equals(entityType)) {
continue;
}
if (entity.getStartTime() <= windowStart) {
continue;
}
if (entity.getStartTime() > windowEnd) {
continue;
}
if (primaryFilter != null &&
!matchFilter(entity.getPrimaryFilters(), primaryFilter)) {
continue;
}
if (secondaryFilters != null) { // OR logic
boolean flag = false;
for (NameValuePair secondaryFilter : secondaryFilters) {
if (secondaryFilter != null &&
matchFilter(entity.getOtherInfo(), secondaryFilter)) {
flag = true;
break;
}
}
if (!flag) {
continue;
}
}
entitiesSelected.add(entity);
}
List<ATSEntity> entitiesToReturn = new ArrayList<ATSEntity>();
for (ATSEntity entitySelected : entitiesSelected) {
entitiesToReturn.add(maskFields(entitySelected, fields));
}
Collections.sort(entitiesToReturn);
ATSEntities entitiesWrapper = new ATSEntities();
entitiesWrapper.setEntities(entitiesToReturn);
return entitiesWrapper;
}
@Override
public ATSEntity getEntity(String entityId, String entityType,
EnumSet<Field> fieldsToRetrieve) {
if (fieldsToRetrieve == null) {
fieldsToRetrieve = EnumSet.allOf(Field.class);
}
ATSEntity entity = entities.get(new EntityId(entityId, entityType));
if (entity == null) {
return null;
} else {
return maskFields(entity, fieldsToRetrieve);
}
}
@Override
public ATSEvents getEntityTimelines(String entityType,
SortedSet<String> entityIds, Long limit, Long windowStart,
Long windowEnd,
Set<String> eventTypes) {
ATSEvents allEvents = new ATSEvents();
if (entityIds == null) {
return allEvents;
}
if (limit == null) {
limit = DEFAULT_LIMIT;
}
if (windowStart == null) {
windowStart = Long.MIN_VALUE;
}
if (windowEnd == null) {
windowEnd = Long.MAX_VALUE;
}
for (String entityId : entityIds) {
EntityId entityID = new EntityId(entityId, entityType);
ATSEntity entity = entities.get(entityID);
if (entity == null) {
continue;
}
ATSEventsOfOneEntity events = new ATSEventsOfOneEntity();
events.setEntityId(entityId);
events.setEntityType(entityType);
for (ATSEvent event : entity.getEvents()) {
if (events.getEvents().size() >= limit) {
break;
}
if (event.getTimestamp() <= windowStart) {
continue;
}
if (event.getTimestamp() > windowEnd) {
continue;
}
if (eventTypes != null && !eventTypes.contains(event.getEventType())) {
continue;
}
events.addEvent(event);
}
allEvents.addEvent(events);
}
return allEvents;
}
@Override
public ATSPutErrors put(ATSEntities data) {
ATSPutErrors errors = new ATSPutErrors();
for (ATSEntity entity : data.getEntities()) {
EntityId entityId =
new EntityId(entity.getEntityId(), entity.getEntityType());
// store entity info in memory
ATSEntity existingEntity = entities.get(entityId);
if (existingEntity == null) {
existingEntity = new ATSEntity();
existingEntity.setEntityId(entity.getEntityId());
existingEntity.setEntityType(entity.getEntityType());
existingEntity.setStartTime(entity.getStartTime());
entities.put(entityId, existingEntity);
}
if (entity.getEvents() != null) {
if (existingEntity.getEvents() == null) {
existingEntity.setEvents(entity.getEvents());
} else {
existingEntity.addEvents(entity.getEvents());
}
Collections.sort(existingEntity.getEvents());
}
// check startTime
if (existingEntity.getStartTime() == null) {
if (existingEntity.getEvents() == null
|| existingEntity.getEvents().isEmpty()) {
ATSPutError error = new ATSPutError();
error.setEntityId(entityId.getId());
error.setEntityType(entityId.getType());
error.setErrorCode(1);
errors.addError(error);
entities.remove(entityId);
continue;
} else {
existingEntity.setStartTime(entity.getEvents().get(0).getTimestamp());
}
}
if (entity.getPrimaryFilters() != null) {
if (existingEntity.getPrimaryFilters() == null) {
existingEntity.setPrimaryFilters(entity.getPrimaryFilters());
} else {
existingEntity.addPrimaryFilters(entity.getPrimaryFilters());
}
}
if (entity.getOtherInfo() != null) {
if (existingEntity.getOtherInfo() == null) {
existingEntity.setOtherInfo(entity.getOtherInfo());
} else {
existingEntity.addOtherInfo(entity.getOtherInfo());
}
}
// relate it to other entities
if (entity.getRelatedEntities() == null) {
continue;
}
for (Map.Entry<String, List<String>> partRelatedEntities : entity
.getRelatedEntities().entrySet()) {
if (partRelatedEntities == null) {
continue;
}
for (String idStr : partRelatedEntities.getValue()) {
EntityId relatedEntityId =
new EntityId(idStr, partRelatedEntities.getKey());
ATSEntity relatedEntity = entities.get(relatedEntityId);
if (relatedEntity != null) {
relatedEntity.addRelatedEntity(
existingEntity.getEntityType(), existingEntity.getEntityId());
}
}
}
}
return errors;
}
private static ATSEntity maskFields(
ATSEntity entity, EnumSet<Field> fields) {
// Conceal the fields that are not going to be exposed
ATSEntity entityToReturn = new ATSEntity();
entityToReturn.setEntityId(entity.getEntityId());
entityToReturn.setEntityType(entity.getEntityType());
entityToReturn.setStartTime(entity.getStartTime());
entityToReturn.setEvents(fields.contains(Field.EVENTS) ?
entity.getEvents() : fields.contains(Field.LAST_EVENT_ONLY) ?
Arrays.asList(entity.getEvents().get(0)) : null);
entityToReturn.setRelatedEntities(fields.contains(Field.RELATED_ENTITIES) ?
entity.getRelatedEntities() : null);
entityToReturn.setPrimaryFilters(fields.contains(Field.PRIMARY_FILTERS) ?
entity.getPrimaryFilters() : null);
entityToReturn.setOtherInfo(fields.contains(Field.OTHER_INFO) ?
entity.getOtherInfo() : null);
return entityToReturn;
}
private static boolean matchFilter(Map<String, Object> tags,
NameValuePair filter) {
Object value = tags.get(filter.getName());
if (value == null) { // doesn't have the filter
return false;
} else if (!value.equals(filter.getValue())) { // doesn't match the filter
return false;
}
return true;
}
}
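
Taken together, put() plus the two read paths above give a simple round trip. A minimal usage sketch, assuming only the classes added in this patch; the class name and the id/type values are illustrative, and the lifecycle calls mirror the test setup further down:

package org.apache.hadoop.yarn.server.applicationhistoryservice.apptimeline;

import java.util.ArrayList;
import java.util.EnumSet;
import java.util.List;

import org.apache.hadoop.yarn.api.records.apptimeline.ATSEntities;
import org.apache.hadoop.yarn.api.records.apptimeline.ATSEntity;
import org.apache.hadoop.yarn.api.records.apptimeline.ATSEvent;
import org.apache.hadoop.yarn.api.records.apptimeline.ATSPutErrors;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.server.applicationhistoryservice.apptimeline.ApplicationTimelineReader.Field;

public class MemoryStoreRoundTripSketch {
  public static void main(String[] args) {
    MemoryApplicationTimelineStore store = new MemoryApplicationTimelineStore();
    store.init(new YarnConfiguration());
    store.start();

    // One entity with a single event; no explicit start time is set, so put()
    // derives it from the first event (see the startTime check above).
    ATSEvent event = new ATSEvent();
    event.setTimestamp(System.currentTimeMillis());
    event.setEventType("start_event");
    List<ATSEvent> events = new ArrayList<ATSEvent>();
    events.add(event);

    ATSEntity entity = new ATSEntity();
    entity.setEntityId("app_1");
    entity.setEntityType("YARN_APPLICATION");
    entity.setEvents(events);

    ATSEntities batch = new ATSEntities();
    batch.addEntity(entity);
    ATSPutErrors errors = store.put(batch);
    System.out.println("put errors: " + errors.getErrors().size());  // 0

    // Read it back; the (id, type) pair is the lookup key.
    ATSEntity stored = store.getEntity("app_1", "YARN_APPLICATION",
        EnumSet.allOf(Field.class));
    System.out.println(stored.getEntityId() + " started at " + stored.getStartTime());

    store.stop();
  }
}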

View File

@ -0,0 +1,59 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.applicationhistoryservice.apptimeline;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
/**
* A class holding a name and value pair, used for specifying filters in
* {@link ApplicationTimelineReader}.
*/
@InterfaceAudience.Private
@InterfaceStability.Unstable
public class NameValuePair {
String name;
Object value;
public NameValuePair(String name, Object value) {
this.name = name;
this.value = value;
}
/**
* Get the name.
* @return The name.
*/
public String getName() {
return name;
}
/**
* Get the value.
* @return The value.
*/
public Object getValue() {
return value;
}
@Override
public String toString() {
return "{ name: " + name + ", value: " + value + " }";
}
}
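
NameValuePair is the filter currency of the read API: getEntities() takes one pair as the primary filter and a collection of pairs as secondary filters, which the in-memory store above matches against the entity's primary filters and otherInfo respectively, the secondary set with OR semantics. A hedged sketch, reusing only classes from this patch; the ids and filter names are illustrative:

package org.apache.hadoop.yarn.server.applicationhistoryservice.apptimeline;

import java.util.ArrayList;
import java.util.Collection;
import java.util.EnumSet;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

import org.apache.hadoop.yarn.api.records.apptimeline.ATSEntities;
import org.apache.hadoop.yarn.api.records.apptimeline.ATSEntity;
import org.apache.hadoop.yarn.api.records.apptimeline.ATSEvent;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.server.applicationhistoryservice.apptimeline.ApplicationTimelineReader.Field;

public class FilterQuerySketch {
  public static void main(String[] args) {
    MemoryApplicationTimelineStore store = new MemoryApplicationTimelineStore();
    store.init(new YarnConfiguration());
    store.start();

    // Store one entity carrying a primary filter ("user") and other info ("status").
    ATSEvent event = new ATSEvent();
    event.setTimestamp(System.currentTimeMillis());
    event.setEventType("start_event");
    List<ATSEvent> events = new ArrayList<ATSEvent>();
    events.add(event);

    Map<String, Object> primaryFilters = new HashMap<String, Object>();
    primaryFilters.put("user", "alice");
    Map<String, Object> otherInfo = new HashMap<String, Object>();
    otherInfo.put("status", "RUNNING");

    ATSEntity entity = new ATSEntity();
    entity.setEntityId("app_1");
    entity.setEntityType("YARN_APPLICATION");
    entity.setEvents(events);
    entity.setPrimaryFilters(primaryFilters);
    entity.setOtherInfo(otherInfo);

    ATSEntities batch = new ATSEntities();
    batch.addEntity(entity);
    store.put(batch);

    // The primary filter must appear among the entity's primary filters; the
    // secondary filters are matched against otherInfo with OR semantics, so a
    // single matching pair is enough.
    NameValuePair primary = new NameValuePair("user", "alice");
    Collection<NameValuePair> secondary = new ArrayList<NameValuePair>();
    secondary.add(new NameValuePair("status", "RUNNING"));
    secondary.add(new NameValuePair("queue", "default"));  // no match, but ORed

    ATSEntities matched = store.getEntities("YARN_APPLICATION", null, null, null,
        primary, secondary, EnumSet.allOf(Field.class));
    System.out.println(matched.getEntities().size());  // 1

    store.stop();
  }
}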

View File

@ -0,0 +1,20 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
@InterfaceAudience.Private
package org.apache.hadoop.yarn.server.applicationhistoryservice.apptimeline;
import org.apache.hadoop.classification.InterfaceAudience;

View File

@ -21,24 +21,31 @@
import org.apache.hadoop.yarn.server.api.ApplicationContext; import org.apache.hadoop.yarn.server.api.ApplicationContext;
import org.apache.hadoop.yarn.server.applicationhistoryservice.ApplicationHistoryManager; import org.apache.hadoop.yarn.server.applicationhistoryservice.ApplicationHistoryManager;
import org.apache.hadoop.yarn.server.applicationhistoryservice.apptimeline.ApplicationTimelineStore;
import org.apache.hadoop.yarn.webapp.GenericExceptionHandler; import org.apache.hadoop.yarn.webapp.GenericExceptionHandler;
import org.apache.hadoop.yarn.webapp.WebApp; import org.apache.hadoop.yarn.webapp.WebApp;
import org.apache.hadoop.yarn.webapp.YarnJacksonJaxbJsonProvider;
import org.apache.hadoop.yarn.webapp.YarnWebParams; import org.apache.hadoop.yarn.webapp.YarnWebParams;
public class AHSWebApp extends WebApp implements YarnWebParams { public class AHSWebApp extends WebApp implements YarnWebParams {
private final ApplicationHistoryManager applicationHistoryManager; private final ApplicationHistoryManager applicationHistoryManager;
private final ApplicationTimelineStore applicationTimelineStore;
public AHSWebApp(ApplicationHistoryManager applicationHistoryManager) { public AHSWebApp(ApplicationHistoryManager applicationHistoryManager,
ApplicationTimelineStore applicationTimelineStore) {
this.applicationHistoryManager = applicationHistoryManager; this.applicationHistoryManager = applicationHistoryManager;
this.applicationTimelineStore = applicationTimelineStore;
} }
@Override @Override
public void setup() { public void setup() {
bind(JAXBContextResolver.class); bind(YarnJacksonJaxbJsonProvider.class);
bind(AHSWebServices.class); bind(AHSWebServices.class);
bind(ATSWebServices.class);
bind(GenericExceptionHandler.class); bind(GenericExceptionHandler.class);
bind(ApplicationContext.class).toInstance(applicationHistoryManager); bind(ApplicationContext.class).toInstance(applicationHistoryManager);
bind(ApplicationTimelineStore.class).toInstance(applicationTimelineStore);
route("/", AHSController.class); route("/", AHSController.class);
route(pajoin("/apps", APP_STATE), AHSController.class); route(pajoin("/apps", APP_STATE), AHSController.class);
route(pajoin("/app", APPLICATION_ID), AHSController.class, "app"); route(pajoin("/app", APPLICATION_ID), AHSController.class, "app");

View File

@ -0,0 +1,297 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.applicationhistoryservice.webapp;
import java.util.ArrayList;
import java.util.Collection;
import java.util.EnumSet;
import java.util.HashSet;
import java.util.List;
import java.util.Set;
import java.util.SortedSet;
import java.util.TreeSet;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import javax.ws.rs.Consumes;
import javax.ws.rs.GET;
import javax.ws.rs.POST;
import javax.ws.rs.Path;
import javax.ws.rs.PathParam;
import javax.ws.rs.Produces;
import javax.ws.rs.QueryParam;
import javax.ws.rs.WebApplicationException;
import javax.ws.rs.core.Context;
import javax.ws.rs.core.MediaType;
import javax.ws.rs.core.Response;
import javax.xml.bind.annotation.XmlAccessType;
import javax.xml.bind.annotation.XmlAccessorType;
import javax.xml.bind.annotation.XmlElement;
import javax.xml.bind.annotation.XmlRootElement;
import org.apache.hadoop.classification.InterfaceAudience.Public;
import org.apache.hadoop.classification.InterfaceStability.Unstable;
import org.apache.hadoop.yarn.api.records.apptimeline.ATSEntities;
import org.apache.hadoop.yarn.api.records.apptimeline.ATSEntity;
import org.apache.hadoop.yarn.api.records.apptimeline.ATSEvents;
import org.apache.hadoop.yarn.api.records.apptimeline.ATSPutErrors;
import org.apache.hadoop.yarn.server.applicationhistoryservice.apptimeline.ApplicationTimelineReader.Field;
import org.apache.hadoop.yarn.server.applicationhistoryservice.apptimeline.ApplicationTimelineStore;
import org.apache.hadoop.yarn.server.applicationhistoryservice.apptimeline.NameValuePair;
import org.apache.hadoop.yarn.webapp.BadRequestException;
import com.google.inject.Inject;
import com.google.inject.Singleton;
@Singleton
@Path("/ws/v1/apptimeline")
//TODO: support XML serialization/deserialization
public class ATSWebServices {
private ApplicationTimelineStore store;
@Inject
public ATSWebServices(ApplicationTimelineStore store) {
this.store = store;
}
@XmlRootElement(name = "about")
@XmlAccessorType(XmlAccessType.NONE)
@Public
@Unstable
public static class AboutInfo {
private String about;
public AboutInfo() {
}
public AboutInfo(String about) {
this.about = about;
}
@XmlElement(name = "About")
public String getAbout() {
return about;
}
public void setAbout(String about) {
this.about = about;
}
}
/**
* Return the description of the application timeline web services.
*/
@GET
@Path("/")
@Produces({ MediaType.APPLICATION_JSON /* , MediaType.APPLICATION_XML */})
public AboutInfo about(
@Context HttpServletRequest req,
@Context HttpServletResponse res) {
init(res);
return new AboutInfo("Application Timeline API");
}
/**
* Return a list of entities that match the given parameters.
*/
@GET
@Path("/{entityType}")
@Produces({ MediaType.APPLICATION_JSON /* , MediaType.APPLICATION_XML */})
public ATSEntities getEntities(
@Context HttpServletRequest req,
@Context HttpServletResponse res,
@PathParam("entityType") String entityType,
@QueryParam("primaryFilter") String primaryFilter,
@QueryParam("secondaryFilter") String secondaryFilter,
@QueryParam("windowStart") String windowStart,
@QueryParam("windowEnd") String windowEnd,
@QueryParam("limit") String limit,
@QueryParam("fields") String fields) {
init(res);
ATSEntities entities = null;
try {
entities = store.getEntities(
parseStr(entityType),
parseLongStr(limit),
parseLongStr(windowStart),
parseLongStr(windowEnd),
parsePairStr(primaryFilter, ":"),
parsePairsStr(secondaryFilter, ",", ":"),
parseFieldsStr(fields, ","));
} catch (NumberFormatException e) {
throw new BadRequestException(
"windowStart, windowEnd or limit is not a numeric value.");
} catch (IllegalArgumentException e) {
throw new BadRequestException("requested invalid field.");
}
if (entities == null) {
return new ATSEntities();
}
return entities;
}
/**
* Return a single entity of the given entity type and Id.
*/
@GET
@Path("/{entityType}/{entityId}")
@Produces({ MediaType.APPLICATION_JSON /* , MediaType.APPLICATION_XML */})
public ATSEntity getEntity(
@Context HttpServletRequest req,
@Context HttpServletResponse res,
@PathParam("entityType") String entityType,
@PathParam("entityId") String entityId,
@QueryParam("fields") String fields) {
init(res);
ATSEntity entity = null;
try {
entity =
store.getEntity(parseStr(entityId), parseStr(entityType),
parseFieldsStr(fields, ","));
} catch (IllegalArgumentException e) {
throw new BadRequestException(
"requested invalid field.");
}
if (entity == null) {
throw new WebApplicationException(Response.Status.NOT_FOUND);
}
return entity;
}
/**
* Return the events that match the given parameters.
*/
@GET
@Path("/{entityType}/events")
@Produces({ MediaType.APPLICATION_JSON /* , MediaType.APPLICATION_XML */})
public ATSEvents getEvents(
@Context HttpServletRequest req,
@Context HttpServletResponse res,
@PathParam("entityType") String entityType,
@QueryParam("entityId") String entityId,
@QueryParam("eventType") String eventType,
@QueryParam("windowStart") String windowStart,
@QueryParam("windowEnd") String windowEnd,
@QueryParam("limit") String limit) {
init(res);
ATSEvents events = null;
try {
events = store.getEntityTimelines(
parseStr(entityType),
parseArrayStr(entityId, ","),
parseLongStr(limit),
parseLongStr(windowStart),
parseLongStr(windowEnd),
parseArrayStr(eventType, ","));
} catch (NumberFormatException e) {
throw new BadRequestException(
"windowStart, windowEnd or limit is not a numeric value.");
}
if (events == null) {
return new ATSEvents();
}
return events;
}
/**
 * Store the given entities in the timeline store, and return any errors
 * that occur while storing them.
*/
@POST
@Path("/")
@Consumes({ MediaType.APPLICATION_JSON /* , MediaType.APPLICATION_XML */})
public ATSPutErrors postEntities(
@Context HttpServletRequest req,
@Context HttpServletResponse res,
ATSEntities entities) {
init(res);
if (entities == null) {
return new ATSPutErrors();
}
return store.put(entities);
}
private void init(HttpServletResponse response) {
response.setContentType(null);
}
private static SortedSet<String> parseArrayStr(String str, String delimiter) {
if (str == null) {
return null;
}
SortedSet<String> strSet = new TreeSet<String>();
String[] strs = str.split(delimiter);
for (String aStr : strs) {
strSet.add(aStr.trim());
}
return strSet;
}
private static NameValuePair parsePairStr(String str, String delimiter) {
if (str == null) {
return null;
}
String[] strs = str.split(delimiter, 2);
return new NameValuePair(strs[0].trim(), strs[1].trim());
}
private static Collection<NameValuePair> parsePairsStr(
String str, String aDelimiter, String pDelimiter) {
if (str == null) {
return null;
}
String[] strs = str.split(aDelimiter);
Set<NameValuePair> pairs = new HashSet<NameValuePair>();
for (String aStr : strs) {
pairs.add(parsePairStr(aStr, pDelimiter));
}
return pairs;
}
private static EnumSet<Field> parseFieldsStr(String str, String delimiter) {
if (str == null) {
return null;
}
String[] strs = str.split(delimiter);
List<Field> fieldList = new ArrayList<Field>();
for (String s : strs) {
fieldList.add(Field.valueOf(s.toUpperCase()));
}
if (fieldList.size() == 0)
return null;
Field f1 = fieldList.remove(fieldList.size() - 1);
if (fieldList.size() == 0)
return EnumSet.of(f1);
else
return EnumSet.of(f1, fieldList.toArray(new Field[fieldList.size()]));
}
private static Long parseLongStr(String str) {
return str == null ? null : Long.parseLong(str.trim());
}
private static String parseStr(String str) {
return str == null ? null : str.trim();
}
}
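
The private parse helpers above fix the query-string format the REST layer accepts: primaryFilter is a single name:value pair, secondaryFilter is a comma-separated list of name:value pairs, fields is a comma-separated list of Field names, and windowStart/windowEnd/limit are numeric strings. A hedged client-side sketch using plain HttpURLConnection; the host and port are placeholders, since the actual web address depends on how the application history server is configured:

import java.io.BufferedReader;
import java.io.InputStreamReader;
import java.net.HttpURLConnection;
import java.net.URL;

public class ATSQuerySketch {
  public static void main(String[] args) throws Exception {
    // Placeholder address; substitute the history server's actual web address.
    String base = "http://localhost:8188/ws/v1/apptimeline";

    // GET /{entityType} using the formats the parse helpers expect.
    URL url = new URL(base + "/YARN_APPLICATION"
        + "?primaryFilter=user:alice"                        // parsePairStr(..., ":")
        + "&secondaryFilter=status:RUNNING,queue:default"    // parsePairsStr(..., ",", ":")
        + "&fields=EVENTS,PRIMARY_FILTERS"                   // parseFieldsStr(..., ",")
        + "&limit=10");
    HttpURLConnection conn = (HttpURLConnection) url.openConnection();
    conn.setRequestProperty("Accept", "application/json");

    BufferedReader in = new BufferedReader(
        new InputStreamReader(conn.getInputStream(), "UTF-8"));
    String line;
    while ((line = in.readLine()) != null) {
      System.out.println(line);  // the JSON-serialized ATSEntities response
    }
    in.close();
    conn.disconnect();
  }
}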

View File

@ -40,7 +40,7 @@ public void testStartStopServer() throws Exception {
Configuration config = new YarnConfiguration(); Configuration config = new YarnConfiguration();
historyServer.init(config); historyServer.init(config);
assertEquals(STATE.INITED, historyServer.getServiceState()); assertEquals(STATE.INITED, historyServer.getServiceState());
assertEquals(2, historyServer.getServices().size()); assertEquals(3, historyServer.getServices().size());
ApplicationHistoryClientService historyService = ApplicationHistoryClientService historyService =
historyServer.getClientService(); historyServer.getClientService();
assertNotNull(historyServer.getClientService()); assertNotNull(historyServer.getClientService());

View File

@ -0,0 +1,532 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.applicationhistoryservice.apptimeline;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNull;
import static org.junit.Assert.assertTrue;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.EnumSet;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import java.util.SortedSet;
import java.util.TreeSet;
import org.apache.hadoop.yarn.api.records.apptimeline.ATSEntities;
import org.apache.hadoop.yarn.api.records.apptimeline.ATSEntity;
import org.apache.hadoop.yarn.api.records.apptimeline.ATSEvent;
import org.apache.hadoop.yarn.api.records.apptimeline.ATSEvents.ATSEventsOfOneEntity;
import org.apache.hadoop.yarn.api.records.apptimeline.ATSPutErrors;
import org.apache.hadoop.yarn.api.records.apptimeline.ATSPutErrors.ATSPutError;
import org.apache.hadoop.yarn.server.applicationhistoryservice.apptimeline.ApplicationTimelineReader.Field;
public class ApplicationTimelineStoreTestUtils {
private static final Map<String, Object> EMPTY_MAP = Collections.emptyMap();
private static final Map<String, List<String>> EMPTY_REL_ENTITIES =
new HashMap<String, List<String>>();
protected ApplicationTimelineStore store;
private String entity1;
private String entityType1;
private String entity1b;
private String entity2;
private String entityType2;
private Map<String, Object> primaryFilters;
private Map<String, Object> secondaryFilters;
private Map<String, Object> allFilters;
private Map<String, Object> otherInfo;
private Map<String, List<String>> relEntityMap;
private NameValuePair userFilter;
private Collection<NameValuePair> goodTestingFilters;
private Collection<NameValuePair> badTestingFilters;
private ATSEvent ev1;
private ATSEvent ev2;
private ATSEvent ev3;
private ATSEvent ev4;
private Map<String, Object> eventInfo;
private List<ATSEvent> events1;
private List<ATSEvent> events2;
/**
* Load test data into the given store
*/
protected void loadTestData() {
ATSEntities atsEntities = new ATSEntities();
Map<String, Object> primaryFilters = new HashMap<String, Object>();
primaryFilters.put("user", "username");
primaryFilters.put("appname", 12345l);
Map<String, Object> secondaryFilters = new HashMap<String, Object>();
secondaryFilters.put("startTime", 123456l);
secondaryFilters.put("status", "RUNNING");
Map<String, Object> otherInfo1 = new HashMap<String, Object>();
otherInfo1.put("info1", "val1");
otherInfo1.putAll(secondaryFilters);
String entity1 = "id_1";
String entityType1 = "type_1";
String entity1b = "id_2";
String entity2 = "id_2";
String entityType2 = "type_2";
Map<String, List<String>> relatedEntities =
new HashMap<String, List<String>>();
relatedEntities.put(entityType2, Collections.singletonList(entity2));
ATSEvent ev3 = createEvent(789l, "launch_event", null);
ATSEvent ev4 = createEvent(-123l, "init_event", null);
List<ATSEvent> events = new ArrayList<ATSEvent>();
events.add(ev3);
events.add(ev4);
atsEntities.setEntities(Collections.singletonList(createEntity(entity2,
entityType2, null, events, null, null, null)));
ATSPutErrors response = store.put(atsEntities);
assertEquals(0, response.getErrors().size());
ATSEvent ev1 = createEvent(123l, "start_event", null);
atsEntities.setEntities(Collections.singletonList(createEntity(entity1,
entityType1, 123l, Collections.singletonList(ev1),
relatedEntities, primaryFilters, otherInfo1)));
response = store.put(atsEntities);
assertEquals(0, response.getErrors().size());
atsEntities.setEntities(Collections.singletonList(createEntity(entity1b,
entityType1, null, Collections.singletonList(ev1), relatedEntities,
primaryFilters, otherInfo1)));
response = store.put(atsEntities);
assertEquals(0, response.getErrors().size());
Map<String, Object> eventInfo = new HashMap<String, Object>();
eventInfo.put("event info 1", "val1");
ATSEvent ev2 = createEvent(456l, "end_event", eventInfo);
Map<String, Object> otherInfo2 = new HashMap<String, Object>();
otherInfo2.put("info2", "val2");
atsEntities.setEntities(Collections.singletonList(createEntity(entity1,
entityType1, null, Collections.singletonList(ev2), null,
primaryFilters, otherInfo2)));
response = store.put(atsEntities);
assertEquals(0, response.getErrors().size());
atsEntities.setEntities(Collections.singletonList(createEntity(entity1b,
entityType1, 123l, Collections.singletonList(ev2), null,
primaryFilters, otherInfo2)));
response = store.put(atsEntities);
assertEquals(0, response.getErrors().size());
atsEntities.setEntities(Collections.singletonList(createEntity(
"badentityid", "badentity", null, null, null, null, otherInfo1)));
response = store.put(atsEntities);
assertEquals(1, response.getErrors().size());
ATSPutError error = response.getErrors().get(0);
assertEquals("badentityid", error.getEntityId());
assertEquals("badentity", error.getEntityType());
assertEquals((Integer) 1, error.getErrorCode());
}
/**
 * Load verification data
*/
protected void loadVerificationData() throws Exception {
userFilter = new NameValuePair("user",
"username");
goodTestingFilters = new ArrayList<NameValuePair>();
goodTestingFilters.add(new NameValuePair("appname", 12345l));
goodTestingFilters.add(new NameValuePair("status", "RUNNING"));
badTestingFilters = new ArrayList<NameValuePair>();
badTestingFilters.add(new NameValuePair("appname", 12345l));
badTestingFilters.add(new NameValuePair("status", "FINISHED"));
primaryFilters = new HashMap<String, Object>();
primaryFilters.put("user", "username");
primaryFilters.put("appname", 12345l);
secondaryFilters = new HashMap<String, Object>();
secondaryFilters.put("startTime", 123456l);
secondaryFilters.put("status", "RUNNING");
allFilters = new HashMap<String, Object>();
allFilters.putAll(secondaryFilters);
allFilters.putAll(primaryFilters);
otherInfo = new HashMap<String, Object>();
otherInfo.put("info1", "val1");
otherInfo.put("info2", "val2");
otherInfo.putAll(secondaryFilters);
entity1 = "id_1";
entityType1 = "type_1";
entity1b = "id_2";
entity2 = "id_2";
entityType2 = "type_2";
ev1 = createEvent(123l, "start_event", null);
eventInfo = new HashMap<String, Object>();
eventInfo.put("event info 1", "val1");
ev2 = createEvent(456l, "end_event", eventInfo);
events1 = new ArrayList<ATSEvent>();
events1.add(ev2);
events1.add(ev1);
relEntityMap =
new HashMap<String, List<String>>();
List<String> ids = new ArrayList<String>();
ids.add(entity1);
ids.add(entity1b);
relEntityMap.put(entityType1, ids);
ev3 = createEvent(789l, "launch_event", null);
ev4 = createEvent(-123l, "init_event", null);
events2 = new ArrayList<ATSEvent>();
events2.add(ev3);
events2.add(ev4);
}
public void testGetSingleEntity() {
// test getting entity info
verifyEntityInfo(null, null, null, null, null, null,
store.getEntity("id_1", "type_2", EnumSet.allOf(Field.class)));
verifyEntityInfo(entity1, entityType1, events1, EMPTY_REL_ENTITIES,
primaryFilters, otherInfo, store.getEntity(entity1, entityType1,
EnumSet.allOf(Field.class)));
verifyEntityInfo(entity1b, entityType1, events1, EMPTY_REL_ENTITIES,
primaryFilters, otherInfo, store.getEntity(entity1b, entityType1,
EnumSet.allOf(Field.class)));
verifyEntityInfo(entity2, entityType2, events2, relEntityMap, EMPTY_MAP,
EMPTY_MAP, store.getEntity(entity2, entityType2,
EnumSet.allOf(Field.class)));
// test getting single fields
verifyEntityInfo(entity1, entityType1, events1, null, null, null,
store.getEntity(entity1, entityType1, EnumSet.of(Field.EVENTS)));
verifyEntityInfo(entity1, entityType1, Collections.singletonList(ev2),
null, null, null, store.getEntity(entity1, entityType1,
EnumSet.of(Field.LAST_EVENT_ONLY)));
verifyEntityInfo(entity1, entityType1, null, null, primaryFilters, null,
store.getEntity(entity1, entityType1,
EnumSet.of(Field.PRIMARY_FILTERS)));
verifyEntityInfo(entity1, entityType1, null, null, null, otherInfo,
store.getEntity(entity1, entityType1, EnumSet.of(Field.OTHER_INFO)));
verifyEntityInfo(entity2, entityType2, null, relEntityMap, null, null,
store.getEntity(entity2, entityType2,
EnumSet.of(Field.RELATED_ENTITIES)));
}
public void testGetEntities() {
// test getting entities
assertEquals("nonzero entities size for nonexistent type", 0,
store.getEntities("type_0", null, null, null, null, null,
null).getEntities().size());
assertEquals("nonzero entities size for nonexistent type", 0,
store.getEntities("type_3", null, null, null, null, null,
null).getEntities().size());
assertEquals("nonzero entities size for nonexistent type", 0,
store.getEntities("type_0", null, null, null, userFilter,
null, null).getEntities().size());
assertEquals("nonzero entities size for nonexistent type", 0,
store.getEntities("type_3", null, null, null, userFilter,
null, null).getEntities().size());
List<ATSEntity> entities =
store.getEntities("type_1", null, null, null, null, null,
EnumSet.allOf(Field.class)).getEntities();
assertEquals(2, entities.size());
verifyEntityInfo(entity1, entityType1, events1, EMPTY_REL_ENTITIES,
primaryFilters, otherInfo, entities.get(0));
verifyEntityInfo(entity1b, entityType1, events1, EMPTY_REL_ENTITIES,
primaryFilters, otherInfo, entities.get(1));
entities = store.getEntities("type_2", null, null, null, null, null,
EnumSet.allOf(Field.class)).getEntities();
assertEquals(1, entities.size());
verifyEntityInfo(entity2, entityType2, events2, relEntityMap, EMPTY_MAP,
EMPTY_MAP, entities.get(0));
entities = store.getEntities("type_1", 1l, null, null, null, null,
EnumSet.allOf(Field.class)).getEntities();
assertEquals(1, entities.size());
verifyEntityInfo(entity1, entityType1, events1, EMPTY_REL_ENTITIES,
primaryFilters, otherInfo, entities.get(0));
entities = store.getEntities("type_1", 1l, 0l, null, null, null,
EnumSet.allOf(Field.class)).getEntities();
assertEquals(1, entities.size());
verifyEntityInfo(entity1, entityType1, events1, EMPTY_REL_ENTITIES,
primaryFilters, otherInfo, entities.get(0));
entities = store.getEntities("type_1", null, 234l, null, null, null,
EnumSet.allOf(Field.class)).getEntities();
assertEquals(0, entities.size());
entities = store.getEntities("type_1", null, 123l, null, null, null,
EnumSet.allOf(Field.class)).getEntities();
assertEquals(0, entities.size());
entities = store.getEntities("type_1", null, 234l, 345l, null, null,
EnumSet.allOf(Field.class)).getEntities();
assertEquals(0, entities.size());
entities = store.getEntities("type_1", null, null, 345l, null, null,
EnumSet.allOf(Field.class)).getEntities();
assertEquals(2, entities.size());
verifyEntityInfo(entity1, entityType1, events1, EMPTY_REL_ENTITIES,
primaryFilters, otherInfo, entities.get(0));
verifyEntityInfo(entity1b, entityType1, events1, EMPTY_REL_ENTITIES,
primaryFilters, otherInfo, entities.get(1));
entities = store.getEntities("type_1", null, null, 123l, null, null,
EnumSet.allOf(Field.class)).getEntities();
assertEquals(2, entities.size());
verifyEntityInfo(entity1, entityType1, events1, EMPTY_REL_ENTITIES,
primaryFilters, otherInfo, entities.get(0));
verifyEntityInfo(entity1b, entityType1, events1, EMPTY_REL_ENTITIES,
primaryFilters, otherInfo, entities.get(1));
}
public void testGetEntitiesWithPrimaryFilters() {
// test using primary filter
assertEquals("nonzero entities size for primary filter", 0,
store.getEntities("type_1", null, null, null,
new NameValuePair("none", "none"), null,
EnumSet.allOf(Field.class)).getEntities().size());
assertEquals("nonzero entities size for primary filter", 0,
store.getEntities("type_2", null, null, null,
new NameValuePair("none", "none"), null,
EnumSet.allOf(Field.class)).getEntities().size());
assertEquals("nonzero entities size for primary filter", 0,
store.getEntities("type_3", null, null, null,
new NameValuePair("none", "none"), null,
EnumSet.allOf(Field.class)).getEntities().size());
List<ATSEntity> entities = store.getEntities("type_1", null, null, null,
userFilter, null, EnumSet.allOf(Field.class)).getEntities();
assertEquals(2, entities.size());
verifyEntityInfo(entity1, entityType1, events1, EMPTY_REL_ENTITIES,
primaryFilters, otherInfo, entities.get(0));
verifyEntityInfo(entity1b, entityType1, events1, EMPTY_REL_ENTITIES,
primaryFilters, otherInfo, entities.get(1));
entities = store.getEntities("type_2", null, null, null, userFilter, null,
EnumSet.allOf(Field.class)).getEntities();
assertEquals(0, entities.size());
entities = store.getEntities("type_1", 1l, null, null, userFilter, null,
EnumSet.allOf(Field.class)).getEntities();
assertEquals(1, entities.size());
verifyEntityInfo(entity1, entityType1, events1, EMPTY_REL_ENTITIES,
primaryFilters, otherInfo, entities.get(0));
entities = store.getEntities("type_1", 1l, 0l, null, userFilter, null,
EnumSet.allOf(Field.class)).getEntities();
assertEquals(1, entities.size());
verifyEntityInfo(entity1, entityType1, events1, EMPTY_REL_ENTITIES,
primaryFilters, otherInfo, entities.get(0));
entities = store.getEntities("type_1", null, 234l, null, userFilter, null,
EnumSet.allOf(Field.class)).getEntities();
assertEquals(0, entities.size());
entities = store.getEntities("type_1", null, 234l, 345l, userFilter, null,
EnumSet.allOf(Field.class)).getEntities();
assertEquals(0, entities.size());
entities = store.getEntities("type_1", null, null, 345l, userFilter, null,
EnumSet.allOf(Field.class)).getEntities();
assertEquals(2, entities.size());
verifyEntityInfo(entity1, entityType1, events1, EMPTY_REL_ENTITIES,
primaryFilters, otherInfo, entities.get(0));
verifyEntityInfo(entity1b, entityType1, events1, EMPTY_REL_ENTITIES,
primaryFilters, otherInfo, entities.get(1));
}
public void testGetEntitiesWithSecondaryFilters() {
// test using secondary filter
List<ATSEntity> entities = store.getEntities("type_1", null, null, null,
null, goodTestingFilters, EnumSet.allOf(Field.class)).getEntities();
assertEquals(2, entities.size());
verifyEntityInfo(entity1, entityType1, events1, EMPTY_REL_ENTITIES,
primaryFilters, otherInfo, entities.get(0));
verifyEntityInfo(entity1b, entityType1, events1, EMPTY_REL_ENTITIES,
primaryFilters, otherInfo, entities.get(1));
entities = store.getEntities("type_1", null, null, null, userFilter,
goodTestingFilters, EnumSet.allOf(Field.class)).getEntities();
assertEquals(2, entities.size());
verifyEntityInfo(entity1, entityType1, events1, EMPTY_REL_ENTITIES,
primaryFilters, otherInfo, entities.get(0));
verifyEntityInfo(entity1b, entityType1, events1, EMPTY_REL_ENTITIES,
primaryFilters, otherInfo, entities.get(1));
entities = store.getEntities("type_1", null, null, null, null,
badTestingFilters, EnumSet.allOf(Field.class)).getEntities();
assertEquals(0, entities.size());
entities = store.getEntities("type_1", null, null, null, userFilter,
badTestingFilters, EnumSet.allOf(Field.class)).getEntities();
assertEquals(0, entities.size());
}
public void testGetEvents() {
// test getting entity timelines
SortedSet<String> sortedSet = new TreeSet<String>();
sortedSet.add(entity1);
List<ATSEventsOfOneEntity> timelines =
store.getEntityTimelines(entityType1, sortedSet, null, null,
null, null).getAllEvents();
assertEquals(1, timelines.size());
verifyEntityTimeline(timelines.get(0), entity1, entityType1, ev2, ev1);
sortedSet.add(entity1b);
timelines = store.getEntityTimelines(entityType1, sortedSet, null,
null, null, null).getAllEvents();
assertEquals(2, timelines.size());
verifyEntityTimeline(timelines.get(0), entity1, entityType1, ev2, ev1);
verifyEntityTimeline(timelines.get(1), entity1b, entityType1, ev2, ev1);
timelines = store.getEntityTimelines(entityType1, sortedSet, 1l,
null, null, null).getAllEvents();
assertEquals(2, timelines.size());
verifyEntityTimeline(timelines.get(0), entity1, entityType1, ev2);
verifyEntityTimeline(timelines.get(1), entity1b, entityType1, ev2);
timelines = store.getEntityTimelines(entityType1, sortedSet, null,
345l, null, null).getAllEvents();
assertEquals(2, timelines.size());
verifyEntityTimeline(timelines.get(0), entity1, entityType1, ev2);
verifyEntityTimeline(timelines.get(1), entity1b, entityType1, ev2);
timelines = store.getEntityTimelines(entityType1, sortedSet, null,
123l, null, null).getAllEvents();
assertEquals(2, timelines.size());
verifyEntityTimeline(timelines.get(0), entity1, entityType1, ev2);
verifyEntityTimeline(timelines.get(1), entity1b, entityType1, ev2);
timelines = store.getEntityTimelines(entityType1, sortedSet, null,
null, 345l, null).getAllEvents();
assertEquals(2, timelines.size());
verifyEntityTimeline(timelines.get(0), entity1, entityType1, ev1);
verifyEntityTimeline(timelines.get(1), entity1b, entityType1, ev1);
timelines = store.getEntityTimelines(entityType1, sortedSet, null,
null, 123l, null).getAllEvents();
assertEquals(2, timelines.size());
verifyEntityTimeline(timelines.get(0), entity1, entityType1, ev1);
verifyEntityTimeline(timelines.get(1), entity1b, entityType1, ev1);
timelines = store.getEntityTimelines(entityType1, sortedSet, null,
null, null, Collections.singleton("end_event")).getAllEvents();
assertEquals(2, timelines.size());
verifyEntityTimeline(timelines.get(0), entity1, entityType1, ev2);
verifyEntityTimeline(timelines.get(1), entity1b, entityType1, ev2);
sortedSet.add(entity2);
timelines = store.getEntityTimelines(entityType2, sortedSet, null,
null, null, null).getAllEvents();
assertEquals(1, timelines.size());
verifyEntityTimeline(timelines.get(0), entity2, entityType2, ev3, ev4);
}
/**
* Verify a single entity
*/
private static void verifyEntityInfo(String entity, String entityType,
List<ATSEvent> events, Map<String, List<String>> relatedEntities,
Map<String, Object> primaryFilters, Map<String, Object> otherInfo,
ATSEntity retrievedEntityInfo) {
if (entity == null) {
assertNull(retrievedEntityInfo);
return;
}
assertEquals(entity, retrievedEntityInfo.getEntityId());
assertEquals(entityType, retrievedEntityInfo.getEntityType());
if (events == null)
assertNull(retrievedEntityInfo.getEvents());
else
assertEquals(events, retrievedEntityInfo.getEvents());
if (relatedEntities == null)
assertNull(retrievedEntityInfo.getRelatedEntities());
else
assertEquals(relatedEntities, retrievedEntityInfo.getRelatedEntities());
if (primaryFilters == null)
assertNull(retrievedEntityInfo.getPrimaryFilters());
else
assertTrue(primaryFilters.equals(
retrievedEntityInfo.getPrimaryFilters()));
if (otherInfo == null)
assertNull(retrievedEntityInfo.getOtherInfo());
else
assertTrue(otherInfo.equals(retrievedEntityInfo.getOtherInfo()));
}
/**
* Verify timeline events
*/
private static void verifyEntityTimeline(
ATSEventsOfOneEntity retrievedEvents, String entity, String entityType,
ATSEvent... actualEvents) {
assertEquals(entity, retrievedEvents.getEntityId());
assertEquals(entityType, retrievedEvents.getEntityType());
assertEquals(actualEvents.length, retrievedEvents.getEvents().size());
for (int i = 0; i < actualEvents.length; i++) {
assertEquals(actualEvents[i], retrievedEvents.getEvents().get(i));
}
}
/**
* Create a test entity
*/
private static ATSEntity createEntity(String entity, String entityType,
Long startTime, List<ATSEvent> events,
Map<String, List<String>> relatedEntities,
Map<String, Object> primaryFilters, Map<String, Object> otherInfo) {
ATSEntity atsEntity = new ATSEntity();
atsEntity.setEntityId(entity);
atsEntity.setEntityType(entityType);
atsEntity.setStartTime(startTime);
atsEntity.setEvents(events);
if (relatedEntities != null)
for (Entry<String, List<String>> e : relatedEntities.entrySet())
for (String v : e.getValue())
atsEntity.addRelatedEntity(e.getKey(), v);
else
atsEntity.setRelatedEntities(null);
atsEntity.setPrimaryFilters(primaryFilters);
atsEntity.setOtherInfo(otherInfo);
return atsEntity;
}
/**
* Create a test event
*/
private static ATSEvent createEvent(long timestamp, String type, Map<String,
Object> info) {
ATSEvent event = new ATSEvent();
event.setTimestamp(timestamp);
event.setEventType(type);
event.setEventInfo(info);
return event;
}
}
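
The "badentityid" case in loadTestData() above exercises the one error path put() currently reports: an entity with neither a start time nor any events cannot be assigned a start time, so it is dropped and reported with error code 1. A minimal sketch of that path in isolation, assuming only the classes added in this patch:

package org.apache.hadoop.yarn.server.applicationhistoryservice.apptimeline;

import org.apache.hadoop.yarn.api.records.apptimeline.ATSEntities;
import org.apache.hadoop.yarn.api.records.apptimeline.ATSEntity;
import org.apache.hadoop.yarn.api.records.apptimeline.ATSPutErrors;
import org.apache.hadoop.yarn.conf.YarnConfiguration;

public class StartTimeErrorSketch {
  public static void main(String[] args) {
    MemoryApplicationTimelineStore store = new MemoryApplicationTimelineStore();
    store.init(new YarnConfiguration());
    store.start();

    // Neither a start time nor events: put() cannot derive a start time, so
    // the entity is rejected with error code 1 and is not kept in the store.
    ATSEntity bad = new ATSEntity();
    bad.setEntityId("badentityid");
    bad.setEntityType("badentity");

    ATSEntities batch = new ATSEntities();
    batch.addEntity(bad);
    ATSPutErrors errors = store.put(batch);
    System.out.println(errors.getErrors().get(0).getErrorCode());  // 1
    System.out.println(store.getEntity("badentityid", "badentity", null));  // null

    store.stop();
  }
}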

View File

@ -0,0 +1,73 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.applicationhistoryservice.apptimeline;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
public class TestMemoryApplicationTimelineStore
extends ApplicationTimelineStoreTestUtils {
@Before
public void setup() throws Exception {
store = new MemoryApplicationTimelineStore();
store.init(new YarnConfiguration());
store.start();
loadTestData();
loadVerificationData();
}
@After
public void tearDown() throws Exception {
store.stop();
}
public ApplicationTimelineStore getApplicationTimelineStore() {
return store;
}
@Test
public void testGetSingleEntity() {
super.testGetSingleEntity();
}
@Test
public void testGetEntities() {
super.testGetEntities();
}
@Test
public void testGetEntitiesWithPrimaryFilters() {
super.testGetEntitiesWithPrimaryFilters();
}
@Test
public void testGetEntitiesWithSecondaryFilters() {
super.testGetEntitiesWithSecondaryFilters();
}
@Test
public void testGetEvents() {
super.testGetEvents();
}
}

View File

@ -0,0 +1,212 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.applicationhistoryservice.webapp;
import static org.junit.Assert.assertEquals;
import javax.ws.rs.core.MediaType;
import junit.framework.Assert;
import org.apache.hadoop.yarn.api.records.apptimeline.ATSEntities;
import org.apache.hadoop.yarn.api.records.apptimeline.ATSEntity;
import org.apache.hadoop.yarn.api.records.apptimeline.ATSEvent;
import org.apache.hadoop.yarn.api.records.apptimeline.ATSEvents;
import org.apache.hadoop.yarn.api.records.apptimeline.ATSPutErrors;
import org.apache.hadoop.yarn.server.applicationhistoryservice.apptimeline.ApplicationTimelineStore;
import org.apache.hadoop.yarn.server.applicationhistoryservice.apptimeline.TestMemoryApplicationTimelineStore;
import org.apache.hadoop.yarn.webapp.GenericExceptionHandler;
import org.apache.hadoop.yarn.webapp.YarnJacksonJaxbJsonProvider;
import org.junit.Test;
import com.google.inject.Guice;
import com.google.inject.Injector;
import com.google.inject.servlet.GuiceServletContextListener;
import com.google.inject.servlet.ServletModule;
import com.sun.jersey.api.client.ClientResponse;
import com.sun.jersey.api.client.WebResource;
import com.sun.jersey.api.client.config.DefaultClientConfig;
import com.sun.jersey.guice.spi.container.servlet.GuiceContainer;
import com.sun.jersey.test.framework.JerseyTest;
import com.sun.jersey.test.framework.WebAppDescriptor;
public class TestATSWebServices extends JerseyTest {
private static ApplicationTimelineStore store;
private Injector injector = Guice.createInjector(new ServletModule() {
@Override
protected void configureServlets() {
bind(YarnJacksonJaxbJsonProvider.class);
bind(ATSWebServices.class);
bind(GenericExceptionHandler.class);
try{
store = mockApplicationTimelineStore();
} catch (Exception e) {
Assert.fail();
}
bind(ApplicationTimelineStore.class).toInstance(store);
serve("/*").with(GuiceContainer.class);
}
});
public class GuiceServletConfig extends GuiceServletContextListener {
@Override
protected Injector getInjector() {
return injector;
}
}
private ApplicationTimelineStore mockApplicationTimelineStore()
throws Exception {
TestMemoryApplicationTimelineStore store =
new TestMemoryApplicationTimelineStore();
store.setup();
return store.getApplicationTimelineStore();
}
public TestATSWebServices() {
super(new WebAppDescriptor.Builder(
"org.apache.hadoop.yarn.server.applicationhistoryservice.webapp")
.contextListenerClass(GuiceServletConfig.class)
.filterClass(com.google.inject.servlet.GuiceFilter.class)
.contextPath("jersey-guice-filter")
.servletPath("/")
.clientConfig(new DefaultClientConfig(YarnJacksonJaxbJsonProvider.class))
.build());
}
@Test
public void testAbout() throws Exception {
WebResource r = resource();
ClientResponse response = r.path("ws").path("v1").path("apptimeline")
.accept(MediaType.APPLICATION_JSON)
.get(ClientResponse.class);
assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
ATSWebServices.AboutInfo about =
response.getEntity(ATSWebServices.AboutInfo.class);
Assert.assertNotNull(about);
Assert.assertEquals("Application Timeline API", about.getAbout());
}
@Test
public void testGetEntities() throws Exception {
WebResource r = resource();
ClientResponse response = r.path("ws").path("v1").path("apptimeline")
.path("type_1")
.accept(MediaType.APPLICATION_JSON)
.get(ClientResponse.class);
assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
ATSEntities entities = response.getEntity(ATSEntities.class);
Assert.assertNotNull(entities);
Assert.assertEquals(2, entities.getEntities().size());
ATSEntity entity1 = entities.getEntities().get(0);
Assert.assertNotNull(entity1);
Assert.assertEquals("id_1", entity1.getEntityId());
Assert.assertEquals("type_1", entity1.getEntityType());
Assert.assertEquals(123l, entity1.getStartTime().longValue());
Assert.assertEquals(2, entity1.getEvents().size());
Assert.assertEquals(2, entity1.getPrimaryFilters().size());
Assert.assertEquals(4, entity1.getOtherInfo().size());
ATSEntity entity2 = entities.getEntities().get(1);
Assert.assertNotNull(entity2);
Assert.assertEquals("id_2", entity2.getEntityId());
Assert.assertEquals("type_1", entity2.getEntityType());
Assert.assertEquals(123l, entity2.getStartTime().longValue());
Assert.assertEquals(2, entity2.getEvents().size());
Assert.assertEquals(2, entity2.getPrimaryFilters().size());
Assert.assertEquals(4, entity2.getOtherInfo().size());
}
@Test
public void testGetEntity() throws Exception {
WebResource r = resource();
ClientResponse response = r.path("ws").path("v1").path("apptimeline")
.path("type_1").path("id_1")
.accept(MediaType.APPLICATION_JSON)
.get(ClientResponse.class);
assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
ATSEntity entity = response.getEntity(ATSEntity.class);
Assert.assertNotNull(entity);
Assert.assertEquals("id_1", entity.getEntityId());
Assert.assertEquals("type_1", entity.getEntityType());
Assert.assertEquals(123l, entity.getStartTime().longValue());
Assert.assertEquals(2, entity.getEvents().size());
Assert.assertEquals(2, entity.getPrimaryFilters().size());
Assert.assertEquals(4, entity.getOtherInfo().size());
}
@Test
public void testGetEvents() throws Exception {
WebResource r = resource();
ClientResponse response = r.path("ws").path("v1").path("apptimeline")
.path("type_1").path("events")
.queryParam("entityId", "id_1")
.accept(MediaType.APPLICATION_JSON)
.get(ClientResponse.class);
assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
ATSEvents events = response.getEntity(ATSEvents.class);
Assert.assertNotNull(events);
Assert.assertEquals(1, events.getAllEvents().size());
ATSEvents.ATSEventsOfOneEntity partEvents = events.getAllEvents().get(0);
Assert.assertEquals(2, partEvents.getEvents().size());
ATSEvent event1 = partEvents.getEvents().get(0);
Assert.assertEquals(456l, event1.getTimestamp());
Assert.assertEquals("end_event", event1.getEventType());
Assert.assertEquals(1, event1.getEventInfo().size());
ATSEvent event2 = partEvents.getEvents().get(1);
Assert.assertEquals(123l, event2.getTimestamp());
Assert.assertEquals("start_event", event2.getEventType());
Assert.assertEquals(0, event2.getEventInfo().size());
}
@Test
public void testPostEntities() throws Exception {
ATSEntities entities = new ATSEntities();
ATSEntity entity = new ATSEntity();
entity.setEntityId("test id");
entity.setEntityType("test type");
entity.setStartTime(System.currentTimeMillis());
entities.addEntity(entity);
WebResource r = resource();
ClientResponse response = r.path("ws").path("v1").path("apptimeline")
.accept(MediaType.APPLICATION_JSON)
.type(MediaType.APPLICATION_JSON)
.post(ClientResponse.class, entities);
assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
ATSPutErrors errors = response.getEntity(ATSPutErrors.class);
Assert.assertNotNull(errors);
Assert.assertEquals(0, errors.getErrors().size());
// verify the entity exists in the store
response = r.path("ws").path("v1").path("apptimeline")
.path("test type").path("test id")
.accept(MediaType.APPLICATION_JSON)
.get(ClientResponse.class);
assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
entity = response.getEntity(ATSEntity.class);
Assert.assertNotNull(entity);
Assert.assertEquals("test id", entity.getEntityId());
Assert.assertEquals("test type", entity.getEntityType());
}
}

View File

@ -26,6 +26,7 @@
import java.util.Comparator; import java.util.Comparator;
import java.util.List; import java.util.List;
import java.util.Map; import java.util.Map;
import java.util.Set;
import org.apache.hadoop.classification.InterfaceAudience.Private; import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.io.Text; import org.apache.hadoop.io.Text;
@ -312,7 +313,7 @@ public static ApplicationReport newApplicationReport(
String url, long startTime, long finishTime, String url, long startTime, long finishTime,
FinalApplicationStatus finalStatus, FinalApplicationStatus finalStatus,
ApplicationResourceUsageReport appResources, String origTrackingUrl, ApplicationResourceUsageReport appResources, String origTrackingUrl,
float progress, String appType, Token amRmToken) { float progress, String appType, Token amRmToken, Set<String> tags) {
ApplicationReport report = recordFactory ApplicationReport report = recordFactory
.newRecordInstance(ApplicationReport.class); .newRecordInstance(ApplicationReport.class);
report.setApplicationId(applicationId); report.setApplicationId(applicationId);
@ -334,6 +335,7 @@ public static ApplicationReport newApplicationReport(
report.setProgress(progress); report.setProgress(progress);
report.setApplicationType(appType); report.setApplicationType(appType);
report.setAMRMToken(amRmToken); report.setAMRMToken(amRmToken);
report.setApplicationTags(tags);
return report; return report;
} }

View File

@ -45,8 +45,11 @@
import org.apache.hadoop.security.authorize.PolicyProvider; import org.apache.hadoop.security.authorize.PolicyProvider;
import org.apache.hadoop.security.authorize.ProxyUsers; import org.apache.hadoop.security.authorize.ProxyUsers;
import org.apache.hadoop.service.CompositeService; import org.apache.hadoop.service.CompositeService;
import org.apache.hadoop.yarn.LocalConfigurationProvider;
import org.apache.hadoop.yarn.api.records.NodeId; import org.apache.hadoop.yarn.api.records.NodeId;
import org.apache.hadoop.yarn.api.records.ResourceOption; import org.apache.hadoop.yarn.api.records.ResourceOption;
import org.apache.hadoop.yarn.conf.ConfigurationProvider;
import org.apache.hadoop.yarn.conf.ConfigurationProviderFactory;
import org.apache.hadoop.yarn.conf.HAUtil; import org.apache.hadoop.yarn.conf.HAUtil;
import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.exceptions.YarnException; import org.apache.hadoop.yarn.exceptions.YarnException;
@ -72,6 +75,7 @@
import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode; import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode;
import org.apache.hadoop.yarn.server.resourcemanager.security.authorize.RMPolicyProvider; import org.apache.hadoop.yarn.server.resourcemanager.security.authorize.RMPolicyProvider;
import com.google.common.annotations.VisibleForTesting;
import com.google.protobuf.BlockingService; import com.google.protobuf.BlockingService;
public class AdminService extends CompositeService implements public class AdminService extends CompositeService implements
@ -89,6 +93,8 @@ public class AdminService extends CompositeService implements
private InetSocketAddress masterServiceAddress; private InetSocketAddress masterServiceAddress;
private AccessControlList adminAcl; private AccessControlList adminAcl;
private ConfigurationProvider configurationProvider = null;
private final RecordFactory recordFactory = private final RecordFactory recordFactory =
       RecordFactoryProvider.getRecordFactory(null);
@@ -109,6 +115,10 @@ public synchronized void serviceInit(Configuration conf) throws Exception {
       }
     }
 
+    this.configurationProvider =
+        ConfigurationProviderFactory.getConfigurationProvider(conf);
+    configurationProvider.init(conf);
+
     masterServiceAddress = conf.getSocketAddr(
         YarnConfiguration.RM_ADMIN_ADDRESS,
         YarnConfiguration.DEFAULT_RM_ADMIN_ADDRESS,
@@ -129,6 +139,9 @@ protected synchronized void serviceStart() throws Exception {
   @Override
   protected synchronized void serviceStop() throws Exception {
     stopServer();
+    if (this.configurationProvider != null) {
+      configurationProvider.close();
+    }
     super.serviceStop();
   }
@@ -295,23 +308,28 @@ public synchronized HAServiceStatus getServiceStatus() throws IOException {
   @Override
   public RefreshQueuesResponse refreshQueues(RefreshQueuesRequest request)
       throws YarnException, StandbyException {
-    UserGroupInformation user = checkAcls("refreshQueues");
+    String argName = "refreshQueues";
+    UserGroupInformation user = checkAcls(argName);
     if (!isRMActive()) {
-      RMAuditLogger.logFailure(user.getShortUserName(), "refreshQueues",
+      RMAuditLogger.logFailure(user.getShortUserName(), argName,
           adminAcl.toString(), "AdminService",
           "ResourceManager is not active. Can not refresh queues.");
       throwStandbyException();
     }
+    RefreshQueuesResponse response =
+        recordFactory.newRecordInstance(RefreshQueuesResponse.class);
     try {
-      rmContext.getScheduler().reinitialize(getConfig(), this.rmContext);
-      RMAuditLogger.logSuccess(user.getShortUserName(), "refreshQueues",
+      Configuration conf =
+          getConfiguration(YarnConfiguration.CS_CONFIGURATION_FILE);
+      rmContext.getScheduler().reinitialize(conf, this.rmContext);
+      RMAuditLogger.logSuccess(user.getShortUserName(), argName,
           "AdminService");
-      return recordFactory.newRecordInstance(RefreshQueuesResponse.class);
+      return response;
     } catch (IOException ioe) {
       LOG.info("Exception refreshing queues ", ioe);
-      RMAuditLogger.logFailure(user.getShortUserName(), "refreshQueues",
+      RMAuditLogger.logFailure(user.getShortUserName(), argName,
           adminAcl.toString(), "AdminService",
           "Exception refreshing queues");
       throw RPCUtil.getRemoteException(ioe);
@@ -346,21 +364,22 @@ public RefreshNodesResponse refreshNodes(RefreshNodesRequest request)
   @Override
   public RefreshSuperUserGroupsConfigurationResponse refreshSuperUserGroupsConfiguration(
       RefreshSuperUserGroupsConfigurationRequest request)
-      throws YarnException, StandbyException {
-    UserGroupInformation user = checkAcls("refreshSuperUserGroupsConfiguration");
+      throws YarnException, IOException {
+    String argName = "refreshSuperUserGroupsConfiguration";
+    UserGroupInformation user = checkAcls(argName);
+    // TODO (YARN-1459): Revisit handling super-user-groups on Standby RM
     if (!isRMActive()) {
-      RMAuditLogger.logFailure(user.getShortUserName(),
-          "refreshSuperUserGroupsConfiguration",
+      RMAuditLogger.logFailure(user.getShortUserName(), argName,
           adminAcl.toString(), "AdminService",
           "ResourceManager is not active. Can not refresh super-user-groups.");
       throwStandbyException();
     }
-    ProxyUsers.refreshSuperUserGroupsConfiguration(new Configuration());
+    Configuration conf =
+        getConfiguration(YarnConfiguration.CORE_SITE_CONFIGURATION_FILE);
+    ProxyUsers.refreshSuperUserGroupsConfiguration(conf);
     RMAuditLogger.logSuccess(user.getShortUserName(),
-        "refreshSuperUserGroupsConfiguration", "AdminService");
+        argName, "AdminService");
     return recordFactory.newRecordInstance(
         RefreshSuperUserGroupsConfigurationResponse.class);
@@ -391,14 +410,22 @@ public RefreshUserToGroupsMappingsResponse refreshUserToGroupsMappings(
   @Override
   public RefreshAdminAclsResponse refreshAdminAcls(
-      RefreshAdminAclsRequest request) throws YarnException {
-    UserGroupInformation user = checkAcls("refreshAdminAcls");
+      RefreshAdminAclsRequest request) throws YarnException, IOException {
+    String argName = "refreshAdminAcls";
+    UserGroupInformation user = checkAcls(argName);
-    Configuration conf = new Configuration();
+    if (!isRMActive()) {
+      RMAuditLogger.logFailure(user.getShortUserName(), argName,
+          adminAcl.toString(), "AdminService",
+          "ResourceManager is not active. Can not refresh user-groups.");
+      throwStandbyException();
+    }
+    Configuration conf =
+        getConfiguration(YarnConfiguration.YARN_SITE_XML_FILE);
     adminAcl = new AccessControlList(conf.get(
         YarnConfiguration.YARN_ADMIN_ACL,
         YarnConfiguration.DEFAULT_YARN_ADMIN_ACL));
-    RMAuditLogger.logSuccess(user.getShortUserName(), "refreshAdminAcls",
+    RMAuditLogger.logSuccess(user.getShortUserName(), argName,
         "AdminService");
     return recordFactory.newRecordInstance(RefreshAdminAclsResponse.class);
@@ -406,9 +433,8 @@ public RefreshAdminAclsResponse refreshAdminAcls(
   @Override
   public RefreshServiceAclsResponse refreshServiceAcls(
-      RefreshServiceAclsRequest request) throws YarnException {
-    Configuration conf = new Configuration();
-    if (!conf.getBoolean(
+      RefreshServiceAclsRequest request) throws YarnException, IOException {
+    if (!getConfig().getBoolean(
         CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHORIZATION,
         false)) {
       throw RPCUtil.getRemoteException(
@@ -417,26 +443,37 @@ public RefreshServiceAclsResponse refreshServiceAcls(
           ") not enabled."));
     }
+    String argName = "refreshServiceAcls";
+    if (!isRMActive()) {
+      RMAuditLogger.logFailure(UserGroupInformation.getCurrentUser()
+          .getShortUserName(), argName,
+          adminAcl.toString(), "AdminService",
+          "ResourceManager is not active. Can not refresh Service ACLs.");
+      throwStandbyException();
+    }
+
     PolicyProvider policyProvider = new RMPolicyProvider();
+    Configuration conf =
+        getConfiguration(YarnConfiguration.HADOOP_POLICY_CONFIGURATION_FILE);
     refreshServiceAcls(conf, policyProvider);
-    if (isRMActive()) {
-      rmContext.getClientRMService().refreshServiceAcls(conf, policyProvider);
-      rmContext.getApplicationMasterService().refreshServiceAcls(
-          conf, policyProvider);
-      rmContext.getResourceTrackerService().refreshServiceAcls(
-          conf, policyProvider);
-    } else {
-      LOG.warn("ResourceManager is not active. Not refreshing ACLs for " +
-          "Clients, ApplicationMasters and NodeManagers");
-    }
+    rmContext.getClientRMService().refreshServiceAcls(conf, policyProvider);
+    rmContext.getApplicationMasterService().refreshServiceAcls(
+        conf, policyProvider);
+    rmContext.getResourceTrackerService().refreshServiceAcls(
+        conf, policyProvider);
     return recordFactory.newRecordInstance(RefreshServiceAclsResponse.class);
   }
 
-  void refreshServiceAcls(Configuration configuration,
+  synchronized void refreshServiceAcls(Configuration configuration,
       PolicyProvider policyProvider) {
-    this.server.refreshServiceAcl(configuration, policyProvider);
+    if (this.configurationProvider instanceof LocalConfigurationProvider) {
+      this.server.refreshServiceAcl(configuration, policyProvider);
+    } else {
+      this.server.refreshServiceAclWithConfigration(configuration,
+          policyProvider);
+    }
   }
 
   @Override
@@ -484,4 +521,18 @@ public UpdateNodeResourceResponse updateNodeResource(
     return response;
   }
 
+  private synchronized Configuration getConfiguration(String confFileName)
+      throws YarnException, IOException {
+    return this.configurationProvider.getConfiguration(confFileName);
+  }
+
+  @VisibleForTesting
+  public AccessControlList getAccessControlList() {
+    return this.adminAcl;
+  }
+
+  @VisibleForTesting
+  public Server getServer() {
+    return this.server;
+  }
 }
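The AdminService hunks above all follow one pattern: instead of building a fresh local Configuration for each admin refresh call, the service asks a pluggable configuration provider for the named file (capacity-scheduler, core-site, yarn-site, hadoop-policy) and, when the provider is not the LocalConfigurationProvider, pushes the resulting Configuration into the RPC server via refreshServiceAclWithConfigration. The sketch below models that provider pattern in plain Java so the flow is easier to follow outside the patch; ConfigProvider, LocalFileConfigProvider, the factory method, and the config.provider.class key are hypothetical stand-ins for illustration, not the real YARN classes or keys.

import java.io.FileInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.Arrays;
import java.util.Properties;

// Hypothetical stand-ins for the provider pattern in the diff above: a
// pluggable source of named configuration files, selected by a config key,
// with a "local file" implementation as the default.
interface ConfigProvider {
  void init(Properties bootstrapConf) throws IOException;
  Properties getConfiguration(String confFileName) throws IOException;
  void close() throws IOException;
}

class LocalFileConfigProvider implements ConfigProvider {
  @Override
  public void init(Properties bootstrapConf) {
    // Nothing to do: files are read straight from the local disk on demand.
  }

  @Override
  public Properties getConfiguration(String confFileName) throws IOException {
    Properties props = new Properties();
    try (InputStream in = new FileInputStream(confFileName)) {
      props.load(in);
    }
    return props;
  }

  @Override
  public void close() {
    // No resources held.
  }
}

public class ConfigProviderFactoryDemo {
  /** Pick the provider class named by the bootstrap configuration. */
  static ConfigProvider getConfigProvider(Properties bootstrapConf) throws Exception {
    String className = bootstrapConf.getProperty(
        "config.provider.class", LocalFileConfigProvider.class.getName());
    return (ConfigProvider) Class.forName(className)
        .getDeclaredConstructor().newInstance();
  }

  public static void main(String[] args) throws Exception {
    // Write a tiny properties file so the demo is self-contained.
    Path file = Files.createTempFile("demo-policy", ".properties");
    Files.write(file, Arrays.asList("security.client.protocol.acl=*"));

    Properties bootstrapConf = new Properties();
    ConfigProvider provider = getConfigProvider(bootstrapConf);
    provider.init(bootstrapConf);
    try {
      // An admin "refresh" re-reads the file through the provider instead of
      // constructing a new local Configuration, mirroring the patched flow.
      Properties acls = provider.getConfiguration(file.toString());
      System.out.println("Loaded " + acls.size() + " entries");
    } finally {
      provider.close();
    }
  }
}

A remote implementation of the same interface could fetch the file from a shared store, which is what makes the refresh meaningful on an HA ResourceManager, while the local implementation preserves the old read-from-disk behaviour; the patch selects between the two through the provider class named by YarnConfiguration.RM_CONFIGURATION_PROVIDER_CLASS. The hunks that follow belong to a second file in the same commit, ApplicationMasterService.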
@@ -39,6 +39,7 @@
 import org.apache.hadoop.security.token.TokenIdentifier;
 import org.apache.hadoop.service.AbstractService;
 import org.apache.hadoop.util.StringUtils;
+import org.apache.hadoop.yarn.LocalConfigurationProvider;
 import org.apache.hadoop.yarn.api.ApplicationMasterProtocol;
 import org.apache.hadoop.yarn.api.protocolrecords.AllocateRequest;
 import org.apache.hadoop.yarn.api.protocolrecords.AllocateResponse;
@@ -86,6 +87,8 @@
 import org.apache.hadoop.yarn.server.resourcemanager.security.authorize.RMPolicyProvider;
 import org.apache.hadoop.yarn.server.utils.BuilderUtils;
 
+import com.google.common.annotations.VisibleForTesting;
+
 @SuppressWarnings("unchecked")
 @Private
 public class ApplicationMasterService extends AbstractService implements
@@ -102,6 +105,7 @@ public class ApplicationMasterService extends AbstractService implements
   private final AllocateResponse resync =
       recordFactory.newRecordInstance(AllocateResponse.class);
   private final RMContext rmContext;
+  private boolean useLocalConfigurationProvider;
 
   public ApplicationMasterService(RMContext rmContext, YarnScheduler scheduler) {
     super(ApplicationMasterService.class.getName());
@@ -111,6 +115,15 @@ public ApplicationMasterService(RMContext rmContext, YarnScheduler scheduler) {
     this.rmContext = rmContext;
   }
 
+  @Override
+  protected void serviceInit(Configuration conf) throws Exception {
+    this.useLocalConfigurationProvider =
+        (LocalConfigurationProvider.class.isAssignableFrom(conf.getClass(
+            YarnConfiguration.RM_CONFIGURATION_PROVIDER_CLASS,
+            LocalConfigurationProvider.class)));
+    super.serviceInit(conf);
+  }
+
   @Override
   protected void serviceStart() throws Exception {
     Configuration conf = getConfig();
@@ -578,7 +591,12 @@ public void unregisterAttempt(ApplicationAttemptId attemptId) {
   public void refreshServiceAcls(Configuration configuration,
       PolicyProvider policyProvider) {
-    this.server.refreshServiceAcl(configuration, policyProvider);
+    if (this.useLocalConfigurationProvider) {
+      this.server.refreshServiceAcl(configuration, policyProvider);
+    } else {
+      this.server.refreshServiceAclWithConfigration(configuration,
+          policyProvider);
+    }
   }
 
   @Override
@@ -604,4 +622,9 @@ public synchronized void setAllocateResponse(AllocateResponse response) {
       this.response = response;
     }
   }
+
+  @VisibleForTesting
+  public Server getServer() {
+    return this.server;
+  }
 }
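The new serviceInit in ApplicationMasterService records a single boolean: whether the class configured under YarnConfiguration.RM_CONFIGURATION_PROVIDER_CLASS is LocalConfigurationProvider (or a subclass of it), and refreshServiceAcls later branches on that flag. The check hinges on Class.isAssignableFrom; a minimal, self-contained illustration of that idiom follows, with made-up Provider/LocalProvider/RemoteProvider classes standing in for the real ones.

// Minimal illustration of the Class.isAssignableFrom(...) check used in the
// new serviceInit: treat the configured class as "local" when it is
// LocalProvider or any subclass of it. All names here are hypothetical.
public class IsAssignableFromDemo {
  static class Provider {}
  static class LocalProvider extends Provider {}
  static class CachingLocalProvider extends LocalProvider {}
  static class RemoteProvider extends Provider {}

  /** Mirrors the conf.getClass(key, defaultClass) + isAssignableFrom idiom. */
  static boolean usesLocalProvider(Class<? extends Provider> configuredClass) {
    return LocalProvider.class.isAssignableFrom(configuredClass);
  }

  public static void main(String[] args) {
    System.out.println(usesLocalProvider(LocalProvider.class));        // true
    System.out.println(usesLocalProvider(CachingLocalProvider.class)); // true (subclass)
    System.out.println(usesLocalProvider(RemoteProvider.class));       // false
  }
}

When the flag is false, the patch routes the refresh through refreshServiceAclWithConfigration, presumably so the RPC server takes its ACLs from the Configuration loaded through the provider rather than re-reading a local policy file.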
Some files were not shown because too many files have changed in this diff.