Merge trunk into HA branch.
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/HDFS-1623@1211749 13f79535-47bb-0310-9956-ffa450edef68
commit d9ea5bb489
@@ -55,6 +55,8 @@ import java.util.Set;
  * It does not have a default value.</li>
  * <li>kerberos.keytab: the keytab file containing the credentials for the Kerberos principal.
  * It does not have a default value.</li>
+ * <li>kerberos.name.rules: kerberos names rules to resolve principal names, see
+ * {@link KerberosName#setRules(String)}</li>
  * </ul>
  */
 public class KerberosAuthenticationHandler implements AuthenticationHandler {
@@ -151,6 +153,11 @@ public class KerberosAuthenticationHandler implements AuthenticationHandler {
       throw new ServletException("Keytab does not exist: " + keytab);
     }
 
+    String nameRules = config.getProperty(NAME_RULES, null);
+    if (nameRules != null) {
+      KerberosName.setRules(nameRules);
+    }
+
     Set<Principal> principals = new HashSet<Principal>();
     principals.add(new KerberosPrincipal(principal));
     Subject subject = new Subject(false, principals, new HashSet<Object>(), new HashSet<Object>());
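For context, a minimal sketch of how the new kerberos.name.rules handler property is consumed: init() reads NAME_RULES from the handler configuration and, when present, hands it to KerberosName.setRules(). The property key and the setRules() call come from the hunk above; the wrapper class and the EXAMPLE.COM rule string are illustrative only.

    import java.util.Properties;
    import org.apache.hadoop.security.authentication.util.KerberosName;

    public class NameRulesConfigSketch {
      public static void main(String[] args) {
        Properties config = new Properties();
        // Same property the handler javadoc documents as "kerberos.name.rules".
        config.setProperty("kerberos.name.rules",
            "RULE:[1:$1@$0](.*@EXAMPLE.COM)s/@.*//\nDEFAULT");

        // Mirror of what KerberosAuthenticationHandler.init() now does.
        String nameRules = config.getProperty("kerberos.name.rules", null);
        if (nameRules != null) {
          KerberosName.setRules(nameRules);
        }
      }
    }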
@@ -385,6 +385,15 @@ public class KerberosName {
     rules = parseRules(ruleString);
   }
 
+  /**
+   * Indicates if the name rules have been set.
+   *
+   * @return if the name rules have been set.
+   */
+  public static boolean hasRulesBeenSet() {
+    return rules != null;
+  }
+
   static void printRules() throws IOException {
     int i = 0;
     for(Rule r: rules) {
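A hedged sketch of the guard pattern the new hasRulesBeenSet() enables: callers can install fallback rules only when nothing has configured KerberosName yet (this is exactly how HadoopKerberosName.setConfiguration() uses it in a later hunk). The helper class name is invented.

    import org.apache.hadoop.security.authentication.util.KerberosName;

    public class RulesGuardSketch {
      // Install a default rule set only if no rules were set earlier,
      // e.g. by an authentication filter that ran first.
      static void ensureDefaultRules() {
        if (!KerberosName.hasRulesBeenSet()) {
          KerberosName.setRules("DEFAULT");
        }
      }
    }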
@@ -18,6 +18,7 @@ import org.apache.hadoop.security.authentication.client.AuthenticationException;
 import org.apache.hadoop.security.authentication.client.KerberosAuthenticator;
 import junit.framework.TestCase;
 import org.apache.commons.codec.binary.Base64;
+import org.apache.hadoop.security.authentication.util.KerberosName;
 import org.ietf.jgss.GSSContext;
 import org.ietf.jgss.GSSManager;
 import org.ietf.jgss.GSSName;
@@ -59,6 +60,35 @@ public class TestKerberosAuthenticationHandler extends TestCase {
     super.tearDown();
   }
 
+  public void testNameRules() throws Exception {
+    KerberosName kn = new KerberosName(KerberosTestUtils.getServerPrincipal());
+    assertEquals(KerberosTestUtils.getRealm(), kn.getRealm());
+
+    //destroy handler created in setUp()
+    handler.destroy();
+
+    KerberosName.setRules("RULE:[1:$1@$0](.*@FOO)s/@.*//\nDEFAULT");
+
+    handler = new KerberosAuthenticationHandler();
+    Properties props = new Properties();
+    props.setProperty(KerberosAuthenticationHandler.PRINCIPAL, KerberosTestUtils.getServerPrincipal());
+    props.setProperty(KerberosAuthenticationHandler.KEYTAB, KerberosTestUtils.getKeytabFile());
+    props.setProperty(KerberosAuthenticationHandler.NAME_RULES, "RULE:[1:$1@$0](.*@BAR)s/@.*//\nDEFAULT");
+    try {
+      handler.init(props);
+    } catch (Exception ex) {
+    }
+    kn = new KerberosName("bar@BAR");
+    assertEquals("bar", kn.getShortName());
+    kn = new KerberosName("bar@FOO");
+    try {
+      kn.getShortName();
+      fail();
+    }
+    catch (Exception ex) {
+    }
+  }
+
   public void testInit() throws Exception {
     assertEquals(KerberosTestUtils.getServerPrincipal(), handler.getPrincipal());
     assertEquals(KerberosTestUtils.getKeytabFile(), handler.getKeytab());
@@ -72,6 +72,8 @@ Trunk (unreleased changes)
     HADOOP-7876. Provided access to encoded key in DelegationKey for
     use in protobuf based RPCs. (suresh)
 
+    HADOOP-7886. Add toString to FileStatus. (SreeHari via jghoman)
+
   BUGS
 
     HADOOP-7606. Upgrade Jackson to version 1.7.1 to match the version required
@@ -115,6 +117,12 @@ Trunk (unreleased changes)
 
     HADOOP-7874. native libs should be under lib/native/ dir. (tucu)
 
+    HADOOP-7887. KerberosAuthenticatorHandler is not setting
+    KerberosName name rules from configuration. (tucu)
+
+    HADOOP-7888. TestFailoverProxy fails intermittently on trunk. (Jason Lowe
+    via atm)
+
   OPTIMIZATIONS
 
     HADOOP-7761. Improve the performance of raw comparisons. (todd)
@@ -331,4 +331,23 @@ public class FileStatus implements Writable, Comparable {
   public int hashCode() {
     return getPath().hashCode();
   }
+
+  @Override
+  public String toString() {
+    StringBuilder sb = new StringBuilder();
+    sb.append(getClass().getSimpleName());
+    sb.append("{");
+    sb.append("path=" + path);
+    sb.append("; isDirectory=" + isdir);
+    if(!isDirectory()){
+      sb.append("; length=" + length);
+      sb.append("; replication=" + block_replication);
+      sb.append("; blocksize=" + blocksize);
+    }
+    sb.append("; owner=" + owner);
+    sb.append("; group=" + group);
+    sb.append("; permission=" + permission);
+    sb.append("}");
+    return sb.toString();
+  }
 }
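An illustrative (not part of the commit) use of the new FileStatus#toString(): the output layout follows the StringBuilder calls above, and for a directory the length/replication/blocksize fields are skipped. The path and the expected output line are invented.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileStatus;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class FileStatusToStringSketch {
      public static void main(String[] args) throws Exception {
        FileSystem fs = FileSystem.get(new Configuration());
        FileStatus status = fs.getFileStatus(new Path("/tmp"));  // hypothetical path
        // Expected shape, per the appends above, e.g.:
        // FileStatus{path=...; isDirectory=true; owner=...; group=...; permission=...}
        System.out.println(status.toString());
      }
    }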
@@ -103,15 +103,12 @@ class RetryInvocationHandler implements InvocationHandler, Closeable {
         if (invocationAttemptFailoverCount == proxyProviderFailoverCount) {
           proxyProvider.performFailover(currentProxy);
           proxyProviderFailoverCount++;
+          currentProxy = proxyProvider.getProxy();
         } else {
           LOG.warn("A failover has occurred since the start of this method"
               + " invocation attempt.");
         }
       }
-      // The call to getProxy() could technically only be made in the event
-      // performFailover() is called, but it needs to be out here for the
-      // purpose of testing.
-      currentProxy = proxyProvider.getProxy();
       invocationFailoverCount++;
     }
     if(LOG.isDebugEnabled()) {
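The hunk above (part of HADOOP-7888) moves the getProxy() refresh inside the branch that actually performed a failover, so a thread that merely observed another thread's failover reuses the already-refreshed proxy instead of fetching a new one. A generic, hedged sketch of that guarded-failover idiom; the class and method names are illustrative, not Hadoop APIs.

    import java.util.function.Supplier;

    public class GuardedFailoverSketch<T> {
      private final Object lock = new Object();
      private T currentProxy;       // proxy currently in use
      private long failoverCount;   // bumped once per real failover

      public GuardedFailoverSketch(T initialProxy) {
        this.currentProxy = initialProxy;
      }

      // Called by a thread whose failed attempt observed failoverCount == observedCount.
      public T handleFailure(long observedCount, Supplier<T> proxySupplier) {
        synchronized (lock) {
          if (observedCount == failoverCount) {
            // First thread to react: perform the failover and refresh the proxy.
            failoverCount++;
            currentProxy = proxySupplier.get();
          }
          // Otherwise another thread already failed over; just reuse its proxy.
          return currentProxy;
        }
      }
    }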
@@ -56,12 +56,19 @@ public class HadoopKerberosName extends KerberosName {
   }
   /**
    * Set the static configuration to get the rules.
+   * <p/>
+   * IMPORTANT: This method does a NOP if the rules have been set already.
+   * If there is a need to reset the rules, the {@link KerberosName#setRules(String)}
+   * method should be invoked directly.
+   *
    * @param conf the new configuration
    * @throws IOException
    */
   public static void setConfiguration(Configuration conf) throws IOException {
-    String ruleString = conf.get("hadoop.security.auth_to_local", "DEFAULT");
-    setRules(ruleString);
+    if (!hasRulesBeenSet()) {
+      String ruleString = conf.get("hadoop.security.auth_to_local", "DEFAULT");
+      setRules(ruleString);
+    }
   }
 
   public static void main(String[] args) throws Exception {
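A hedged usage sketch of the changed setConfiguration(): because of the new hasRulesBeenSet() guard it is a no-op when rules were already installed (for example by the authentication handler earlier in this diff). The hadoop.security.auth_to_local key comes from the hunk above; the realm, rule string and expected output are invented.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.security.HadoopKerberosName;

    public class AuthToLocalSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        conf.set("hadoop.security.auth_to_local",
            "RULE:[1:$1@$0](.*@EXAMPLE.COM)s/@.*//\nDEFAULT");

        // No-op if rules were already set elsewhere; otherwise installs the rules above.
        HadoopKerberosName.setConfiguration(conf);

        HadoopKerberosName name = new HadoopKerberosName("alice@EXAMPLE.COM");
        System.out.println(name.getShortName());  // "alice" under the rule above
      }
    }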
@@ -36,34 +36,18 @@ public class TestFailoverProxy {
     private Object impl1;
     private Object impl2;
 
-    private boolean latchEnabled = false;
-    private CountDownLatch getProxyLatch;
     private int failoversOccurred = 0;
 
     public FlipFlopProxyProvider(Class<?> iface, Object activeImpl,
-        Object standbyImpl, int getProxyCountDown) {
+        Object standbyImpl) {
       this.iface = iface;
       this.impl1 = activeImpl;
       this.impl2 = standbyImpl;
       currentlyActive = impl1;
-      getProxyLatch = new CountDownLatch(getProxyCountDown);
-    }
-
-    public FlipFlopProxyProvider(Class<?> iface, Object activeImpl,
-        Object standbyImpl) {
-      this(iface, activeImpl, standbyImpl, 0);
     }
 
     @Override
     public Object getProxy() {
-      if (latchEnabled) {
-        getProxyLatch.countDown();
-        try {
-          getProxyLatch.await();
-        } catch (InterruptedException e) {
-          throw new RuntimeException(e);
-        }
-      }
       return currentlyActive;
     }
 
@@ -83,10 +67,6 @@ public class TestFailoverProxy {
       // Nothing to do.
     }
 
-    public void setLatchEnabled(boolean latchEnabled) {
-      this.latchEnabled = latchEnabled;
-    }
-
     public int getFailoversOccurred() {
       return failoversOccurred;
     }
@@ -214,6 +194,32 @@ public class TestFailoverProxy {
     assertEquals("impl2", unreliable.succeedsOnceThenFailsReturningStringIdempotent());
   }
 
+  private static class SynchronizedUnreliableImplementation extends UnreliableImplementation {
+
+    private CountDownLatch methodLatch;
+
+    public SynchronizedUnreliableImplementation(String identifier,
+        TypeOfExceptionToFailWith exceptionToFailWith, int threadCount) {
+      super(identifier, exceptionToFailWith);
+
+      methodLatch = new CountDownLatch(threadCount);
+    }
+
+    @Override
+    public String failsIfIdentifierDoesntMatch(String identifier)
+        throws UnreliableException, StandbyException, IOException {
+      // Wait until all threads are trying to invoke this method
+      methodLatch.countDown();
+      try {
+        methodLatch.await();
+      } catch (InterruptedException e) {
+        throw new RuntimeException(e);
+      }
+      return super.failsIfIdentifierDoesntMatch(identifier);
+    }
+
+  }
+
   private static class ConcurrentMethodThread extends Thread {
 
     private UnreliableInterface unreliable;
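The new SynchronizedUnreliableImplementation above uses a CountDownLatch sized to the number of participating threads, so both test threads are inside the failing method before either proceeds, which makes the concurrent-failover race deterministic. A stand-alone, hedged sketch of that latch-as-barrier idiom (names invented):

    import java.util.concurrent.CountDownLatch;

    public class LatchBarrierSketch {
      public static void main(String[] args) throws InterruptedException {
        final int threads = 2;
        final CountDownLatch allArrived = new CountDownLatch(threads);

        Runnable task = () -> {
          allArrived.countDown();          // announce arrival
          try {
            allArrived.await();            // block until every thread has arrived
          } catch (InterruptedException e) {
            throw new RuntimeException(e);
          }
          // Both threads reach this point together, mimicking the simultaneous
          // failures the test wants to provoke.
          System.out.println(Thread.currentThread().getName() + " proceeding");
        };

        Thread t1 = new Thread(task);
        Thread t2 = new Thread(task);
        t1.start();
        t2.start();
        t1.join();
        t2.join();
      }
    }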
@@ -240,11 +246,11 @@ public class TestFailoverProxy {
   public void testConcurrentMethodFailures() throws InterruptedException {
     FlipFlopProxyProvider proxyProvider = new FlipFlopProxyProvider(
         UnreliableInterface.class,
-        new UnreliableImplementation("impl1",
-            TypeOfExceptionToFailWith.STANDBY_EXCEPTION),
+        new SynchronizedUnreliableImplementation("impl1",
+            TypeOfExceptionToFailWith.STANDBY_EXCEPTION,
+            2),
         new UnreliableImplementation("impl2",
-            TypeOfExceptionToFailWith.STANDBY_EXCEPTION),
-        2);
+            TypeOfExceptionToFailWith.STANDBY_EXCEPTION));
 
     final UnreliableInterface unreliable = (UnreliableInterface)RetryProxy
       .create(UnreliableInterface.class, proxyProvider,
@@ -253,9 +259,6 @@ public class TestFailoverProxy {
     ConcurrentMethodThread t1 = new ConcurrentMethodThread(unreliable);
     ConcurrentMethodThread t2 = new ConcurrentMethodThread(unreliable);
 
-    // Getting a proxy will now wait on a latch.
-    proxyProvider.setLatchEnabled(true);
-
     t1.start();
     t2.start();
     t1.join();
@@ -15,6 +15,15 @@ Trunk (unreleased changes)
 
     HDFS-2581. Implement protobuf service for JournalProtocol. (suresh)
 
+    HDFS-2618. Implement protobuf service for NamenodeProtocol. (suresh)
+
+    HDFS-2629. Implement protobuf service for InterDatanodeProtocol. (suresh)
+
+    HDFS-2636. Implement protobuf service for ClientDatanodeProtocol. (suresh)
+
+    HDFS-2430. The number of failed or low-resource volumes the NN can tolerate
+    should be configurable. (atm)
+
   IMPROVEMENTS
 
     HADOOP-7524 Change RPC to allow multiple protocols including multuple
@@ -130,6 +130,8 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
   public static final boolean DFS_NAMENODE_SUPPORT_ALLOW_FORMAT_DEFAULT = true;
   public static final String DFS_NAMENODE_NUM_CHECKPOINTS_RETAINED_KEY = "dfs.namenode.num.checkpoints.retained";
   public static final int DFS_NAMENODE_NUM_CHECKPOINTS_RETAINED_DEFAULT = 2;
+  public static final String DFS_NAMENODE_EDITS_DIR_MINIMUM_KEY = "dfs.namenode.edits.dir.minimum";
+  public static final int DFS_NAMENODE_EDITS_DIR_MINIMUM_DEFAULT = 1;
 
   public static final String DFS_LIST_LIMIT = "dfs.ls.limit";
   public static final int DFS_LIST_LIMIT_DEFAULT = 1000;
@@ -164,6 +166,7 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
   public static final String DFS_NAMENODE_EDITS_DIR_KEY = "dfs.namenode.edits.dir";
   public static final String DFS_NAMENODE_SHARED_EDITS_DIR_KEY = "dfs.namenode.shared.edits.dir";
   public static final String DFS_NAMENODE_EDITS_PLUGIN_PREFIX = "dfs.namenode.edits.journal-plugin";
+  public static final String DFS_NAMENODE_EDITS_DIR_REQUIRED_KEY = "dfs.namenode.edits.dir.required";
   public static final String DFS_CLIENT_READ_PREFETCH_SIZE_KEY = "dfs.client.read.prefetch.size";
   public static final String DFS_CLIENT_RETRY_WINDOW_BASE= "dfs.client.retry.window.base";
   public static final String DFS_METRICS_SESSION_ID_KEY = "dfs.metrics.session-id";
@@ -306,6 +309,8 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
   public static final String DFS_NAMENODE_DU_RESERVED_KEY = "dfs.namenode.resource.du.reserved";
   public static final long DFS_NAMENODE_DU_RESERVED_DEFAULT = 1024 * 1024 * 100; // 100 MB
   public static final String DFS_NAMENODE_CHECKED_VOLUMES_KEY = "dfs.namenode.resource.checked.volumes";
+  public static final String DFS_NAMENODE_CHECKED_VOLUMES_MINIMUM_KEY = "dfs.namenode.resource.checked.volumes.minimum";
+  public static final int DFS_NAMENODE_CHECKED_VOLUMES_MINIMUM_DEFAULT = 1;
   public static final String DFS_WEB_AUTHENTICATION_KERBEROS_PRINCIPAL_KEY = "dfs.web.authentication.kerberos.principal";
   public static final String DFS_WEB_AUTHENTICATION_KERBEROS_KEYTAB_KEY = "dfs.web.authentication.kerberos.keytab";
 
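A hedged sketch of how the new keys are typically read back from a Configuration (key names and defaults come from the hunks above; the surrounding program is illustrative):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.DFSConfigKeys;

    public class NewKeysSketch {
      public static void main(String[] args) {
        Configuration conf = new Configuration();

        int minEditsDirs = conf.getInt(
            DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_MINIMUM_KEY,        // "dfs.namenode.edits.dir.minimum"
            DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_MINIMUM_DEFAULT);   // 1

        int minCheckedVolumes = conf.getInt(
            DFSConfigKeys.DFS_NAMENODE_CHECKED_VOLUMES_MINIMUM_KEY,  // "dfs.namenode.resource.checked.volumes.minimum"
            DFSConfigKeys.DFS_NAMENODE_CHECKED_VOLUMES_MINIMUM_DEFAULT); // 1

        String requiredEditsDirs =
            conf.get(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_REQUIRED_KEY); // null if unset

        System.out.println(minEditsDirs + " " + minCheckedVolumes + " " + requiredEditsDirs);
      }
    }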
@@ -66,6 +66,11 @@ public class BlockLocalPathInfo implements Writable {
    */
   public String getBlockPath() {return localBlockPath;}
 
+  /**
+   * @return the Block
+   */
+  public ExtendedBlock getBlock() { return block;}
+
   /**
    * Get the Block metadata file.
    * @return Block metadata file.
[File diff suppressed because it is too large]
@@ -8,7 +8,7 @@ public final class HdfsProtos {
   public static void registerAllExtensions(
       com.google.protobuf.ExtensionRegistry registry) {
   }
-  public enum ReplicaState
+  public enum ReplicaStateProto
       implements com.google.protobuf.ProtocolMessageEnum {
     FINALIZED(0, 0),
     RBW(1, 1),
@@ -26,7 +26,7 @@ public final class HdfsProtos {
 
     public final int getNumber() { return value; }
 
-    public static ReplicaState valueOf(int value) {
+    public static ReplicaStateProto valueOf(int value) {
       switch (value) {
         case 0: return FINALIZED;
         case 1: return RBW;
@@ -37,15 +37,15 @@ public final class HdfsProtos {
       }
     }
 
-    public static com.google.protobuf.Internal.EnumLiteMap<ReplicaState>
+    public static com.google.protobuf.Internal.EnumLiteMap<ReplicaStateProto>
         internalGetValueMap() {
       return internalValueMap;
     }
-    private static com.google.protobuf.Internal.EnumLiteMap<ReplicaState>
+    private static com.google.protobuf.Internal.EnumLiteMap<ReplicaStateProto>
         internalValueMap =
-          new com.google.protobuf.Internal.EnumLiteMap<ReplicaState>() {
-            public ReplicaState findValueByNumber(int number) {
-              return ReplicaState.valueOf(number);
+          new com.google.protobuf.Internal.EnumLiteMap<ReplicaStateProto>() {
+            public ReplicaStateProto findValueByNumber(int number) {
+              return ReplicaStateProto.valueOf(number);
             }
           };
 
@@ -62,11 +62,11 @@ public final class HdfsProtos {
       return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.getDescriptor().getEnumTypes().get(0);
     }
 
-    private static final ReplicaState[] VALUES = {
+    private static final ReplicaStateProto[] VALUES = {
       FINALIZED, RBW, RWR, RUR, TEMPORARY,
     };
 
-    public static ReplicaState valueOf(
+    public static ReplicaStateProto valueOf(
         com.google.protobuf.Descriptors.EnumValueDescriptor desc) {
       if (desc.getType() != getDescriptor()) {
         throw new java.lang.IllegalArgumentException(
@@ -78,12 +78,12 @@ public final class HdfsProtos {
     private final int index;
     private final int value;
 
-    private ReplicaState(int index, int value) {
+    private ReplicaStateProto(int index, int value) {
       this.index = index;
       this.value = value;
     }
 
-    // @@protoc_insertion_point(enum_scope:ReplicaState)
+    // @@protoc_insertion_point(enum_scope:ReplicaStateProto)
   }
 
   public interface ExtendedBlockProtoOrBuilder
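The hunks above rename the generated enum ReplicaState to ReplicaStateProto (HdfsProtos.java is protoc-generated, so the rename appears throughout the file). A hedged sketch of what calling code looks like after the rename; only valueOf(int), getNumber() and the enum constants shown in the diff are relied on:

    import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReplicaStateProto;

    public class ReplicaStateProtoSketch {
      public static void main(String[] args) {
        // Enum constants are unchanged; only the enum type was renamed.
        ReplicaStateProto state = ReplicaStateProto.valueOf(0);   // FINALIZED
        System.out.println(state + " has wire value " + state.getNumber());
      }
    }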
@@ -14903,15 +14903,10 @@ public final class HdfsProtos {
     org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto getBlock();
     org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProtoOrBuilder getBlockOrBuilder();
 
-    // repeated .DatanodeIDProto datanodeIDs = 2;
-    java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto>
-        getDatanodeIDsList();
-    org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto getDatanodeIDs(int index);
+    // repeated string datanodeIDs = 2;
+    java.util.List<String> getDatanodeIDsList();
     int getDatanodeIDsCount();
-    java.util.List<? extends org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProtoOrBuilder>
-        getDatanodeIDsOrBuilderList();
-    org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProtoOrBuilder getDatanodeIDsOrBuilder(
-        int index);
+    String getDatanodeIDs(int index);
   }
   public static final class BlockWithLocationsProto extends
       com.google.protobuf.GeneratedMessage
@@ -14955,30 +14950,23 @@ public final class HdfsProtos {
       return block_;
     }
 
-    // repeated .DatanodeIDProto datanodeIDs = 2;
+    // repeated string datanodeIDs = 2;
     public static final int DATANODEIDS_FIELD_NUMBER = 2;
-    private java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto> datanodeIDs_;
-    public java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto> getDatanodeIDsList() {
-      return datanodeIDs_;
-    }
-    public java.util.List<? extends org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProtoOrBuilder>
-        getDatanodeIDsOrBuilderList() {
+    private com.google.protobuf.LazyStringList datanodeIDs_;
+    public java.util.List<String>
+        getDatanodeIDsList() {
       return datanodeIDs_;
     }
     public int getDatanodeIDsCount() {
       return datanodeIDs_.size();
     }
-    public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto getDatanodeIDs(int index) {
-      return datanodeIDs_.get(index);
-    }
-    public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProtoOrBuilder getDatanodeIDsOrBuilder(
-        int index) {
+    public String getDatanodeIDs(int index) {
       return datanodeIDs_.get(index);
     }
 
     private void initFields() {
       block_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.getDefaultInstance();
-      datanodeIDs_ = java.util.Collections.emptyList();
+      datanodeIDs_ = com.google.protobuf.LazyStringArrayList.EMPTY;
     }
     private byte memoizedIsInitialized = -1;
     public final boolean isInitialized() {
@@ -14993,12 +14981,6 @@ public final class HdfsProtos {
         memoizedIsInitialized = 0;
         return false;
       }
-      for (int i = 0; i < getDatanodeIDsCount(); i++) {
-        if (!getDatanodeIDs(i).isInitialized()) {
-          memoizedIsInitialized = 0;
-          return false;
-        }
-      }
       memoizedIsInitialized = 1;
       return true;
     }
@@ -15010,7 +14992,7 @@ public final class HdfsProtos {
         output.writeMessage(1, block_);
       }
       for (int i = 0; i < datanodeIDs_.size(); i++) {
-        output.writeMessage(2, datanodeIDs_.get(i));
+        output.writeBytes(2, datanodeIDs_.getByteString(i));
       }
       getUnknownFields().writeTo(output);
     }
@@ -15025,9 +15007,14 @@ public final class HdfsProtos {
         size += com.google.protobuf.CodedOutputStream
           .computeMessageSize(1, block_);
       }
-      for (int i = 0; i < datanodeIDs_.size(); i++) {
-        size += com.google.protobuf.CodedOutputStream
-          .computeMessageSize(2, datanodeIDs_.get(i));
+      {
+        int dataSize = 0;
+        for (int i = 0; i < datanodeIDs_.size(); i++) {
+          dataSize += com.google.protobuf.CodedOutputStream
+            .computeBytesSizeNoTag(datanodeIDs_.getByteString(i));
+        }
+        size += dataSize;
+        size += 1 * getDatanodeIDsList().size();
       }
       size += getUnknownFields().getSerializedSize();
       memoizedSerializedSize = size;
@@ -15185,7 +15172,6 @@ public final class HdfsProtos {
       private void maybeForceBuilderInitialization() {
         if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
           getBlockFieldBuilder();
-          getDatanodeIDsFieldBuilder();
         }
       }
       private static Builder create() {
@@ -15200,12 +15186,8 @@ public final class HdfsProtos {
           blockBuilder_.clear();
         }
         bitField0_ = (bitField0_ & ~0x00000001);
-        if (datanodeIDsBuilder_ == null) {
-          datanodeIDs_ = java.util.Collections.emptyList();
-          bitField0_ = (bitField0_ & ~0x00000002);
-        } else {
-          datanodeIDsBuilder_.clear();
-        }
+        datanodeIDs_ = com.google.protobuf.LazyStringArrayList.EMPTY;
+        bitField0_ = (bitField0_ & ~0x00000002);
         return this;
       }
 
@@ -15252,15 +15234,12 @@ public final class HdfsProtos {
         } else {
           result.block_ = blockBuilder_.build();
         }
-        if (datanodeIDsBuilder_ == null) {
-          if (((bitField0_ & 0x00000002) == 0x00000002)) {
-            datanodeIDs_ = java.util.Collections.unmodifiableList(datanodeIDs_);
+        if (((bitField0_ & 0x00000002) == 0x00000002)) {
+          datanodeIDs_ = new com.google.protobuf.UnmodifiableLazyStringList(
+              datanodeIDs_);
           bitField0_ = (bitField0_ & ~0x00000002);
-          }
-          result.datanodeIDs_ = datanodeIDs_;
-        } else {
-          result.datanodeIDs_ = datanodeIDsBuilder_.build();
         }
+        result.datanodeIDs_ = datanodeIDs_;
         result.bitField0_ = to_bitField0_;
         onBuilt();
         return result;
@@ -15280,31 +15259,15 @@ public final class HdfsProtos {
         if (other.hasBlock()) {
           mergeBlock(other.getBlock());
         }
-        if (datanodeIDsBuilder_ == null) {
-          if (!other.datanodeIDs_.isEmpty()) {
-            if (datanodeIDs_.isEmpty()) {
-              datanodeIDs_ = other.datanodeIDs_;
-              bitField0_ = (bitField0_ & ~0x00000002);
-            } else {
-              ensureDatanodeIDsIsMutable();
-              datanodeIDs_.addAll(other.datanodeIDs_);
-            }
-            onChanged();
-          }
-        } else {
-          if (!other.datanodeIDs_.isEmpty()) {
-            if (datanodeIDsBuilder_.isEmpty()) {
-              datanodeIDsBuilder_.dispose();
-              datanodeIDsBuilder_ = null;
-              datanodeIDs_ = other.datanodeIDs_;
-              bitField0_ = (bitField0_ & ~0x00000002);
-              datanodeIDsBuilder_ =
-                com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ?
-                  getDatanodeIDsFieldBuilder() : null;
-            } else {
-              datanodeIDsBuilder_.addAllMessages(other.datanodeIDs_);
-            }
+        if (!other.datanodeIDs_.isEmpty()) {
+          if (datanodeIDs_.isEmpty()) {
+            datanodeIDs_ = other.datanodeIDs_;
+            bitField0_ = (bitField0_ & ~0x00000002);
+          } else {
+            ensureDatanodeIDsIsMutable();
+            datanodeIDs_.addAll(other.datanodeIDs_);
           }
+          onChanged();
         }
         this.mergeUnknownFields(other.getUnknownFields());
         return this;
@@ -15319,12 +15282,6 @@ public final class HdfsProtos {
 
           return false;
         }
-        for (int i = 0; i < getDatanodeIDsCount(); i++) {
-          if (!getDatanodeIDs(i).isInitialized()) {
-
-            return false;
-          }
-        }
         return true;
       }
 
@@ -15361,9 +15318,8 @@ public final class HdfsProtos {
               break;
             }
             case 18: {
-              org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.Builder subBuilder = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.newBuilder();
-              input.readMessage(subBuilder, extensionRegistry);
-              addDatanodeIDs(subBuilder.buildPartial());
+              ensureDatanodeIDsIsMutable();
+              datanodeIDs_.add(input.readBytes());
               break;
             }
           }
@@ -15462,190 +15418,60 @@ public final class HdfsProtos {
         return blockBuilder_;
       }
 
-      // repeated .DatanodeIDProto datanodeIDs = 2;
-      private java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto> datanodeIDs_ =
-        java.util.Collections.emptyList();
+      // repeated string datanodeIDs = 2;
+      private com.google.protobuf.LazyStringList datanodeIDs_ = com.google.protobuf.LazyStringArrayList.EMPTY;
       private void ensureDatanodeIDsIsMutable() {
         if (!((bitField0_ & 0x00000002) == 0x00000002)) {
-          datanodeIDs_ = new java.util.ArrayList<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto>(datanodeIDs_);
+          datanodeIDs_ = new com.google.protobuf.LazyStringArrayList(datanodeIDs_);
           bitField0_ |= 0x00000002;
         }
       }
-      private com.google.protobuf.RepeatedFieldBuilder<
-          org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProtoOrBuilder> datanodeIDsBuilder_;
-
-      public java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto> getDatanodeIDsList() {
-        if (datanodeIDsBuilder_ == null) {
-          return java.util.Collections.unmodifiableList(datanodeIDs_);
-        } else {
-          return datanodeIDsBuilder_.getMessageList();
-        }
+      public java.util.List<String>
+          getDatanodeIDsList() {
+        return java.util.Collections.unmodifiableList(datanodeIDs_);
       }
       public int getDatanodeIDsCount() {
-        if (datanodeIDsBuilder_ == null) {
-          return datanodeIDs_.size();
-        } else {
-          return datanodeIDsBuilder_.getCount();
-        }
+        return datanodeIDs_.size();
       }
-      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto getDatanodeIDs(int index) {
-        if (datanodeIDsBuilder_ == null) {
-          return datanodeIDs_.get(index);
-        } else {
-          return datanodeIDsBuilder_.getMessage(index);
-        }
+      public String getDatanodeIDs(int index) {
+        return datanodeIDs_.get(index);
       }
       public Builder setDatanodeIDs(
-          int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto value) {
-        if (datanodeIDsBuilder_ == null) {
-          if (value == null) {
-            throw new NullPointerException();
-          }
-          ensureDatanodeIDsIsMutable();
-          datanodeIDs_.set(index, value);
-          onChanged();
-        } else {
-          datanodeIDsBuilder_.setMessage(index, value);
-        }
+          int index, String value) {
+        if (value == null) {
+          throw new NullPointerException();
+        }
+        ensureDatanodeIDsIsMutable();
+        datanodeIDs_.set(index, value);
+        onChanged();
         return this;
       }
-      public Builder setDatanodeIDs(
-          int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.Builder builderForValue) {
-        if (datanodeIDsBuilder_ == null) {
-          ensureDatanodeIDsIsMutable();
-          datanodeIDs_.set(index, builderForValue.build());
-          onChanged();
-        } else {
-          datanodeIDsBuilder_.setMessage(index, builderForValue.build());
-        }
-        return this;
-      }
-      public Builder addDatanodeIDs(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto value) {
-        if (datanodeIDsBuilder_ == null) {
-          if (value == null) {
-            throw new NullPointerException();
-          }
-          ensureDatanodeIDsIsMutable();
-          datanodeIDs_.add(value);
-          onChanged();
-        } else {
-          datanodeIDsBuilder_.addMessage(value);
-        }
-        return this;
-      }
-      public Builder addDatanodeIDs(
-          int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto value) {
-        if (datanodeIDsBuilder_ == null) {
-          if (value == null) {
-            throw new NullPointerException();
-          }
-          ensureDatanodeIDsIsMutable();
-          datanodeIDs_.add(index, value);
-          onChanged();
-        } else {
-          datanodeIDsBuilder_.addMessage(index, value);
-        }
-        return this;
-      }
-      public Builder addDatanodeIDs(
-          org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.Builder builderForValue) {
-        if (datanodeIDsBuilder_ == null) {
-          ensureDatanodeIDsIsMutable();
-          datanodeIDs_.add(builderForValue.build());
-          onChanged();
-        } else {
-          datanodeIDsBuilder_.addMessage(builderForValue.build());
-        }
-        return this;
-      }
-      public Builder addDatanodeIDs(
-          int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.Builder builderForValue) {
-        if (datanodeIDsBuilder_ == null) {
-          ensureDatanodeIDsIsMutable();
-          datanodeIDs_.add(index, builderForValue.build());
-          onChanged();
-        } else {
-          datanodeIDsBuilder_.addMessage(index, builderForValue.build());
-        }
+      public Builder addDatanodeIDs(String value) {
+        if (value == null) {
+          throw new NullPointerException();
+        }
+        ensureDatanodeIDsIsMutable();
+        datanodeIDs_.add(value);
+        onChanged();
         return this;
       }
       public Builder addAllDatanodeIDs(
-          java.lang.Iterable<? extends org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto> values) {
-        if (datanodeIDsBuilder_ == null) {
-          ensureDatanodeIDsIsMutable();
-          super.addAll(values, datanodeIDs_);
-          onChanged();
-        } else {
-          datanodeIDsBuilder_.addAllMessages(values);
-        }
+          java.lang.Iterable<String> values) {
+        ensureDatanodeIDsIsMutable();
+        super.addAll(values, datanodeIDs_);
+        onChanged();
         return this;
       }
       public Builder clearDatanodeIDs() {
-        if (datanodeIDsBuilder_ == null) {
-          datanodeIDs_ = java.util.Collections.emptyList();
-          bitField0_ = (bitField0_ & ~0x00000002);
-          onChanged();
-        } else {
-          datanodeIDsBuilder_.clear();
-        }
+        datanodeIDs_ = com.google.protobuf.LazyStringArrayList.EMPTY;
+        bitField0_ = (bitField0_ & ~0x00000002);
+        onChanged();
         return this;
       }
-      public Builder removeDatanodeIDs(int index) {
-        if (datanodeIDsBuilder_ == null) {
-          ensureDatanodeIDsIsMutable();
-          datanodeIDs_.remove(index);
-          onChanged();
-        } else {
-          datanodeIDsBuilder_.remove(index);
-        }
-        return this;
-      }
-      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.Builder getDatanodeIDsBuilder(
-          int index) {
-        return getDatanodeIDsFieldBuilder().getBuilder(index);
-      }
-      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProtoOrBuilder getDatanodeIDsOrBuilder(
-          int index) {
-        if (datanodeIDsBuilder_ == null) {
-          return datanodeIDs_.get(index); } else {
-          return datanodeIDsBuilder_.getMessageOrBuilder(index);
-        }
-      }
-      public java.util.List<? extends org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProtoOrBuilder>
-          getDatanodeIDsOrBuilderList() {
-        if (datanodeIDsBuilder_ != null) {
-          return datanodeIDsBuilder_.getMessageOrBuilderList();
-        } else {
-          return java.util.Collections.unmodifiableList(datanodeIDs_);
-        }
-      }
-      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.Builder addDatanodeIDsBuilder() {
-        return getDatanodeIDsFieldBuilder().addBuilder(
-            org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.getDefaultInstance());
-      }
-      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.Builder addDatanodeIDsBuilder(
-          int index) {
-        return getDatanodeIDsFieldBuilder().addBuilder(
-            index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.getDefaultInstance());
-      }
-      public java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.Builder>
-          getDatanodeIDsBuilderList() {
-        return getDatanodeIDsFieldBuilder().getBuilderList();
-      }
-      private com.google.protobuf.RepeatedFieldBuilder<
-          org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProtoOrBuilder>
-          getDatanodeIDsFieldBuilder() {
-        if (datanodeIDsBuilder_ == null) {
-          datanodeIDsBuilder_ = new com.google.protobuf.RepeatedFieldBuilder<
-              org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProtoOrBuilder>(
-                  datanodeIDs_,
-                  ((bitField0_ & 0x00000002) == 0x00000002),
-                  getParentForChildren(),
-                  isClean());
-          datanodeIDs_ = null;
-        }
-        return datanodeIDsBuilder_;
+      void addDatanodeIDs(com.google.protobuf.ByteString value) {
+        ensureDatanodeIDsIsMutable();
+        datanodeIDs_.add(value);
+        onChanged();
       }
 
       // @@protoc_insertion_point(builder_scope:BlockWithLocationsProto)
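These hunks change BlockWithLocationsProto.datanodeIDs from "repeated .DatanodeIDProto" to "repeated string", so the generated builder now takes plain strings. A hedged sketch of building such a message after the change: addDatanodeIDs(String) and getDatanodeIDsCount() appear in the diff above, while the BlockProto setter names are the usual protobuf-generated ones and are assumed here; all field values are invented.

    import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto;
    import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockWithLocationsProto;

    public class BlockWithLocationsSketch {
      public static void main(String[] args) {
        BlockProto block = BlockProto.newBuilder()
            .setBlockId(1L)        // fields per the descriptor: blockId, genStamp, numBytes
            .setGenStamp(1L)
            .setNumBytes(1024L)
            .build();

        BlockWithLocationsProto blockWithLocations = BlockWithLocationsProto.newBuilder()
            .setBlock(block)
            .addDatanodeIDs("datanode-1:50010")   // now a plain string, per addDatanodeIDs(String value)
            .addDatanodeIDs("datanode-2:50010")
            .build();

        System.out.println(blockWithLocations.getDatanodeIDsCount());   // 2
      }
    }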
@@ -20348,29 +20174,28 @@ public final class HdfsProtos {
       "ature\030\001 \002(\0132\031.CheckpointSignatureProto\022\031" +
       "\n\021needToReturnImage\030\002 \002(\010\"A\n\nBlockProto\022" +
       "\017\n\007blockId\030\001 \002(\004\022\020\n\010genStamp\030\002 \002(\004\022\020\n\010nu" +
-      "mBytes\030\003 \001(\004\"\\\n\027BlockWithLocationsProto\022",
-      "\032\n\005block\030\001 \002(\0132\013.BlockProto\022%\n\013datanodeI" +
-      "Ds\030\002 \003(\0132\020.DatanodeIDProto\"D\n\030BlocksWith" +
-      "LocationsProto\022(\n\006blocks\030\001 \003(\0132\030.BlockWi" +
-      "thLocationsProto\"8\n\022RemoteEditLogProto\022\021" +
-      "\n\tstartTxId\030\001 \002(\004\022\017\n\007endTxId\030\002 \002(\004\"?\n\032Re" +
-      "moteEditLogManifestProto\022!\n\004logs\030\001 \003(\0132\023" +
-      ".RemoteEditLogProto\"\203\001\n\022NamespaceInfoPro" +
-      "to\022\024\n\014buildVersion\030\001 \002(\t\022\032\n\022distUpgradeV" +
-      "ersion\030\002 \002(\r\022\023\n\013blockPoolID\030\003 \002(\t\022&\n\013sto" +
-      "rageInfo\030\004 \002(\0132\021.StorageInfoProto\"D\n\rBlo",
-      "ckKeyProto\022\r\n\005keyId\030\001 \002(\r\022\022\n\nexpiryDate\030" +
-      "\002 \002(\004\022\020\n\010keyBytes\030\003 \002(\014\"\254\001\n\026ExportedBloc" +
-      "kKeysProto\022\033\n\023isBlockTokenEnabled\030\001 \002(\010\022" +
-      "\031\n\021keyUpdateInterval\030\002 \002(\004\022\025\n\rtokenLifeT" +
-      "ime\030\003 \002(\004\022\"\n\ncurrentKey\030\004 \002(\0132\016.BlockKey" +
-      "Proto\022\037\n\007allKeys\030\005 \003(\0132\016.BlockKeyProto\"N" +
-      "\n\024RecoveringBlockProto\022\023\n\013newGenStamp\030\001 " +
-      "\002(\004\022!\n\005block\030\002 \002(\0132\022.LocatedBlockProto*G" +
-      "\n\014ReplicaState\022\r\n\tFINALIZED\020\000\022\007\n\003RBW\020\001\022\007" +
-      "\n\003RWR\020\002\022\007\n\003RUR\020\003\022\r\n\tTEMPORARY\020\004B6\n%org.a",
-      "pache.hadoop.hdfs.protocol.protoB\nHdfsPr" +
-      "otos\240\001\001"
+      "mBytes\030\003 \001(\004\"J\n\027BlockWithLocationsProto\022",
+      "\032\n\005block\030\001 \002(\0132\013.BlockProto\022\023\n\013datanodeI" +
+      "Ds\030\002 \003(\t\"D\n\030BlocksWithLocationsProto\022(\n\006" +
+      "blocks\030\001 \003(\0132\030.BlockWithLocationsProto\"8" +
+      "\n\022RemoteEditLogProto\022\021\n\tstartTxId\030\001 \002(\004\022" +
+      "\017\n\007endTxId\030\002 \002(\004\"?\n\032RemoteEditLogManifes" +
+      "tProto\022!\n\004logs\030\001 \003(\0132\023.RemoteEditLogProt" +
+      "o\"\203\001\n\022NamespaceInfoProto\022\024\n\014buildVersion" +
+      "\030\001 \002(\t\022\032\n\022distUpgradeVersion\030\002 \002(\r\022\023\n\013bl" +
+      "ockPoolID\030\003 \002(\t\022&\n\013storageInfo\030\004 \002(\0132\021.S" +
+      "torageInfoProto\"D\n\rBlockKeyProto\022\r\n\005keyI",
+      "d\030\001 \002(\r\022\022\n\nexpiryDate\030\002 \002(\004\022\020\n\010keyBytes\030" +
+      "\003 \002(\014\"\254\001\n\026ExportedBlockKeysProto\022\033\n\023isBl" +
+      "ockTokenEnabled\030\001 \002(\010\022\031\n\021keyUpdateInterv" +
+      "al\030\002 \002(\004\022\025\n\rtokenLifeTime\030\003 \002(\004\022\"\n\ncurre" +
+      "ntKey\030\004 \002(\0132\016.BlockKeyProto\022\037\n\007allKeys\030\005" +
+      " \003(\0132\016.BlockKeyProto\"N\n\024RecoveringBlockP" +
+      "roto\022\023\n\013newGenStamp\030\001 \002(\004\022!\n\005block\030\002 \002(\013" +
+      "2\022.LocatedBlockProto*L\n\021ReplicaStateProt" +
+      "o\022\r\n\tFINALIZED\020\000\022\007\n\003RBW\020\001\022\007\n\003RWR\020\002\022\007\n\003RU" +
+      "R\020\003\022\r\n\tTEMPORARY\020\004B6\n%org.apache.hadoop.",
+      "hdfs.protocol.protoB\nHdfsProtos\240\001\001"
     };
     com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner =
       new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() {
@@ -484,9 +484,9 @@ public final class InterDatanodeProtocolProtos {
   public interface InitReplicaRecoveryResponseProtoOrBuilder
       extends com.google.protobuf.MessageOrBuilder {
 
-    // required .ReplicaState state = 1;
+    // required .ReplicaStateProto state = 1;
     boolean hasState();
-    org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReplicaState getState();
+    org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReplicaStateProto getState();
 
     // required .BlockProto block = 2;
     boolean hasBlock();
@@ -522,13 +522,13 @@ public final class InterDatanodeProtocolProtos {
     }
 
     private int bitField0_;
-    // required .ReplicaState state = 1;
+    // required .ReplicaStateProto state = 1;
     public static final int STATE_FIELD_NUMBER = 1;
-    private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReplicaState state_;
+    private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReplicaStateProto state_;
     public boolean hasState() {
       return ((bitField0_ & 0x00000001) == 0x00000001);
     }
-    public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReplicaState getState() {
+    public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReplicaStateProto getState() {
       return state_;
     }
 
@@ -546,7 +546,7 @@ public final class InterDatanodeProtocolProtos {
     }
 
     private void initFields() {
-      state_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReplicaState.FINALIZED;
+      state_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReplicaStateProto.FINALIZED;
       block_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.getDefaultInstance();
     }
     private byte memoizedIsInitialized = -1;
@@ -763,7 +763,7 @@ public final class InterDatanodeProtocolProtos {
 
       public Builder clear() {
         super.clear();
-        state_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReplicaState.FINALIZED;
+        state_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReplicaStateProto.FINALIZED;
         bitField0_ = (bitField0_ & ~0x00000001);
         if (blockBuilder_ == null) {
           block_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.getDefaultInstance();
@@ -888,7 +888,7 @@ public final class InterDatanodeProtocolProtos {
             }
             case 8: {
               int rawValue = input.readEnum();
-              org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReplicaState value = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReplicaState.valueOf(rawValue);
+              org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReplicaStateProto value = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReplicaStateProto.valueOf(rawValue);
               if (value == null) {
                 unknownFields.mergeVarintField(1, rawValue);
               } else {
@@ -912,15 +912,15 @@ public final class InterDatanodeProtocolProtos {
 
       private int bitField0_;
 
-      // required .ReplicaState state = 1;
-      private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReplicaState state_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReplicaState.FINALIZED;
+      // required .ReplicaStateProto state = 1;
+      private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReplicaStateProto state_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReplicaStateProto.FINALIZED;
       public boolean hasState() {
         return ((bitField0_ & 0x00000001) == 0x00000001);
       }
-      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReplicaState getState() {
+      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReplicaStateProto getState() {
        return state_;
       }
-      public Builder setState(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReplicaState value) {
+      public Builder setState(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReplicaStateProto value) {
         if (value == null) {
           throw new NullPointerException();
         }
@@ -931,7 +931,7 @@ public final class InterDatanodeProtocolProtos {
       }
       public Builder clearState() {
         bitField0_ = (bitField0_ & ~0x00000001);
-        state_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReplicaState.FINALIZED;
+        state_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReplicaStateProto.FINALIZED;
         onChanged();
         return this;
       }
|
||||||
java.lang.String[] descriptorData = {
|
java.lang.String[] descriptorData = {
|
||||||
"\n\033InterDatanodeProtocol.proto\032\nhdfs.prot" +
|
"\n\033InterDatanodeProtocol.proto\032\nhdfs.prot" +
|
||||||
"o\"G\n\037InitReplicaRecoveryRequestProto\022$\n\005" +
|
"o\"G\n\037InitReplicaRecoveryRequestProto\022$\n\005" +
|
||||||
"block\030\001 \002(\0132\025.RecoveringBlockProto\"\\\n In" +
|
"block\030\001 \002(\0132\025.RecoveringBlockProto\"a\n In" +
|
||||||
"itReplicaRecoveryResponseProto\022\034\n\005state\030" +
|
"itReplicaRecoveryResponseProto\022!\n\005state\030" +
|
||||||
"\001 \002(\0162\r.ReplicaState\022\032\n\005block\030\002 \002(\0132\013.Bl" +
|
"\001 \002(\0162\022.ReplicaStateProto\022\032\n\005block\030\002 \002(\013" +
|
||||||
"ockProto\"s\n&UpdateReplicaUnderRecoveryRe" +
|
"2\013.BlockProto\"s\n&UpdateReplicaUnderRecov" +
|
||||||
"questProto\022\"\n\005block\030\001 \002(\0132\023.ExtendedBloc" +
|
"eryRequestProto\022\"\n\005block\030\001 \002(\0132\023.Extende" +
|
||||||
"kProto\022\022\n\nrecoveryId\030\002 \002(\004\022\021\n\tnewLength\030" +
|
"dBlockProto\022\022\n\nrecoveryId\030\002 \002(\004\022\021\n\tnewLe" +
|
||||||
"\003 \002(\004\"M\n\'UpdateReplicaUnderRecoveryRespo" +
|
"ngth\030\003 \002(\004\"M\n\'UpdateReplicaUnderRecovery" +
|
||||||
"nseProto\022\"\n\005block\030\001 \002(\0132\023.ExtendedBlockP",
|
"ResponseProto\022\"\n\005block\030\001 \002(\0132\023.ExtendedB",
|
||||||
"roto2\353\001\n\034InterDatanodeProtocolService\022Z\n" +
|
"lockProto2\353\001\n\034InterDatanodeProtocolServi" +
|
||||||
"\023initReplicaRecovery\022 .InitReplicaRecove" +
|
"ce\022Z\n\023initReplicaRecovery\022 .InitReplicaR" +
|
||||||
"ryRequestProto\032!.InitReplicaRecoveryResp" +
|
"ecoveryRequestProto\032!.InitReplicaRecover" +
|
||||||
"onseProto\022o\n\032updateReplicaUnderRecovery\022" +
|
"yResponseProto\022o\n\032updateReplicaUnderReco" +
|
||||||
"\'.UpdateReplicaUnderRecoveryRequestProto" +
|
"very\022\'.UpdateReplicaUnderRecoveryRequest" +
|
||||||
"\032(.UpdateReplicaUnderRecoveryResponsePro" +
|
"Proto\032(.UpdateReplicaUnderRecoveryRespon" +
|
||||||
"toBJ\n%org.apache.hadoop.hdfs.protocol.pr" +
|
"seProtoBJ\n%org.apache.hadoop.hdfs.protoc" +
|
||||||
"otoB\033InterDatanodeProtocolProtos\210\001\001\240\001\001"
|
"ol.protoB\033InterDatanodeProtocolProtos\210\001\001" +
|
||||||
|
"\240\001\001"
|
||||||
};
|
};
|
||||||
com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner =
|
com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner =
|
||||||
new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() {
|
new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() {
|
||||||
|
|
[File diff suppressed because it is too large]
@@ -0,0 +1,49 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hdfs.protocolPB;

import java.io.IOException;

import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.ClientDatanodeProtocolService;
import org.apache.hadoop.hdfs.protocolR23Compatible.ProtocolSignatureWritable;
import org.apache.hadoop.hdfs.security.token.block.BlockTokenSelector;
import org.apache.hadoop.ipc.ProtocolInfo;
import org.apache.hadoop.ipc.VersionedProtocol;
import org.apache.hadoop.security.KerberosInfo;
import org.apache.hadoop.security.token.TokenInfo;

@KerberosInfo(
    serverPrincipal = DFSConfigKeys.DFS_DATANODE_USER_NAME_KEY)
@TokenInfo(BlockTokenSelector.class)
@ProtocolInfo(protocolName =
    "org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol",
    protocolVersion = 1)
@InterfaceAudience.Private
public interface ClientDatanodeProtocolPB extends
    ClientDatanodeProtocolService.BlockingInterface, VersionedProtocol {

  /**
   * This method is defined to get the protocol signature using
   * ProtocolSignatureWritable - suffix of 2 to the method name
   * avoids conflict.
   */
  public ProtocolSignatureWritable getProtocolSignature2(String protocol,
      long clientVersion, int clientMethodsHash) throws IOException;
}
@@ -0,0 +1,159 @@
/* Apache License, Version 2.0 header, identical to the one in the file above. */
package org.apache.hadoop.hdfs.protocolPB;

import java.io.IOException;

import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.hdfs.protocol.BlockLocalPathInfo;
import org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol;
import org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesResponseProto;
import org.apache.hadoop.hdfs.protocolR23Compatible.ProtocolSignatureWritable;
import org.apache.hadoop.ipc.ProtocolSignature;
import org.apache.hadoop.ipc.RPC;
import org.apache.hadoop.ipc.VersionedProtocol;

import com.google.protobuf.RpcController;
import com.google.protobuf.ServiceException;

/**
 * Implementation for protobuf service that forwards requests
 * received on {@link ClientDatanodeProtocolPB} to the
 * {@link ClientDatanodeProtocol} server implementation.
 */
@InterfaceAudience.Private
public class ClientDatanodeProtocolServerSideTranslatorPB implements
    ClientDatanodeProtocolPB {
  private final static RefreshNamenodesResponseProto REFRESH_NAMENODE_RESP =
      RefreshNamenodesResponseProto.newBuilder().build();
  private final static DeleteBlockPoolResponseProto DELETE_BLOCKPOOL_RESP =
      DeleteBlockPoolResponseProto.newBuilder().build();

  private final ClientDatanodeProtocol impl;

  public ClientDatanodeProtocolServerSideTranslatorPB(
      ClientDatanodeProtocol impl) {
    this.impl = impl;
  }

  @Override
  public GetReplicaVisibleLengthResponseProto getReplicaVisibleLength(
      RpcController unused, GetReplicaVisibleLengthRequestProto request)
      throws ServiceException {
    long len;
    try {
      len = impl.getReplicaVisibleLength(PBHelper.convert(request.getBlock()));
    } catch (IOException e) {
      throw new ServiceException(e);
    }
    return GetReplicaVisibleLengthResponseProto.newBuilder().setLength(len)
        .build();
  }

  @Override
  public RefreshNamenodesResponseProto refreshNamenode(
      RpcController unused, RefreshNamenodesRequestProto request)
      throws ServiceException {
    try {
      impl.refreshNamenodes();
    } catch (IOException e) {
      throw new ServiceException(e);
    }
    return REFRESH_NAMENODE_RESP;
  }

  @Override
  public DeleteBlockPoolResponseProto deleteBlockPool(RpcController unused,
      DeleteBlockPoolRequestProto request) throws ServiceException {
    try {
      impl.deleteBlockPool(request.getBlockPool(), request.getForce());
    } catch (IOException e) {
      throw new ServiceException(e);
    }
    return DELETE_BLOCKPOOL_RESP;
  }

  @Override
  public GetBlockLocalPathInfoResponseProto getBlockLocalPathInfo(
      RpcController unused, GetBlockLocalPathInfoRequestProto request)
      throws ServiceException {
    BlockLocalPathInfo resp;
    try {
      resp = impl.getBlockLocalPathInfo(PBHelper.convert(request.getBlock()), PBHelper.convert(request.getToken()));
    } catch (IOException e) {
      throw new ServiceException(e);
    }
    return GetBlockLocalPathInfoResponseProto.newBuilder()
        .setBlock(PBHelper.convert(resp.getBlock()))
        .setLocalPath(resp.getBlockPath()).setLocalMetaPath(resp.getMetaPath())
        .build();
  }

  @Override
  public long getProtocolVersion(String protocol, long clientVersion)
      throws IOException {
    return RPC.getProtocolVersion(ClientDatanodeProtocolPB.class);
  }

  /**
   * The client side will redirect getProtocolSignature to
   * getProtocolSignature2.
   *
   * However the RPC layer below on the Server side will call getProtocolVersion
   * and possibly in the future getProtocolSignature. Hence we still implement
   * it even though the end client will never call this method.
   *
   * @see VersionedProtocol#getProtocolVersion
   */
  @Override
  public ProtocolSignature getProtocolSignature(String protocol,
      long clientVersion, int clientMethodsHash) throws IOException {
    /**
     * Don't forward this to the server. The protocol version and signature is
     * that of {@link ClientDatanodeProtocol}
     */
    if (!protocol.equals(RPC.getProtocolName(ClientDatanodeProtocol.class))) {
      throw new IOException("Namenode Serverside implements " +
          RPC.getProtocolName(ClientDatanodeProtocol.class) +
          ". The following requested protocol is unknown: " + protocol);
    }

    return ProtocolSignature.getProtocolSignature(clientMethodsHash,
        RPC.getProtocolVersion(ClientDatanodeProtocolPB.class),
        ClientDatanodeProtocolPB.class);
  }

  @Override
  public ProtocolSignatureWritable getProtocolSignature2(String protocol,
      long clientVersion, int clientMethodsHash) throws IOException {
    /**
     * Don't forward this to the server. The protocol version and signature is
     * that of {@link ClientDatanodeProtocol}
     */
    return ProtocolSignatureWritable.convert(
        this.getProtocolSignature(protocol, clientVersion, clientMethodsHash));
  }
}
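A brief sketch of how a datanode-side caller might wrap an existing ClientDatanodeProtocol implementation in the translator added above. The factory class below is hypothetical; registering the returned translator with the protobuf RPC server is elided, since that wiring is not shown in this patch.

import org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol;
import org.apache.hadoop.hdfs.protocolPB.ClientDatanodeProtocolServerSideTranslatorPB;

// Hypothetical wiring sketch, not from the patch.
class ClientDatanodeTranslatorFactory {
  static ClientDatanodeProtocolServerSideTranslatorPB wrap(ClientDatanodeProtocol impl) {
    // The translator delegates each protobuf request to impl and converts
    // IOExceptions into ServiceExceptions, as shown in the class above.
    return new ClientDatanodeProtocolServerSideTranslatorPB(impl);
  }
}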
@@ -0,0 +1,136 @@
/* Apache License, Version 2.0 header, identical to the one in the files above. */
package org.apache.hadoop.hdfs.protocolPB;

import java.io.Closeable;
import java.io.IOException;
import java.net.InetSocketAddress;

import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.protocol.BlockLocalPathInfo;
import org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesRequestProto;
import org.apache.hadoop.hdfs.protocolR23Compatible.ProtocolSignatureWritable;
import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
import org.apache.hadoop.ipc.ProtobufHelper;
import org.apache.hadoop.ipc.ProtobufRpcEngine;
import org.apache.hadoop.ipc.ProtocolSignature;
import org.apache.hadoop.ipc.RPC;
import org.apache.hadoop.security.token.Token;

import com.google.protobuf.RpcController;
import com.google.protobuf.ServiceException;

/**
 * This class is the client side translator to translate the requests made on
 * {@link ClientDatanodeProtocol} interfaces to the RPC server implementing
 * {@link ClientDatanodeProtocolPB}.
 */
@InterfaceAudience.Private
@InterfaceStability.Stable
public class ClientDatanodeProtocolTranslatorPB implements
    ClientDatanodeProtocol, Closeable {
  /** RpcController is not used and hence is set to null */
  private final static RpcController NULL_CONTROLLER = null;
  private final ClientDatanodeProtocolPB rpcProxy;
  private final static RefreshNamenodesRequestProto REFRESH_NAMENODES =
      RefreshNamenodesRequestProto.newBuilder().build();


  public ClientDatanodeProtocolTranslatorPB(InetSocketAddress nameNodeAddr,
      Configuration conf) throws IOException {
    RPC.setProtocolEngine(conf, ClientDatanodeProtocolPB.class,
        ProtobufRpcEngine.class);
    rpcProxy = RPC.getProxy(ClientDatanodeProtocolPB.class,
        RPC.getProtocolVersion(ClientDatanodeProtocolPB.class), nameNodeAddr,
        conf);
  }

  @Override
  public void close() {
    RPC.stopProxy(rpcProxy);
  }

  @Override
  public long getProtocolVersion(String protocolName, long clientVersion)
      throws IOException {
    return rpcProxy.getProtocolVersion(protocolName, clientVersion);
  }

  @Override
  public ProtocolSignature getProtocolSignature(String protocol,
      long clientVersion, int clientMethodsHash) throws IOException {
    return ProtocolSignatureWritable.convert(rpcProxy.getProtocolSignature2(
        protocol, clientVersion, clientMethodsHash));
  }

  @Override
  public long getReplicaVisibleLength(ExtendedBlock b) throws IOException {
    GetReplicaVisibleLengthRequestProto req = GetReplicaVisibleLengthRequestProto
        .newBuilder().setBlock(PBHelper.convert(b)).build();
    try {
      return rpcProxy.getReplicaVisibleLength(NULL_CONTROLLER, req).getLength();
    } catch (ServiceException e) {
      throw ProtobufHelper.getRemoteException(e);
    }
  }

  @Override
  public void refreshNamenodes() throws IOException {
    try {
      rpcProxy.refreshNamenode(NULL_CONTROLLER, REFRESH_NAMENODES);
    } catch (ServiceException e) {
      throw ProtobufHelper.getRemoteException(e);
    }
  }

  @Override
  public void deleteBlockPool(String bpid, boolean force) throws IOException {
    DeleteBlockPoolRequestProto req = DeleteBlockPoolRequestProto.newBuilder()
        .setBlockPool(bpid).setForce(force).build();
    try {
      rpcProxy.deleteBlockPool(NULL_CONTROLLER, req);
    } catch (ServiceException e) {
      throw ProtobufHelper.getRemoteException(e);
    }
  }

  @Override
  public BlockLocalPathInfo getBlockLocalPathInfo(ExtendedBlock block,
      Token<BlockTokenIdentifier> token) throws IOException {
    GetBlockLocalPathInfoRequestProto req =
        GetBlockLocalPathInfoRequestProto.newBuilder()
        .setBlock(PBHelper.convert(block))
        .setToken(PBHelper.convert(token)).build();
    GetBlockLocalPathInfoResponseProto resp;
    try {
      resp = rpcProxy.getBlockLocalPathInfo(NULL_CONTROLLER, req);
    } catch (ServiceException e) {
      throw ProtobufHelper.getRemoteException(e);
    }
    return new BlockLocalPathInfo(PBHelper.convert(resp.getBlock()),
        resp.getLocalPath(), resp.getLocalMetaPath());
  }
}
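For the client side, a usage sketch of the translator added above. The host name and port are made-up values; only the constructor and methods defined in the patch are used.

import java.net.InetSocketAddress;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.protocolPB.ClientDatanodeProtocolTranslatorPB;

// Hypothetical usage sketch, not from the patch.
class RefreshNamenodesExample {
  public static void main(String[] args) throws Exception {
    InetSocketAddress dnAddr = new InetSocketAddress("datanode.example.com", 50020);
    ClientDatanodeProtocolTranslatorPB proxy =
        new ClientDatanodeProtocolTranslatorPB(dnAddr, new Configuration());
    try {
      // Looks like an ordinary ClientDatanodeProtocol call; the translator turns
      // it into a RefreshNamenodesRequestProto under the covers.
      proxy.refreshNamenodes();
    } finally {
      proxy.close();
    }
  }
}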
@@ -0,0 +1,47 @@
/* Apache License, Version 2.0 header, identical to the one in the files above. */
package org.apache.hadoop.hdfs.protocolPB;

import java.io.IOException;

import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.InterDatanodeProtocolService;
import org.apache.hadoop.hdfs.protocolR23Compatible.ProtocolSignatureWritable;
import org.apache.hadoop.ipc.ProtocolInfo;
import org.apache.hadoop.ipc.VersionedProtocol;
import org.apache.hadoop.security.KerberosInfo;

@KerberosInfo(
    serverPrincipal = DFSConfigKeys.DFS_DATANODE_USER_NAME_KEY,
    clientPrincipal = DFSConfigKeys.DFS_DATANODE_USER_NAME_KEY)
@ProtocolInfo(protocolName =
    "org.apache.hadoop.hdfs.server.protocol.InterDatanodeProtocol",
    protocolVersion = 1)
@InterfaceAudience.Private
public interface InterDatanodeProtocolPB extends
    InterDatanodeProtocolService.BlockingInterface, VersionedProtocol {

  /**
   * This method is defined to get the protocol signature using
   * the R23 protocol - hence we have added the suffix of 2 the method name
   * to avoid conflict.
   */
  public ProtocolSignatureWritable getProtocolSignature2(String protocol,
      long clientVersion, int clientMethodsHash) throws IOException;
}
@@ -0,0 +1,130 @@
/* Apache License, Version 2.0 header, identical to the one in the files above. */
package org.apache.hadoop.hdfs.protocolPB;

import java.io.IOException;

import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.InitReplicaRecoveryRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.InitReplicaRecoveryResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.UpdateReplicaUnderRecoveryRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.UpdateReplicaUnderRecoveryResponseProto;
import org.apache.hadoop.hdfs.protocolR23Compatible.ProtocolSignatureWritable;
import org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringBlock;
import org.apache.hadoop.hdfs.server.protocol.InterDatanodeProtocol;
import org.apache.hadoop.hdfs.server.protocol.JournalProtocol;
import org.apache.hadoop.hdfs.server.protocol.ReplicaRecoveryInfo;
import org.apache.hadoop.ipc.ProtocolSignature;
import org.apache.hadoop.ipc.RPC;
import org.apache.hadoop.ipc.VersionedProtocol;

import com.google.protobuf.RpcController;
import com.google.protobuf.ServiceException;

/**
 * Implementation for protobuf service that forwards requests
 * received on {@link InterDatanodeProtocolPB} to the
 * {@link InterDatanodeProtocol} server implementation.
 */
@InterfaceAudience.Private
public class InterDatanodeProtocolServerSideTranslatorPB implements
    InterDatanodeProtocolPB {
  private final InterDatanodeProtocol impl;

  public InterDatanodeProtocolServerSideTranslatorPB(InterDatanodeProtocol impl) {
    this.impl = impl;
  }

  @Override
  public InitReplicaRecoveryResponseProto initReplicaRecovery(
      RpcController unused, InitReplicaRecoveryRequestProto request)
      throws ServiceException {
    RecoveringBlock b = PBHelper.convert(request.getBlock());
    ReplicaRecoveryInfo r;
    try {
      r = impl.initReplicaRecovery(b);
    } catch (IOException e) {
      throw new ServiceException(e);
    }
    return InitReplicaRecoveryResponseProto.newBuilder()
        .setBlock(PBHelper.convert(r)).build();
  }

  @Override
  public UpdateReplicaUnderRecoveryResponseProto updateReplicaUnderRecovery(
      RpcController unused, UpdateReplicaUnderRecoveryRequestProto request)
      throws ServiceException {
    ExtendedBlock b;
    try {
      b = impl.updateReplicaUnderRecovery(PBHelper.convert(request.getBlock()),
          request.getRecoveryId(), request.getNewLength());
    } catch (IOException e) {
      throw new ServiceException(e);
    }
    return UpdateReplicaUnderRecoveryResponseProto.newBuilder()
        .setBlock(PBHelper.convert(b)).build();
  }

  /** @see VersionedProtocol#getProtocolVersion */
  @Override
  public long getProtocolVersion(String protocol, long clientVersion)
      throws IOException {
    return RPC.getProtocolVersion(InterDatanodeProtocolPB.class);
  }

  /**
   * The client side will redirect getProtocolSignature to
   * getProtocolSignature2.
   *
   * However the RPC layer below on the Server side will call getProtocolVersion
   * and possibly in the future getProtocolSignature. Hence we still implement
   * it even though the end client will never call this method.
   *
   * @see VersionedProtocol#getProtocolVersion
   */
  @Override
  public ProtocolSignature getProtocolSignature(String protocol,
      long clientVersion, int clientMethodsHash) throws IOException {
    /**
     * Don't forward this to the server. The protocol version and signature is
     * that of {@link InterDatanodeProtocol}
     */
    if (!protocol.equals(RPC.getProtocolName(InterDatanodeProtocol.class))) {
      throw new IOException("Namenode Serverside implements " +
          RPC.getProtocolName(InterDatanodeProtocol.class) +
          ". The following requested protocol is unknown: " + protocol);
    }

    return ProtocolSignature.getProtocolSignature(clientMethodsHash,
        RPC.getProtocolVersion(InterDatanodeProtocolPB.class),
        InterDatanodeProtocolPB.class);
  }


  @Override
  public ProtocolSignatureWritable getProtocolSignature2(String protocol,
      long clientVersion, int clientMethodsHash) throws IOException {
    /**
     * Don't forward this to the server. The protocol version and signature is
     * that of {@link InterDatanodeProtocol}
     */
    return ProtocolSignatureWritable.convert(
        this.getProtocolSignature(protocol, clientVersion, clientMethodsHash));
  }
}
@@ -0,0 +1,114 @@
/* Apache License, Version 2.0 header, identical to the one in the files above. */
package org.apache.hadoop.hdfs.protocolPB;

import java.io.Closeable;
import java.io.IOException;
import java.net.InetSocketAddress;

import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto;
import org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.InitReplicaRecoveryRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.InitReplicaRecoveryResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.UpdateReplicaUnderRecoveryRequestProto;
import org.apache.hadoop.hdfs.protocolR23Compatible.ProtocolSignatureWritable;
import org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringBlock;
import org.apache.hadoop.hdfs.server.protocol.InterDatanodeProtocol;
import org.apache.hadoop.hdfs.server.protocol.ReplicaRecoveryInfo;
import org.apache.hadoop.ipc.ProtobufHelper;
import org.apache.hadoop.ipc.ProtobufRpcEngine;
import org.apache.hadoop.ipc.ProtocolSignature;
import org.apache.hadoop.ipc.RPC;

import com.google.protobuf.RpcController;
import com.google.protobuf.ServiceException;

/**
 * This class is the client side translator to translate the requests made on
 * {@link InterDatanodeProtocol} interfaces to the RPC server implementing
 * {@link InterDatanodeProtocolPB}.
 */
@InterfaceAudience.Private
@InterfaceStability.Stable
public class InterDatanodeProtocolTranslatorPB implements
    InterDatanodeProtocol, Closeable {
  /** RpcController is not used and hence is set to null */
  private final static RpcController NULL_CONTROLLER = null;
  final private InterDatanodeProtocolPB rpcProxy;

  public InterDatanodeProtocolTranslatorPB(InetSocketAddress nameNodeAddr,
      Configuration conf) throws IOException {
    RPC.setProtocolEngine(conf, InterDatanodeProtocolPB.class,
        ProtobufRpcEngine.class);
    rpcProxy = RPC.getProxy(InterDatanodeProtocolPB.class,
        RPC.getProtocolVersion(InterDatanodeProtocolPB.class), nameNodeAddr,
        conf);
  }

  @Override
  public void close() {
    RPC.stopProxy(rpcProxy);
  }

  @Override
  public long getProtocolVersion(String protocolName, long clientVersion)
      throws IOException {
    return rpcProxy.getProtocolVersion(protocolName, clientVersion);
  }

  @Override
  public ProtocolSignature getProtocolSignature(String protocol,
      long clientVersion, int clientMethodsHash) throws IOException {
    return ProtocolSignatureWritable.convert(rpcProxy.getProtocolSignature2(
        protocol, clientVersion, clientMethodsHash));
  }

  @Override
  public ReplicaRecoveryInfo initReplicaRecovery(RecoveringBlock rBlock)
      throws IOException {
    InitReplicaRecoveryRequestProto req = InitReplicaRecoveryRequestProto
        .newBuilder().setBlock(PBHelper.convert(rBlock)).build();
    InitReplicaRecoveryResponseProto resp;
    try {
      resp = rpcProxy.initReplicaRecovery(NULL_CONTROLLER, req);
    } catch (ServiceException e) {
      throw ProtobufHelper.getRemoteException(e);
    }
    BlockProto b = resp.getBlock();
    return new ReplicaRecoveryInfo(b.getBlockId(), b.getNumBytes(),
        b.getGenStamp(), PBHelper.convert(resp.getState()));
  }

  @Override
  public ExtendedBlock updateReplicaUnderRecovery(ExtendedBlock oldBlock,
      long recoveryId, long newLength) throws IOException {
    UpdateReplicaUnderRecoveryRequestProto req =
        UpdateReplicaUnderRecoveryRequestProto.newBuilder()
        .setBlock(PBHelper.convert(oldBlock))
        .setNewLength(newLength).setRecoveryId(recoveryId).build();
    try {
      return PBHelper.convert(rpcProxy.updateReplicaUnderRecovery(
          NULL_CONTROLLER, req).getBlock());
    } catch (ServiceException e) {
      throw ProtobufHelper.getRemoteException(e);
    }
  }
}
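A sketch of how the primary datanode in a block recovery might reach a peer through the translator added above. The class and method names below are invented; the RecoveringBlock is taken as a parameter because its construction is not shown in this patch.

import java.io.IOException;
import java.net.InetSocketAddress;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB;
import org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringBlock;
import org.apache.hadoop.hdfs.server.protocol.ReplicaRecoveryInfo;

// Hypothetical sketch, not from the patch.
class PeerRecoveryExample {
  static ReplicaRecoveryInfo askPeer(InetSocketAddress peerAddr, Configuration conf,
      RecoveringBlock rBlock) throws IOException {
    InterDatanodeProtocolTranslatorPB peer =
        new InterDatanodeProtocolTranslatorPB(peerAddr, conf);
    try {
      // Serialized as an InitReplicaRecoveryRequestProto; the response carries the
      // peer replica's id, length, generation stamp and state.
      return peer.initReplicaRecovery(rBlock);
    } finally {
      peer.close();
    }
  }
}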
@@ -19,6 +19,7 @@ package org.apache.hadoop.hdfs.protocolPB;
 
 import java.io.IOException;
 
+import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.StartLogSegmentRequestProto;
@@ -37,6 +38,7 @@ import com.google.protobuf.ServiceException;
  * received on {@link JournalProtocolPB} to the
  * {@link JournalProtocol} server implementation.
  */
+@InterfaceAudience.Private
 public class JournalProtocolServerSideTranslatorPB implements JournalProtocolPB {
   /** Server side implementation to delegate the requests to */
   private final JournalProtocol impl;
@@ -53,7 +53,7 @@ public class JournalProtocolTranslatorPB implements JournalProtocol, Closeable {
       Configuration conf) throws IOException {
     RPC.setProtocolEngine(conf, JournalProtocolPB.class, ProtobufRpcEngine.class);
     rpcProxy = RPC.getProxy(JournalProtocolPB.class,
-        JournalProtocol.versionID, nameNodeAddr, conf);
+        RPC.getProtocolVersion(JournalProtocolPB.class), nameNodeAddr, conf);
   }
 
   @Override
@@ -64,7 +64,7 @@ public class JournalProtocolTranslatorPB implements JournalProtocol, Closeable {
   @Override
   public long getProtocolVersion(String protocolName, long clientVersion)
       throws IOException {
-    return 0;
+    return rpcProxy.getProtocolVersion(protocolName, clientVersion);
   }
 
   @Override
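The two JournalProtocolTranslatorPB hunks above make the proxy take its version from the PB interface rather than the legacy JournalProtocol.versionID constant, and forward getProtocolVersion instead of returning a hard-coded 0. A minimal illustration of the pattern follows, assuming JournalProtocolPB carries a @ProtocolInfo(protocolVersion = 1) annotation like the other PB interfaces in this patch.

import org.apache.hadoop.hdfs.protocolPB.JournalProtocolPB;
import org.apache.hadoop.ipc.RPC;

// Hypothetical illustration, not from the patch.
class JournalVersionCheck {
  static long pbVersion() {
    // With @ProtocolInfo(..., protocolVersion = 1) on JournalProtocolPB this is
    // expected to evaluate to 1, so the translator no longer needs the legacy
    // JournalProtocol.versionID constant when building its proxy.
    return RPC.getProtocolVersion(JournalProtocolPB.class);
  }
}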
@@ -0,0 +1,53 @@
/* Apache License, Version 2.0 header, identical to the one in the files above. */
package org.apache.hadoop.hdfs.protocolPB;

import java.io.IOException;

import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.NamenodeProtocolService;
import org.apache.hadoop.hdfs.protocolR23Compatible.ProtocolSignatureWritable;
import org.apache.hadoop.ipc.ProtocolInfo;
import org.apache.hadoop.ipc.VersionedProtocol;
import org.apache.hadoop.security.KerberosInfo;

/**
 * Protocol that a secondary NameNode uses to communicate with the NameNode.
 * It's used to get part of the name node state
 *
 * Note: This extends the protocolbuffer service based interface to
 * add annotations required for security.
 */
@KerberosInfo(
    serverPrincipal = DFSConfigKeys.DFS_NAMENODE_USER_NAME_KEY,
    clientPrincipal = DFSConfigKeys.DFS_NAMENODE_USER_NAME_KEY)
@ProtocolInfo(protocolName =
    "org.apache.hadoop.hdfs.server.protocol.NamenodeProtocol",
    protocolVersion = 1)
@InterfaceAudience.Private
public interface NamenodeProtocolPB extends
    NamenodeProtocolService.BlockingInterface, VersionedProtocol {
  /**
   * This method is defined to get the protocol signature using
   * the R23 protocol - hence we have added the suffix of 2 the method name
   * to avoid conflict.
   */
  public ProtocolSignatureWritable getProtocolSignature2(String protocol,
      long clientVersion, int clientMethodsHash) throws IOException;
}
@@ -0,0 +1,253 @@
/* Apache License, Version 2.0 header, identical to the one in the files above. */
package org.apache.hadoop.hdfs.protocolPB;

import java.io.IOException;

import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto;
import org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.EndCheckpointRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.EndCheckpointResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.ErrorReportRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.ErrorReportResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlockKeysRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlockKeysResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlocksRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlocksResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetEditLogManifestRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetEditLogManifestResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetTransactionIdRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetTransactionIdResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RegisterRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RegisterResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RollEditLogRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RollEditLogResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.StartCheckpointRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.StartCheckpointResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.VersionRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.VersionResponseProto;
import org.apache.hadoop.hdfs.protocolR23Compatible.ProtocolSignatureWritable;
import org.apache.hadoop.hdfs.security.token.block.ExportedBlockKeys;
import org.apache.hadoop.hdfs.server.namenode.CheckpointSignature;
import org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations;
import org.apache.hadoop.hdfs.server.protocol.NamenodeCommand;
import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocol;
import org.apache.hadoop.hdfs.server.protocol.NamenodeRegistration;
import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
import org.apache.hadoop.hdfs.server.protocol.RemoteEditLogManifest;
import org.apache.hadoop.ipc.ProtocolSignature;
import org.apache.hadoop.ipc.RPC;

import com.google.protobuf.RpcController;
import com.google.protobuf.ServiceException;

/**
 * Implementation for protobuf service that forwards requests
 * received on {@link NamenodeProtocolPB} to the
 * {@link NamenodeProtocol} server implementation.
 */
public class NamenodeProtocolServerSideTranslatorPB implements
    NamenodeProtocolPB {
  private final NamenodeProtocol impl;

  public NamenodeProtocolServerSideTranslatorPB(NamenodeProtocol impl) {
    this.impl = impl;
  }

  @Override
  public GetBlocksResponseProto getBlocks(RpcController unused,
      GetBlocksRequestProto request) throws ServiceException {
    DatanodeInfo dnInfo = new DatanodeInfo(PBHelper.convert(request.getDatanode()));
    BlocksWithLocations blocks;
    try {
      blocks = impl.getBlocks(dnInfo, request.getSize());
    } catch (IOException e) {
      throw new ServiceException(e);
    }
    return GetBlocksResponseProto.newBuilder()
        .setBlocks(PBHelper.convert(blocks)).build();
  }

  @Override
  public GetBlockKeysResponseProto getBlockKeys(RpcController unused,
      GetBlockKeysRequestProto request) throws ServiceException {
    ExportedBlockKeys keys;
    try {
      keys = impl.getBlockKeys();
    } catch (IOException e) {
      throw new ServiceException(e);
    }
    return GetBlockKeysResponseProto.newBuilder()
        .setKeys(PBHelper.convert(keys)).build();
  }

  @Override
  public GetTransactionIdResponseProto getTransationId(RpcController unused,
      GetTransactionIdRequestProto request) throws ServiceException {
    long txid;
    try {
      txid = impl.getTransactionID();
    } catch (IOException e) {
      throw new ServiceException(e);
    }
    return GetTransactionIdResponseProto.newBuilder().setTxId(txid).build();
  }

  @Override
  public RollEditLogResponseProto rollEditLog(RpcController unused,
      RollEditLogRequestProto request) throws ServiceException {
    CheckpointSignature signature;
    try {
      signature = impl.rollEditLog();
    } catch (IOException e) {
      throw new ServiceException(e);
    }
    return RollEditLogResponseProto.newBuilder()
        .setSignature(PBHelper.convert(signature)).build();
  }

  @Override
  public ErrorReportResponseProto errorReport(RpcController unused,
      ErrorReportRequestProto request) throws ServiceException {
    try {
      impl.errorReport(PBHelper.convert(request.getRegistration()),
          request.getErrorCode(), request.getMsg());
    } catch (IOException e) {
      throw new ServiceException(e);
    }
    return ErrorReportResponseProto.newBuilder().build();
  }

  @Override
  public RegisterResponseProto register(RpcController unused,
      RegisterRequestProto request) throws ServiceException {
    NamenodeRegistration reg;
    try {
      reg = impl.register(PBHelper.convert(request.getRegistration()));
    } catch (IOException e) {
      throw new ServiceException(e);
    }
    return RegisterResponseProto.newBuilder()
        .setRegistration(PBHelper.convert(reg)).build();
  }

  @Override
  public StartCheckpointResponseProto startCheckpoint(RpcController unused,
      StartCheckpointRequestProto request) throws ServiceException {
    NamenodeCommand cmd;
    try {
      cmd = impl.startCheckpoint(PBHelper.convert(request.getRegistration()));
    } catch (IOException e) {
      throw new ServiceException(e);
    }
    return StartCheckpointResponseProto.newBuilder()
        .setCommand(PBHelper.convert(cmd)).build();
  }

  @Override
  public EndCheckpointResponseProto endCheckpoint(RpcController unused,
      EndCheckpointRequestProto request) throws ServiceException {
    try {
      impl.endCheckpoint(PBHelper.convert(request.getRegistration()),
          PBHelper.convert(request.getSignature()));
    } catch (IOException e) {
      throw new ServiceException(e);
    }
    return EndCheckpointResponseProto.newBuilder().build();
  }

  @Override
  public GetEditLogManifestResponseProto getEditLogManifest(
      RpcController unused, GetEditLogManifestRequestProto request)
      throws ServiceException {
    RemoteEditLogManifest manifest;
    try {
      manifest = impl.getEditLogManifest(request.getSinceTxId());
    } catch (IOException e) {
      throw new ServiceException(e);
    }
    return GetEditLogManifestResponseProto.newBuilder()
        .setManifest(PBHelper.convert(manifest)).build();
  }

  @Override
  public long getProtocolVersion(String protocol, long clientVersion)
      throws IOException {
    return RPC.getProtocolVersion(NamenodeProtocolPB.class);
  }

  /**
   * The client side will redirect getProtocolSignature to
   * getProtocolSignature2.
   *
   * However the RPC layer below on the Server side will call getProtocolVersion
   * and possibly in the future getProtocolSignature. Hence we still implement
   * it even though the end client will never call this method.
   */
  @Override
  public ProtocolSignature getProtocolSignature(String protocol,
      long clientVersion, int clientMethodsHash) throws IOException {
    /**
     * Don't forward this to the server. The protocol version and signature is
     * that of {@link NamenodeProtocol}
     */
    if (!protocol.equals(RPC.getProtocolName(NamenodeProtocolPB.class))) {
      throw new IOException("Namenode Serverside implements " +
          RPC.getProtocolName(NamenodeProtocolPB.class) +
          ". The following requested protocol is unknown: " + protocol);
    }

    return ProtocolSignature.getProtocolSignature(clientMethodsHash,
        RPC.getProtocolVersion(NamenodeProtocolPB.class),
        NamenodeProtocolPB.class);
  }


  @Override
  public ProtocolSignatureWritable getProtocolSignature2(String protocol,
      long clientVersion, int clientMethodsHash) throws IOException {
    /**
     * Don't forward this to the server. The protocol version and signature is
     * that of {@link NamenodePBProtocol}
     */
    return ProtocolSignatureWritable.convert(
        this.getProtocolSignature(protocol, clientVersion, clientMethodsHash));
  }

  @Override
  public VersionResponseProto versionRequest(RpcController controller,
      VersionRequestProto request) throws ServiceException {
    NamespaceInfo info;
    try {
      info = impl.versionRequest();
    } catch (IOException e) {
      throw new ServiceException(e);
    }
    return VersionResponseProto.newBuilder()
        .setInfo(convert(info)).build();
  }

  private NamespaceInfoProto convert(NamespaceInfo info) {
    return NamespaceInfoProto.newBuilder()
        .setBlockPoolID(info.getBlockPoolID())
        .setBuildVersion(info.getBuildVersion())
        .setDistUpgradeVersion(info.getDistributedUpgradeVersion())
        .setStorageInfo(PBHelper.convert(info)).build();
  }
}
@@ -0,0 +1,270 @@
/* Apache License, Version 2.0 header, identical to the one in the files above. */
package org.apache.hadoop.hdfs.protocolPB;

import java.io.Closeable;
import java.io.IOException;
import java.net.InetSocketAddress;
import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.TimeUnit;

import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.protocol.AlreadyBeingCreatedException;
import org.apache.hadoop.hdfs.protocol.DatanodeID;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeCommandProto;
import org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.EndCheckpointRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.ErrorReportRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlockKeysRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlocksRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetEditLogManifestRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetTransactionIdRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RegisterRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RollEditLogRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.StartCheckpointRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.VersionRequestProto;
import org.apache.hadoop.hdfs.protocolR23Compatible.ProtocolSignatureWritable;
import org.apache.hadoop.hdfs.security.token.block.ExportedBlockKeys;
import org.apache.hadoop.hdfs.server.namenode.CheckpointSignature;
import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations;
import org.apache.hadoop.hdfs.server.protocol.NamenodeCommand;
import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocol;
import org.apache.hadoop.hdfs.server.protocol.NamenodeRegistration;
import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
import org.apache.hadoop.hdfs.server.protocol.RemoteEditLogManifest;
import org.apache.hadoop.io.retry.RetryPolicies;
import org.apache.hadoop.io.retry.RetryPolicy;
import org.apache.hadoop.io.retry.RetryProxy;
import org.apache.hadoop.ipc.ProtobufHelper;
import org.apache.hadoop.ipc.ProtocolSignature;
import org.apache.hadoop.ipc.RPC;
import org.apache.hadoop.ipc.RemoteException;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.security.UserGroupInformation;

import com.google.protobuf.RpcController;
import com.google.protobuf.ServiceException;

/**
 * This class is the client side translator to translate the requests made on
 * {@link NamenodeProtocol} interfaces to the RPC server implementing
 * {@link NamenodeProtocolPB}.
 */
@InterfaceAudience.Private
@InterfaceStability.Stable
public class NamenodeProtocolTranslatorPB implements NamenodeProtocol,
    Closeable {
  /** RpcController is not used and hence is set to null */
  private final static RpcController NULL_CONTROLLER = null;

  /*
   * Protobuf requests with no parameters instantiated only once
   */
  private static final GetBlockKeysRequestProto GET_BLOCKKEYS =
      GetBlockKeysRequestProto.newBuilder().build();
  private static final GetTransactionIdRequestProto GET_TRANSACTIONID =
      GetTransactionIdRequestProto.newBuilder().build();
  private static final RollEditLogRequestProto ROLL_EDITLOG =
      RollEditLogRequestProto.newBuilder().build();
  private static final VersionRequestProto VERSION_REQUEST =
      VersionRequestProto.newBuilder().build();

  final private NamenodeProtocolPB rpcProxy;


  private static NamenodeProtocolPB createNamenode(
      InetSocketAddress nameNodeAddr, Configuration conf,
      UserGroupInformation ugi) throws IOException {
    return RPC.getProxy(NamenodeProtocolPB.class,
        RPC.getProtocolVersion(NamenodeProtocolPB.class), nameNodeAddr, ugi,
        conf, NetUtils.getSocketFactory(conf, NamenodeProtocolPB.class));
  }

  /** Create a {@link NameNode} proxy */
  static NamenodeProtocolPB createNamenodeWithRetry(
      NamenodeProtocolPB rpcNamenode) {
    RetryPolicy createPolicy = RetryPolicies
        .retryUpToMaximumCountWithFixedSleep(5,
            HdfsConstants.LEASE_SOFTLIMIT_PERIOD, TimeUnit.MILLISECONDS);
    Map<Class<? extends Exception>, RetryPolicy> remoteExceptionToPolicyMap = new HashMap<Class<? extends Exception>, RetryPolicy>();
    remoteExceptionToPolicyMap.put(AlreadyBeingCreatedException.class,
        createPolicy);

    Map<Class<? extends Exception>, RetryPolicy> exceptionToPolicyMap = new HashMap<Class<? extends Exception>, RetryPolicy>();
    exceptionToPolicyMap.put(RemoteException.class, RetryPolicies
        .retryByRemoteException(RetryPolicies.TRY_ONCE_THEN_FAIL,
            remoteExceptionToPolicyMap));
    RetryPolicy methodPolicy = RetryPolicies.retryByException(
        RetryPolicies.TRY_ONCE_THEN_FAIL, exceptionToPolicyMap);
    Map<String, RetryPolicy> methodNameToPolicyMap = new HashMap<String, RetryPolicy>();

    methodNameToPolicyMap.put("create", methodPolicy);

    return (NamenodeProtocolPB) RetryProxy.create(NamenodeProtocolPB.class,
        rpcNamenode, methodNameToPolicyMap);
  }

  public NamenodeProtocolTranslatorPB(InetSocketAddress nameNodeAddr,
      Configuration conf, UserGroupInformation ugi) throws IOException {
    rpcProxy = createNamenodeWithRetry(createNamenode(nameNodeAddr, conf, ugi));
  }

  public void close() {
    RPC.stopProxy(rpcProxy);
  }

  @Override
  public ProtocolSignature getProtocolSignature(String protocolName,
      long clientVersion, int clientMethodHash) throws IOException {
    return ProtocolSignatureWritable.convert(rpcProxy.getProtocolSignature2(
        protocolName, clientVersion, clientMethodHash));
  }

  @Override
  public long getProtocolVersion(String protocolName, long clientVersion)
      throws IOException {
    return rpcProxy.getProtocolVersion(protocolName, clientVersion);
  }

  @Override
  public BlocksWithLocations getBlocks(DatanodeInfo datanode, long size)
      throws IOException {
    GetBlocksRequestProto req = GetBlocksRequestProto.newBuilder()
        .setDatanode(PBHelper.convert((DatanodeID)datanode)).setSize(size)
        .build();
    try {
      return PBHelper.convert(rpcProxy.getBlocks(NULL_CONTROLLER, req)
          .getBlocks());
    } catch (ServiceException e) {
      throw ProtobufHelper.getRemoteException(e);
    }
  }

  @Override
  public ExportedBlockKeys getBlockKeys() throws IOException {
    try {
      return PBHelper.convert(rpcProxy.getBlockKeys(NULL_CONTROLLER,
          GET_BLOCKKEYS).getKeys());
    } catch (ServiceException e) {
      throw ProtobufHelper.getRemoteException(e);
    }
  }

  @Override
  public long getTransactionID() throws IOException {
    try {
      return rpcProxy.getTransationId(NULL_CONTROLLER, GET_TRANSACTIONID)
          .getTxId();
    } catch (ServiceException e) {
      throw ProtobufHelper.getRemoteException(e);
    }
  }

  @Override
  @SuppressWarnings("deprecation")
  public CheckpointSignature rollEditLog() throws IOException {
    try {
      return PBHelper.convert(rpcProxy.rollEditLog(NULL_CONTROLLER,
          ROLL_EDITLOG).getSignature());
    } catch (ServiceException e) {
      throw ProtobufHelper.getRemoteException(e);
    }
  }

  @Override
  public NamespaceInfo versionRequest() throws IOException {
    try {
      return PBHelper.convert(rpcProxy.versionRequest(NULL_CONTROLLER,
          VERSION_REQUEST).getInfo());
    } catch (ServiceException e) {
      throw ProtobufHelper.getRemoteException(e);
    }
  }

  @Override
  public void errorReport(NamenodeRegistration registration, int errorCode,
      String msg) throws IOException {
    ErrorReportRequestProto req = ErrorReportRequestProto.newBuilder()
        .setErrorCode(errorCode).setMsg(msg)
        .setRegistration(PBHelper.convert(registration)).build();
    try {
      rpcProxy.errorReport(NULL_CONTROLLER, req);
    } catch (ServiceException e) {
|
||||||
|
throw ProtobufHelper.getRemoteException(e);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public NamenodeRegistration register(NamenodeRegistration registration)
|
||||||
|
throws IOException {
|
||||||
|
RegisterRequestProto req = RegisterRequestProto.newBuilder()
|
||||||
|
.setRegistration(PBHelper.convert(registration)).build();
|
||||||
|
try {
|
||||||
|
return PBHelper.convert(rpcProxy.register(NULL_CONTROLLER, req)
|
||||||
|
.getRegistration());
|
||||||
|
} catch (ServiceException e) {
|
||||||
|
throw ProtobufHelper.getRemoteException(e);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public NamenodeCommand startCheckpoint(NamenodeRegistration registration)
|
||||||
|
throws IOException {
|
||||||
|
StartCheckpointRequestProto req = StartCheckpointRequestProto.newBuilder()
|
||||||
|
.setRegistration(PBHelper.convert(registration)).build();
|
||||||
|
NamenodeCommandProto cmd;
|
||||||
|
try {
|
||||||
|
cmd = rpcProxy.startCheckpoint(NULL_CONTROLLER, req).getCommand();
|
||||||
|
} catch (ServiceException e) {
|
||||||
|
throw ProtobufHelper.getRemoteException(e);
|
||||||
|
}
|
||||||
|
return PBHelper.convert(cmd);
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public void endCheckpoint(NamenodeRegistration registration,
|
||||||
|
CheckpointSignature sig) throws IOException {
|
||||||
|
EndCheckpointRequestProto req = EndCheckpointRequestProto.newBuilder()
|
||||||
|
.setRegistration(PBHelper.convert(registration))
|
||||||
|
.setSignature(PBHelper.convert(sig)).build();
|
||||||
|
try {
|
||||||
|
rpcProxy.endCheckpoint(NULL_CONTROLLER, req);
|
||||||
|
} catch (ServiceException e) {
|
||||||
|
throw ProtobufHelper.getRemoteException(e);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public RemoteEditLogManifest getEditLogManifest(long sinceTxId)
|
||||||
|
throws IOException {
|
||||||
|
GetEditLogManifestRequestProto req = GetEditLogManifestRequestProto
|
||||||
|
.newBuilder().setSinceTxId(sinceTxId).build();
|
||||||
|
try {
|
||||||
|
return PBHelper.convert(rpcProxy.getEditLogManifest(NULL_CONTROLLER, req)
|
||||||
|
.getManifest());
|
||||||
|
} catch (ServiceException e) {
|
||||||
|
throw ProtobufHelper.getRemoteException(e);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
|
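For orientation, a caller drives this translator exactly like any other NamenodeProtocol implementation: each call marshals the arguments to protobuf, invokes NamenodeProtocolPB over RPC, and converts the response back with PBHelper. The sketch below is illustrative only and not part of the patch; the NameNode host, port and configuration are assumptions.

// Illustrative sketch (not from the patch): exercising the PB translator as a
// NamenodeProtocol client. "nn.example.com" is a hypothetical address.
InetSocketAddress nnAddr = new InetSocketAddress("nn.example.com", 8020);
Configuration conf = new Configuration();
UserGroupInformation ugi = UserGroupInformation.getCurrentUser();

NamenodeProtocolTranslatorPB namenode =
    new NamenodeProtocolTranslatorPB(nnAddr, conf, ugi);
try {
  // Ask the NameNode for its current transaction id and the edit logs
  // available from that point on.
  long txId = namenode.getTransactionID();
  RemoteEditLogManifest manifest = namenode.getEditLogManifest(txId);
  System.out.println("Edit logs since " + txId + ": " + manifest.getLogs().size());
} finally {
  namenode.close(); // stops the underlying RPC proxy
}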
@@ -17,18 +17,62 @@
  */
 package org.apache.hadoop.hdfs.protocolPB;
 
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
+
+import org.apache.hadoop.hdfs.protocol.Block;
+import org.apache.hadoop.hdfs.protocol.DatanodeID;
+import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
+import org.apache.hadoop.hdfs.protocol.DatanodeInfo.AdminStates;
+import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
+import org.apache.hadoop.hdfs.protocol.LocatedBlock;
+import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockKeyProto;
+import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto;
+import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenIdentifierProto;
+import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockWithLocationsProto;
+import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlocksWithLocationsProto;
+import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointCommandProto;
+import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointSignatureProto;
+import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto;
+import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto;
+import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.AdminState;
+import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExportedBlockKeysProto;
+import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto;
+import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto;
+import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.Builder;
+import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeCommandProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto;
+import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto;
+import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RecoveringBlockProto;
+import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProto;
+import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto.NamenodeRoleProto;
+import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReplicaStateProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto;
+import org.apache.hadoop.hdfs.security.token.block.BlockKey;
+import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
+import org.apache.hadoop.hdfs.security.token.block.ExportedBlockKeys;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NamenodeRole;
+import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState;
 import org.apache.hadoop.hdfs.server.common.StorageInfo;
+import org.apache.hadoop.hdfs.server.namenode.CheckpointSignature;
+import org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringBlock;
+import org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations;
+import org.apache.hadoop.hdfs.server.protocol.CheckpointCommand;
+import org.apache.hadoop.hdfs.server.protocol.NamenodeCommand;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeRegistration;
+import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
+import org.apache.hadoop.hdfs.server.protocol.RemoteEditLog;
+import org.apache.hadoop.hdfs.server.protocol.RemoteEditLogManifest;
+import org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations.BlockWithLocations;
+import org.apache.hadoop.io.Text;
+import org.apache.hadoop.security.token.Token;
 
 import com.google.protobuf.ByteString;
 
 /**
- * Utilities for converting protobuf classes to and from
- * implementation classes.
+ * Utilities for converting protobuf classes to and from implementation classes.
  */
 class PBHelper {
   private PBHelper() {
@@ -65,10 +109,8 @@ class PBHelper {
 
   public static StorageInfoProto convert(StorageInfo info) {
     return StorageInfoProto.newBuilder().setClusterID(info.getClusterID())
-        .setCTime(info.getCTime())
-        .setLayoutVersion(info.getLayoutVersion())
-        .setNamespceID(info.getNamespaceID())
-        .build();
+        .setCTime(info.getCTime()).setLayoutVersion(info.getLayoutVersion())
+        .setNamespceID(info.getNamespaceID()).build();
   }
 
   public static StorageInfo convert(StorageInfoProto info) {
@@ -76,11 +118,9 @@ class PBHelper {
         info.getClusterID(), info.getCTime());
   }
 
-
   public static NamenodeRegistrationProto convert(NamenodeRegistration reg) {
     return NamenodeRegistrationProto.newBuilder()
-        .setHttpAddress(reg.getHttpAddress())
-        .setRole(convert(reg.getRole()))
+        .setHttpAddress(reg.getHttpAddress()).setRole(convert(reg.getRole()))
         .setRpcAddress(reg.getAddress())
         .setStorageInfo(convert((StorageInfo) reg)).build();
   }
@@ -89,4 +129,292 @@ class PBHelper {
     return new NamenodeRegistration(reg.getRpcAddress(), reg.getHttpAddress(),
         convert(reg.getStorageInfo()), convert(reg.getRole()));
   }
+
+  public static DatanodeID convert(DatanodeIDProto dn) {
+    return new DatanodeID(dn.getName(), dn.getStorageID(), dn.getInfoPort(),
+        dn.getIpcPort());
+  }
+
+  public static DatanodeIDProto convert(DatanodeID dn) {
+    return DatanodeIDProto.newBuilder().setName(dn.getName())
+        .setInfoPort(dn.getInfoPort()).setIpcPort(dn.getIpcPort())
+        .setStorageID(dn.getStorageID()).build();
+  }
+
+  public static BlockProto convert(Block b) {
+    return BlockProto.newBuilder().setBlockId(b.getBlockId())
+        .setGenStamp(b.getGenerationStamp()).setNumBytes(b.getNumBytes())
+        .build();
+  }
+
+  public static Block convert(BlockProto b) {
+    return new Block(b.getBlockId(), b.getGenStamp(), b.getNumBytes());
+  }
+
+  public static BlockWithLocationsProto convert(BlockWithLocations blk) {
+    return BlockWithLocationsProto.newBuilder()
+        .setBlock(convert(blk.getBlock()))
+        .addAllDatanodeIDs(Arrays.asList(blk.getDatanodes())).build();
+  }
+
+  public static BlockWithLocations convert(BlockWithLocationsProto b) {
+    return new BlockWithLocations(convert(b.getBlock()), b.getDatanodeIDsList()
+        .toArray(new String[0]));
+  }
+
+  public static BlocksWithLocationsProto convert(BlocksWithLocations blks) {
+    BlocksWithLocationsProto.Builder builder = BlocksWithLocationsProto
+        .newBuilder();
+    for (BlockWithLocations b : blks.getBlocks()) {
+      builder.addBlocks(convert(b));
+    }
+    return builder.build();
+  }
+
+  public static BlocksWithLocations convert(BlocksWithLocationsProto blocks) {
+    List<BlockWithLocationsProto> b = blocks.getBlocksList();
+    BlockWithLocations[] ret = new BlockWithLocations[b.size()];
+    int i = 0;
+    for (BlockWithLocationsProto entry : b) {
+      ret[i++] = convert(entry);
+    }
+    return new BlocksWithLocations(ret);
+  }
+
+  public static BlockKeyProto convert(BlockKey key) {
+    byte[] encodedKey = key.getEncodedKey();
+    ByteString keyBytes = ByteString.copyFrom(encodedKey == null ? new byte[0]
+        : encodedKey);
+    return BlockKeyProto.newBuilder().setKeyId(key.getKeyId())
+        .setKeyBytes(keyBytes).setExpiryDate(key.getExpiryDate()).build();
+  }
+
+  public static BlockKey convert(BlockKeyProto k) {
+    return new BlockKey(k.getKeyId(), k.getExpiryDate(), k.getKeyBytes()
+        .toByteArray());
+  }
+
+  public static ExportedBlockKeysProto convert(ExportedBlockKeys keys) {
+    ExportedBlockKeysProto.Builder builder = ExportedBlockKeysProto
+        .newBuilder();
+    builder.setIsBlockTokenEnabled(keys.isBlockTokenEnabled())
+        .setKeyUpdateInterval(keys.getKeyUpdateInterval())
+        .setTokenLifeTime(keys.getTokenLifetime())
+        .setCurrentKey(convert(keys.getCurrentKey()));
+    for (BlockKey k : keys.getAllKeys()) {
+      builder.addAllKeys(convert(k));
+    }
+    return builder.build();
+  }
+
+  public static ExportedBlockKeys convert(ExportedBlockKeysProto keys) {
+    return new ExportedBlockKeys(keys.getIsBlockTokenEnabled(),
+        keys.getKeyUpdateInterval(), keys.getTokenLifeTime(),
+        convert(keys.getCurrentKey()), convertBlockKeys(keys.getAllKeysList()));
+  }
+
+  public static CheckpointSignatureProto convert(CheckpointSignature s) {
+    return CheckpointSignatureProto.newBuilder()
+        .setBlockPoolId(s.getBlockpoolID())
+        .setCurSegmentTxId(s.getCurSegmentTxId())
+        .setMostRecentCheckpointTxId(s.getMostRecentCheckpointTxId())
+        .setStorageInfo(PBHelper.convert((StorageInfo) s)).build();
+  }
+
+  public static CheckpointSignature convert(CheckpointSignatureProto s) {
+    return new CheckpointSignature(PBHelper.convert(s.getStorageInfo()),
+        s.getBlockPoolId(), s.getMostRecentCheckpointTxId(),
+        s.getCurSegmentTxId());
+  }
+
+  public static RemoteEditLogProto convert(RemoteEditLog log) {
+    return RemoteEditLogProto.newBuilder().setEndTxId(log.getEndTxId())
+        .setStartTxId(log.getStartTxId()).build();
+  }
+
+  public static RemoteEditLog convert(RemoteEditLogProto l) {
+    return new RemoteEditLog(l.getStartTxId(), l.getEndTxId());
+  }
+
+  public static RemoteEditLogManifestProto convert(
+      RemoteEditLogManifest manifest) {
+    RemoteEditLogManifestProto.Builder builder = RemoteEditLogManifestProto
+        .newBuilder();
+    for (RemoteEditLog log : manifest.getLogs()) {
+      builder.addLogs(convert(log));
+    }
+    return builder.build();
+  }
+
+  public static RemoteEditLogManifest convert(
+      RemoteEditLogManifestProto manifest) {
+    List<RemoteEditLog> logs = new ArrayList<RemoteEditLog>(manifest
+        .getLogsList().size());
+    for (RemoteEditLogProto l : manifest.getLogsList()) {
+      logs.add(convert(l));
+    }
+    return new RemoteEditLogManifest(logs);
+  }
+
+  public static CheckpointCommandProto convert(CheckpointCommand cmd) {
+    return CheckpointCommandProto.newBuilder()
+        .setSignature(convert(cmd.getSignature())).build();
+  }
+
+  public static NamenodeCommandProto convert(NamenodeCommand cmd) {
+    if (cmd instanceof CheckpointCommand) {
+      return NamenodeCommandProto.newBuilder().setAction(cmd.getAction())
+          .setType(NamenodeCommandProto.Type.NamenodeCommand)
+          .setCheckpointCmd(convert((CheckpointCommand) cmd)).build();
+    }
+    return NamenodeCommandProto.newBuilder().setAction(cmd.getAction()).build();
+  }
+
+  public static BlockKey[] convertBlockKeys(List<BlockKeyProto> list) {
+    BlockKey[] ret = new BlockKey[list.size()];
+    int i = 0;
+    for (BlockKeyProto k : list) {
+      ret[i++] = convert(k);
+    }
+    return ret;
+  }
+
+  public static NamespaceInfo convert(NamespaceInfoProto info) {
+    StorageInfoProto storage = info.getStorageInfo();
+    return new NamespaceInfo(storage.getNamespceID(), storage.getClusterID(),
+        info.getBlockPoolID(), storage.getCTime(), info.getDistUpgradeVersion());
+  }
+
+  public static NamenodeCommand convert(NamenodeCommandProto cmd) {
+    switch (cmd.getType()) {
+    case CheckPointCommand:
+      CheckpointCommandProto chkPt = cmd.getCheckpointCmd();
+      return new CheckpointCommand(PBHelper.convert(chkPt.getSignature()),
+          chkPt.getNeedToReturnImage());
+    default:
+      return new NamenodeCommand(cmd.getAction());
+    }
+  }
+
+  public static ExtendedBlockProto convert(ExtendedBlock b) {
+    return ExtendedBlockProto.newBuilder().setBlockId(b.getBlockId())
+        .setGenerationStamp(b.getGenerationStamp())
+        .setNumBytes(b.getNumBytes()).setPoolId(b.getBlockPoolId()).build();
+  }
+
+  public static ExtendedBlock convert(ExtendedBlockProto b) {
+    return new ExtendedBlock(b.getPoolId(), b.getBlockId(), b.getNumBytes(),
+        b.getGenerationStamp());
+  }
+
+  public static RecoveringBlockProto convert(RecoveringBlock b) {
+    LocatedBlockProto lb = PBHelper.convert((LocatedBlock)b);
+    return RecoveringBlockProto.newBuilder().setBlock(lb)
+        .setNewGenStamp(b.getNewGenerationStamp()).build();
+  }
+
+  public static RecoveringBlock convert(RecoveringBlockProto b) {
+    ExtendedBlock block = convert(b.getBlock().getB());
+    DatanodeInfo[] locs = convert(b.getBlock().getLocsList());
+    return new RecoveringBlock(block, locs, b.getNewGenStamp());
+  }
+
+  public static DatanodeInfo[] convert(List<DatanodeInfoProto> list) {
+    DatanodeInfo[] info = new DatanodeInfo[list.size()];
+    for (int i = 0; i < info.length; i++) {
+      info[i] = convert(list.get(i));
+    }
+    return info;
+  }
+
+  public static DatanodeInfo convert(DatanodeInfoProto info) {
+    DatanodeIDProto dnId = info.getId();
+    return new DatanodeInfo(dnId.getName(), dnId.getStorageID(),
+        dnId.getInfoPort(), dnId.getIpcPort(), info.getCapacity(),
+        info.getDfsUsed(), info.getRemaining(), info.getBlockPoolUsed(),
+        info.getLastUpdate(), info.getXceiverCount(), info.getLocation(),
+        info.getHostName(), convert(info.getAdminState()));
+  }
+
+  public static DatanodeInfoProto convert(DatanodeInfo info) {
+    return DatanodeInfoProto.newBuilder()
+        .setAdminState(PBHelper.convert(info.getAdminState()))
+        .setBlockPoolUsed(info.getBlockPoolUsed())
+        .setCapacity(info.getCapacity())
+        .setDfsUsed(info.getDfsUsed())
+        .setHostName(info.getHostName())
+        .setId(PBHelper.convert((DatanodeID)info))
+        .setLastUpdate(info.getLastUpdate())
+        .setLocation(info.getNetworkLocation())
+        .setRemaining(info.getRemaining())
+        .setXceiverCount(info.getXceiverCount())
+        .build();
+  }
+
+  public static AdminStates convert(AdminState adminState) {
+    switch(adminState) {
+    case DECOMMISSION_INPROGRESS:
+      return AdminStates.DECOMMISSION_INPROGRESS;
+    case DECOMMISSIONED:
+      return AdminStates.DECOMMISSIONED;
+    case NORMAL:
+    default:
+      return AdminStates.NORMAL;
+    }
+  }
+
+  public static AdminState convert(AdminStates adminState) {
+    switch(adminState) {
+    case DECOMMISSION_INPROGRESS:
+      return AdminState.DECOMMISSION_INPROGRESS;
+    case DECOMMISSIONED:
+      return AdminState.DECOMMISSIONED;
+    case NORMAL:
+    default:
+      return AdminState.NORMAL;
+    }
+  }
+
+  public static LocatedBlockProto convert(LocatedBlock b) {
+    Builder builder = LocatedBlockProto.newBuilder();
+    DatanodeInfo[] locs = b.getLocations();
+    for(DatanodeInfo loc : locs) {
+      builder.addLocs(PBHelper.convert(loc));
+    }
+    return builder.setB(PBHelper.convert(b.getBlock()))
+        .setBlockToken(PBHelper.convert(b.getBlockToken()))
+        .setCorrupt(b.isCorrupt()).setOffset(b.getStartOffset()).build();
+  }
+
+  public static BlockTokenIdentifierProto convert(
+      Token<BlockTokenIdentifier> token) {
+    ByteString tokenId = ByteString.copyFrom(token.getIdentifier());
+    ByteString password = ByteString.copyFrom(token.getPassword());
+    return BlockTokenIdentifierProto.newBuilder().setIdentifier(tokenId)
+        .setKind(token.getKind().toString()).setPassword(password)
+        .setService(token.getService().toString()).build();
+  }
+
+  public static Token<BlockTokenIdentifier> convert(
+      BlockTokenIdentifierProto blockToken) {
+    return new Token<BlockTokenIdentifier>(blockToken.getIdentifier()
+        .toByteArray(), blockToken.getPassword().toByteArray(), new Text(
+        blockToken.getKind()), new Text(blockToken.getService()));
+  }
+
+  public static ReplicaState convert(ReplicaStateProto state) {
+    switch (state) {
+    case RBW:
+      return ReplicaState.RBW;
+    case RUR:
+      return ReplicaState.RUR;
+    case RWR:
+      return ReplicaState.RWR;
+    case TEMPORARY:
+      return ReplicaState.TEMPORARY;
+    case FINALIZED:
+    default:
+      return ReplicaState.FINALIZED;
+    }
+  }
 }
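The conversion helpers are written to be symmetric, so a round trip through the wire type should preserve every field. A minimal sketch of that round trip for Block, not taken from the patch, and assuming same-package access since PBHelper is package-private:

// Illustrative only. Block(long blockId, long numBytes, long generationStamp).
Block original = new Block(42L, 1024L, 100L);
BlockProto wire = PBHelper.convert(original);   // implementation -> protobuf
Block copy = PBHelper.convert(wire);            // protobuf -> implementation

assert copy.getBlockId() == original.getBlockId();
assert copy.getNumBytes() == original.getNumBytes();
assert copy.getGenerationStamp() == original.getGenerationStamp();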
@@ -36,4 +36,8 @@ public class BlockKey extends DelegationKey {
   public BlockKey(int keyId, long expiryDate, SecretKey key) {
     super(keyId, expiryDate, key);
   }
+
+  public BlockKey(int keyId, long expiryDate, byte[] encodedKey) {
+    super(keyId, expiryDate, encodedKey);
+  }
 }
@@ -68,14 +68,28 @@ public final class Util {
   }
 
   /**
-   * Converts the passed File to a URI.
+   * Converts the passed File to a URI. This method trims the trailing slash if
+   * one is appended because the underlying file is in fact a directory that
+   * exists.
    *
   * @param f the file to convert
   * @return the resulting URI
   * @throws IOException
   */
  public static URI fileAsURI(File f) throws IOException {
-    return f.getCanonicalFile().toURI();
+    URI u = f.getCanonicalFile().toURI();
+
+    // trim the trailing slash, if it's present
+    if (u.getPath().endsWith("/")) {
+      String uriAsString = u.toString();
+      try {
+        u = new URI(uriAsString.substring(0, uriAsString.length() - 1));
+      } catch (URISyntaxException e) {
+        throw new IOException(e);
+      }
+    }
+
+    return u;
  }
 
  /**
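The effect of the new trailing-slash handling, sketched with an assumed existing directory (File.toURI() appends a trailing slash for a directory that exists, which the change now strips):

// Illustrative only: the path is an assumption and must name an existing directory.
File dir = new File("/tmp/name");
URI u = Util.fileAsURI(dir);
// Previously a directory could yield something like "file:/tmp/name/";
// the returned URI now never ends with a trailing slash.
assert !u.toString().endsWith("/");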
@@ -0,0 +1,45 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.namenode;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+
+/**
+ * Implementers of this class represent a NN resource whose availability can be
+ * checked. A resource can be either "required" or "redundant". All required
+ * resources must be available for the NN to continue operating. The NN will
+ * continue to operate as long as *any* redundant resource is available.
+ */
+@InterfaceAudience.Private
+interface CheckableNameNodeResource {
+
+  /**
+   * Is this resource currently available.
+   *
+   * @return true if and only if the resource in question is available.
+   */
+  public boolean isResourceAvailable();
+
+  /**
+   * Is this resource required.
+   *
+   * @return true if and only if the resource in question is required for NN operation.
+   */
+  public boolean isRequired();
+
+}
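JournalSet.JournalAndStream and NameNodeResourceChecker.CheckedVolume (both later in this patch) are the real implementers of this interface. A minimal sketch of an implementation, purely illustrative and assuming code in the same package since the interface is package-private:

// Illustrative only: a trivial in-memory resource, e.g. for exercising the policy.
class DummyResource implements CheckableNameNodeResource {
  private final boolean available;
  private final boolean required;

  DummyResource(boolean available, boolean required) {
    this.available = available;
    this.required = required;
  }

  @Override
  public boolean isResourceAvailable() {
    return available;
  }

  @Override
  public boolean isRequired() {
    return required;
  }
}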
@@ -22,10 +22,10 @@ import java.net.URI;
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Collection;
-import java.util.Collections;
 import java.util.List;
 import java.lang.reflect.Constructor;
 
+import org.apache.commons.lang.StringUtils;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
@@ -126,6 +126,13 @@ public class FSEditLog {
   private NNStorage storage;
   private Configuration conf;
 
+  private Collection<URI> editsDirs;
+
+  /**
+   * The edit directories that are shared between primary and secondary.
+   */
+  private Collection<URI> sharedEditsDirs;
+
   private static class TransactionId {
     public long txid;
 
@@ -141,24 +148,22 @@ public class FSEditLog {
     }
   };
 
-  final private Collection<URI> editsDirs;
-
-  /**
-   * The edit directories that are shared between primary and secondary.
-   */
-  final private Collection<URI> sharedEditsDirs;
-
   /**
    * Construct FSEditLog with default configuration, taking editDirs from NNStorage
+   *
    * @param storage Storage object used by namenode
    */
   @VisibleForTesting
-  FSEditLog(NNStorage storage) {
-    this(new Configuration(), storage, Collections.<URI>emptyList());
+  FSEditLog(NNStorage storage) throws IOException {
+    Configuration conf = new Configuration();
+    // Make sure the edits dirs are set in the provided configuration object.
+    conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY,
+        StringUtils.join(storage.getEditsDirectories(), ","));
+    init(conf, storage, FSNamesystem.getNamespaceEditsDirs(conf));
   }
 
   /**
-   * Constructor for FSEditLog. Add underlying journals are constructed, but
+   * Constructor for FSEditLog. Underlying journals are constructed, but
    * no streams are opened until open() is called.
    *
    * @param conf The namenode configuration
@@ -166,26 +171,19 @@ public class FSEditLog {
    * @param editsDirs List of journals to use
    */
   FSEditLog(Configuration conf, NNStorage storage, Collection<URI> editsDirs) {
-    this.conf = conf;
+    init(conf, storage, editsDirs);
+  }
+
+  private void init(Configuration conf, NNStorage storage, Collection<URI> editsDirs) {
     isSyncRunning = false;
+    this.conf = conf;
     this.storage = storage;
     metrics = NameNode.getNameNodeMetrics();
     lastPrintTime = now();
 
-    if (editsDirs.isEmpty()) {
-      // if this is the case, no edit dirs have been explictly configured
-      // image dirs are to be used for edits too
-      try {
-        editsDirs = Lists.newArrayList(storage.getEditsDirectories());
-      } catch (IOException ioe) {
-        // cannot get list from storage, so the empty editsDirs
-        // will be assigned. an error will be thrown on first use
-        // of the editlog, as no journals will exist
-      }
-      this.editsDirs = editsDirs;
-    } else {
-      this.editsDirs = Lists.newArrayList(editsDirs);
-    }
+    // If this list is empty, an error will be thrown on first use
+    // of the editlog, as no journals will exist
+    this.editsDirs = Lists.newArrayList(editsDirs);
 
     this.sharedEditsDirs = FSNamesystem.getSharedEditsDirs(conf);
   }
@@ -212,15 +210,21 @@ public class FSEditLog {
   }
 
   private void initJournals(Collection<URI> dirs) {
-    this.journalSet = new JournalSet();
+    int minimumRedundantJournals = conf.getInt(
+        DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_MINIMUM_KEY,
+        DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_MINIMUM_DEFAULT);
+
+    journalSet = new JournalSet(minimumRedundantJournals);
     for (URI u : dirs) {
+      boolean required = FSNamesystem.getRequiredNamespaceEditsDirs(conf)
+          .contains(u);
       if (u.getScheme().equals(NNStorage.LOCAL_URI_SCHEME)) {
         StorageDirectory sd = storage.getStorageDirectory(u);
         if (sd != null) {
-          journalSet.add(new FileJournalManager(sd));
+          journalSet.add(new FileJournalManager(sd), required);
        }
      } else {
-        journalSet.add(createJournal(u));
+        journalSet.add(createJournal(u), required);
      }
    }
 
@@ -491,7 +495,7 @@ public class FSEditLog {
        }
        editLogStream.setReadyToFlush();
      } catch (IOException e) {
-        LOG.fatal("Could not sync any journal to persistent storage. "
+        LOG.fatal("Could not sync enough journals to persistent storage. "
            + "Unsynced transactions: " + (txid - synctxid),
            new Exception());
        runtime.exit(1);
@@ -513,7 +517,7 @@ public class FSEditLog {
        }
      } catch (IOException ex) {
        synchronized (this) {
-          LOG.fatal("Could not sync any journal to persistent storage. "
+          LOG.fatal("Could not sync enough journals to persistent storage. "
              + "Unsynced transactions: " + (txid - synctxid), new Exception());
          runtime.exit(1);
        }
@@ -966,7 +970,7 @@ public class FSEditLog {
 
    LOG.info("Registering new backup node: " + bnReg);
    BackupJournalManager bjm = new BackupJournalManager(bnReg, nnReg);
-    journalSet.add(bjm);
+    journalSet.add(bjm, true);
  }
 
  synchronized void releaseBackupStream(NamenodeRegistration registration)
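The new journal wiring is driven entirely by configuration. A hedged sketch of how the relevant keys fit together, written against the DFSConfigKeys constants used above (the literal key strings are not shown in this hunk); the paths are assumptions:

// Illustrative configuration sketch, not part of the patch.
Configuration conf = new Configuration();
// Three local edits directories...
conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY,
    "file:///data/1/edits,file:///data/2/edits,file:///data/3/edits");
// ...one of which must always be writable for the NN to keep running.
conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_REQUIRED_KEY,
    "file:///data/1/edits");
// At least this many of the remaining (redundant) journals must stay healthy.
conf.setInt(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_MINIMUM_KEY, 1);
// initJournals() above reads these settings, passes the per-journal
// "required" flag into JournalSet.add(), and hands the minimum to JournalSet.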
@@ -759,7 +759,7 @@ public class FSImage implements Closeable {
   * FSImageSaver assumes that it was launched from a thread that holds
   * FSNamesystem lock and waits for the execution of FSImageSaver thread
   * to finish.
-   * This way we are guraranteed that the namespace is not being updated
+   * This way we are guaranteed that the namespace is not being updated
   * while multiple instances of FSImageSaver are traversing it
   * and writing it out.
   */
@@ -33,6 +33,7 @@ import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_RENEW_INTERVAL_DEFAULT;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_RENEW_INTERVAL_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_REQUIRED_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_MAX_OBJECTS_DEFAULT;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_MAX_OBJECTS_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY;
@@ -548,6 +549,10 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
     return getStorageDirs(conf, DFS_NAMENODE_NAME_DIR_KEY);
   }
 
+  public static Collection<URI> getRequiredNamespaceEditsDirs(Configuration conf) {
+    return getStorageDirs(conf, DFS_NAMENODE_EDITS_DIR_REQUIRED_KEY);
+  }
+
   private static Collection<URI> getStorageDirs(Configuration conf,
       String propertyName) {
     Collection<String> dirNames = conf.getTrimmedStringCollection(propertyName);
@@ -581,7 +586,13 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
   public static Collection<URI> getNamespaceEditsDirs(Configuration conf) {
     Collection<URI> editsDirs = getStorageDirs(conf, DFS_NAMENODE_EDITS_DIR_KEY);
     editsDirs.addAll(getSharedEditsDirs(conf));
-    return editsDirs;
+    if (editsDirs.isEmpty()) {
+      // If this is the case, no edit dirs have been explicitly configured.
+      // Image dirs are to be used for edits too.
+      return getNamespaceDirs(conf);
+    } else {
+      return editsDirs;
+    }
   }
 
   /**
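A sketch of the fallback introduced above: when no edits dirs are configured at all, the image (name) dirs are reused for edits, so FSEditLog always sees at least one journal. The path is an assumption, and Configuration(false) is used only so that the default resources do not supply a value for the edits dir key:

// Illustrative only, not part of the patch.
Configuration conf = new Configuration(false);
conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY, "file:///data/1/name");
Collection<URI> editsDirs = FSNamesystem.getNamespaceEditsDirs(conf);
// editsDirs now contains file:///data/1/name rather than being empty.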
@@ -50,17 +50,16 @@ public class JournalSet implements JournalManager {
   *
   * If a Journal gets disabled due to an error writing to its
   * stream, then the stream will be aborted and set to null.
-   *
-   * This should be used outside JournalSet only for testing.
   */
-  @VisibleForTesting
-  static class JournalAndStream {
+  static class JournalAndStream implements CheckableNameNodeResource {
    private final JournalManager journal;
    private boolean disabled = false;
    private EditLogOutputStream stream;
+    private boolean required = false;
 
-    public JournalAndStream(JournalManager manager) {
+    public JournalAndStream(JournalManager manager, boolean required) {
      this.journal = manager;
+      this.required = required;
    }
 
    public void startLogSegment(long txId) throws IOException {
@@ -132,9 +131,24 @@ public class JournalSet implements JournalManager {
    private void setDisabled(boolean disabled) {
      this.disabled = disabled;
    }
 
+    @Override
+    public boolean isResourceAvailable() {
+      return !isDisabled();
+    }
+
+    @Override
+    public boolean isRequired() {
+      return required;
+    }
  }
 
  private List<JournalAndStream> journals = Lists.newArrayList();
+  final int minimumRedundantJournals;
+
+  JournalSet(int minimumRedundantResources) {
+    this.minimumRedundantJournals = minimumRedundantResources;
+  }
 
  @Override
  public EditLogOutputStream startLogSegment(final long txId) throws IOException {
@@ -232,16 +246,15 @@ public class JournalSet implements JournalManager {
  }
 
  /**
-   * Returns true if there are no journals or all are disabled.
-   * @return True if no journals or all are disabled.
+   * Returns true if there are no journals, all redundant journals are disabled,
+   * or any required journals are disabled.
+   *
+   * @return True if there no journals, all redundant journals are disabled,
+   * or any required journals are disabled.
   */
  public boolean isEmpty() {
-    for (JournalAndStream jas : journals) {
-      if (!jas.isDisabled()) {
-        return false;
-      }
-    }
-    return true;
+    return !NameNodeResourcePolicy.areResourcesAvailable(journals,
+        minimumRedundantJournals);
  }
 
  /**
@@ -292,9 +305,11 @@ public class JournalSet implements JournalManager {
      }
    }
    disableAndReportErrorOnJournals(badJAS);
-    if (badJAS.size() >= journals.size()) {
-      LOG.error("Error: "+status+" failed for all journals");
-      throw new IOException(status+" failed on all the journals");
+    if (!NameNodeResourcePolicy.areResourcesAvailable(journals,
+        minimumRedundantJournals)) {
+      String message = status + " failed for too many journals";
+      LOG.error("Error: " + message);
+      throw new IOException(message);
    }
  }
 
@@ -450,8 +465,9 @@ public class JournalSet implements JournalManager {
    return jList;
  }
 
-  void add(JournalManager j) {
-    journals.add(new JournalAndStream(j));
+  void add(JournalManager j, boolean required) {
+    JournalAndStream jas = new JournalAndStream(j, required);
+    journals.add(jas);
  }
 
  void remove(JournalManager j) {
@@ -27,6 +27,7 @@ import java.util.Map;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.DF;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
@@ -40,29 +41,74 @@ import com.google.common.base.Predicate;
  *
  * NameNodeResourceChecker provides a method -
  * <code>hasAvailableDiskSpace</code> - which will return true if and only if
- * the NameNode has disk space available on all volumes which are configured to
- * be checked. Volumes containing file system name/edits dirs are added by
- * default, and arbitrary extra volumes may be configured as well.
+ * the NameNode has disk space available on all required volumes, and any volume
+ * which is configured to be redundant. Volumes containing file system edits dirs
+ * are added by default, and arbitrary extra volumes may be configured as well.
  */
-public class NameNodeResourceChecker {
+@InterfaceAudience.Private
+class NameNodeResourceChecker {
   private static final Log LOG = LogFactory.getLog(NameNodeResourceChecker.class.getName());
 
   // Space (in bytes) reserved per volume.
   private long duReserved;
 
   private final Configuration conf;
-  private Map<String, DF> volumes;
+  private Map<String, CheckedVolume> volumes;
+  private int minimumRedundantVolumes;
+
+  @VisibleForTesting
+  class CheckedVolume implements CheckableNameNodeResource {
+    private DF df;
+    private boolean required;
+    private String volume;
+
+    public CheckedVolume(File dirToCheck, boolean required)
+        throws IOException {
+      df = new DF(dirToCheck, conf);
+      this.required = required;
+      volume = df.getFilesystem();
+    }
+
+    public String getVolume() {
+      return volume;
+    }
+
+    @Override
+    public boolean isRequired() {
+      return required;
+    }
+
+    @Override
+    public boolean isResourceAvailable() {
+      long availableSpace = df.getAvailable();
+      if (LOG.isDebugEnabled()) {
+        LOG.debug("Space available on volume '" + volume + "' is "
+            + availableSpace);
+      }
+      if (availableSpace < duReserved) {
+        LOG.warn("Space available on volume '" + volume + "' is "
+            + availableSpace +
+            ", which is below the configured reserved amount " + duReserved);
+        return false;
+      } else {
+        return true;
+      }
+    }
+
+    @Override
+    public String toString() {
+      return "volume: " + volume + " required: " + required +
+          " resource available: " + isResourceAvailable();
+    }
+  }
 
   /**
-   * Create a NameNodeResourceChecker, which will check the name dirs and edits
-   * dirs set in <code>conf</code>.
-   *
-   * @param conf
-   * @throws IOException
+   * Create a NameNodeResourceChecker, which will check the edits dirs and any
+   * additional dirs to check set in <code>conf</code>.
   */
  public NameNodeResourceChecker(Configuration conf) throws IOException {
    this.conf = conf;
-    volumes = new HashMap<String, DF>();
+    volumes = new HashMap<String, CheckedVolume>();
 
    duReserved = conf.getLong(DFSConfigKeys.DFS_NAMENODE_DU_RESERVED_KEY,
        DFSConfigKeys.DFS_NAMENODE_DU_RESERVED_DEFAULT);
@@ -70,8 +116,6 @@ public class NameNodeResourceChecker {
    Collection<URI> extraCheckedVolumes = Util.stringCollectionAsURIs(conf
        .getTrimmedStringCollection(DFSConfigKeys.DFS_NAMENODE_CHECKED_VOLUMES_KEY));
 
-    addDirsToCheck(FSNamesystem.getNamespaceDirs(conf));
-
    Collection<URI> localEditDirs = Collections2.filter(
        FSNamesystem.getNamespaceEditsDirs(conf),
        new Predicate<URI>() {
@@ -82,70 +126,86 @@ public class NameNodeResourceChecker {
            return false;
          }
        });
-    addDirsToCheck(localEditDirs);
-    addDirsToCheck(extraCheckedVolumes);
+
+    // Add all the local edits dirs, marking some as required if they are
+    // configured as such.
+    for (URI editsDirToCheck : localEditDirs) {
+      addDirToCheck(editsDirToCheck,
+          FSNamesystem.getRequiredNamespaceEditsDirs(conf).contains(
+              editsDirToCheck));
+    }
+
+    // All extra checked volumes are marked "required"
+    for (URI extraDirToCheck : extraCheckedVolumes) {
+      addDirToCheck(extraDirToCheck, true);
+    }
+
+    minimumRedundantVolumes = conf.getInt(
+        DFSConfigKeys.DFS_NAMENODE_CHECKED_VOLUMES_MINIMUM_KEY,
+        DFSConfigKeys.DFS_NAMENODE_CHECKED_VOLUMES_MINIMUM_DEFAULT);
  }
 
  /**
-   * Add the passed-in directories to the list of volumes to check.
+   * Add the volume of the passed-in directory to the list of volumes to check.
+   * If <code>required</code> is true, and this volume is already present, but
+   * is marked redundant, it will be marked required. If the volume is already
+   * present but marked required then this method is a no-op.
   *
-   * @param directoriesToCheck
-   *          The directories whose volumes will be checked for available space.
-   * @throws IOException
+   * @param directoryToCheck
+   *          The directory whose volume will be checked for available space.
   */
-  private void addDirsToCheck(Collection<URI> directoriesToCheck)
+  private void addDirToCheck(URI directoryToCheck, boolean required)
      throws IOException {
-    for (URI directoryUri : directoriesToCheck) {
-      File dir = new File(directoryUri.getPath());
-      if (!dir.exists()) {
-        throw new IOException("Missing directory "+dir.getAbsolutePath());
-      }
-      DF df = new DF(dir, conf);
-      volumes.put(df.getFilesystem(), df);
+    File dir = new File(directoryToCheck.getPath());
+    if (!dir.exists()) {
+      throw new IOException("Missing directory "+dir.getAbsolutePath());
+    }
+
+    CheckedVolume newVolume = new CheckedVolume(dir, required);
+    CheckedVolume volume = volumes.get(newVolume.getVolume());
+    if (volume == null || (volume != null && !volume.isRequired())) {
+      volumes.put(newVolume.getVolume(), newVolume);
    }
  }
 
  /**
   * Return true if disk space is available on at least one of the configured
-   * volumes.
+   * redundant volumes, and all of the configured required volumes.
   *
   * @return True if the configured amount of disk space is available on at
-   *         least one volume, false otherwise.
-   * @throws IOException
+   *         least one redundant volume and all of the required volumes, false
+   *         otherwise.
   */
  boolean hasAvailableDiskSpace()
      throws IOException {
-    return getVolumesLowOnSpace().size() < volumes.size();
+    return NameNodeResourcePolicy.areResourcesAvailable(volumes.values(),
+        minimumRedundantVolumes);
  }
 
  /**
   * Return the set of directories which are low on space.
+   *
   * @return the set of directories whose free space is below the threshold.
-   * @throws IOException
   */
+  @VisibleForTesting
  Collection<String> getVolumesLowOnSpace() throws IOException {
    if (LOG.isDebugEnabled()) {
      LOG.debug("Going to check the following volumes disk space: " + volumes);
    }
    Collection<String> lowVolumes = new ArrayList<String>();
-    for (DF volume : volumes.values()) {
-      long availableSpace = volume.getAvailable();
-      String fileSystem = volume.getFilesystem();
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Space available on volume '" + fileSystem + "' is " + availableSpace);
-      }
-      if (availableSpace < duReserved) {
-        LOG.warn("Space available on volume '" + fileSystem + "' is "
-            + availableSpace +
-            ", which is below the configured reserved amount " + duReserved);
-        lowVolumes.add(volume.getFilesystem());
-      }
+    for (CheckedVolume volume : volumes.values()) {
+      lowVolumes.add(volume.getVolume());
    }
    return lowVolumes;
  }
 
  @VisibleForTesting
-  void setVolumes(Map<String, DF> volumes) {
+  void setVolumes(Map<String, CheckedVolume> volumes) {
    this.volumes = volumes;
  }
+
+  @VisibleForTesting
+  void setMinimumReduntdantVolumes(int minimumRedundantVolumes) {
+    this.minimumRedundantVolumes = minimumRedundantVolumes;
+  }
 }
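A hedged sketch of how the reworked checker is configured; the class is package-private, so this assumes code in org.apache.hadoop.hdfs.server.namenode, and the paths are assumptions that must name existing directories:

// Illustrative only, not part of the patch.
Configuration conf = new Configuration();
conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY,
    "file:///data/1/edits,file:///data/2/edits");
// An extra volume to watch; extra checked volumes are always marked "required".
conf.set(DFSConfigKeys.DFS_NAMENODE_CHECKED_VOLUMES_KEY, "/data/3/scratch");
// Reserve 100 MB per volume and require at least one healthy redundant volume.
conf.setLong(DFSConfigKeys.DFS_NAMENODE_DU_RESERVED_KEY, 100L * 1024 * 1024);
conf.setInt(DFSConfigKeys.DFS_NAMENODE_CHECKED_VOLUMES_MINIMUM_KEY, 1);

NameNodeResourceChecker checker = new NameNodeResourceChecker(conf);
boolean ok = checker.hasAvailableDiskSpace(); // delegates to NameNodeResourcePolicy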
@ -0,0 +1,81 @@
|
||||||
|
/**
|
||||||
|
* Licensed to the Apache Software Foundation (ASF) under one
|
||||||
|
* or more contributor license agreements. See the NOTICE file
|
||||||
|
* distributed with this work for additional information
|
||||||
|
* regarding copyright ownership. The ASF licenses this file
|
||||||
|
* to you under the Apache License, Version 2.0 (the
|
||||||
|
* "License"); you may not use this file except in compliance
|
||||||
|
* with the License. You may obtain a copy of the License at
|
||||||
|
*
|
||||||
|
* http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
*
|
||||||
|
* Unless required by applicable law or agreed to in writing, software
|
||||||
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
* See the License for the specific language governing permissions and
|
||||||
|
* limitations under the License.
|
||||||
|
*/
|
||||||
|
package org.apache.hadoop.hdfs.server.namenode;
|
||||||
|
|
||||||
|
import java.util.Collection;
|
||||||
|
|
||||||
|
import org.apache.hadoop.classification.InterfaceAudience;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Given a set of checkable resources, this class is capable of determining
|
||||||
|
* whether sufficient resources are available for the NN to continue operating.
|
||||||
|
*/
|
||||||
|
@InterfaceAudience.Private
|
||||||
|
final class NameNodeResourcePolicy {
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Return true if and only if there are sufficient NN
|
||||||
|
* resources to continue logging edits.
|
||||||
|
*
|
||||||
|
* @param resources the collection of resources to check.
|
||||||
|
* @param minimumRedundantResources the minimum number of redundant resources
|
||||||
|
* required to continue operation.
|
||||||
|
* @return true if and only if there are sufficient NN resources to
|
||||||
|
* continue logging edits.
|
||||||
|
* @throws RuntimeException if the number of <b>configured</b>
|
||||||
|
* redundant resources is fewer than the minimum number of required
|
||||||
|
* redundant resources.
|
||||||
|
*/
|
||||||
|
static boolean areResourcesAvailable(
|
||||||
|
Collection<? extends CheckableNameNodeResource> resources,
|
||||||
|
int minimumRedundantResources) {
|
||||||
|
|
||||||
|
int requiredResourceCount = 0;
|
||||||
|
int redundantResourceCount = 0;
|
||||||
|
int disabledRedundantResourceCount = 0;
|
||||||
|
for (CheckableNameNodeResource resource : resources) {
|
||||||
|
if (!resource.isRequired()) {
|
||||||
|
redundantResourceCount++;
|
||||||
|
if (!resource.isResourceAvailable()) {
|
||||||
|
disabledRedundantResourceCount++;
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
requiredResourceCount++;
|
||||||
|
if (!resource.isResourceAvailable()) {
|
||||||
|
// Short circuit - a required resource is not available.
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if (redundantResourceCount < minimumRedundantResources) {
|
||||||
|
throw new RuntimeException("Need a minimum of " + minimumRedundantResources
|
||||||
|
+ " for NN to operate but only " + redundantResourceCount
|
||||||
|
+ " are configured.");
|
||||||
|
}
|
||||||
|
|
||||||
|
if (redundantResourceCount == 0) {
|
||||||
|
// If there are no redundant resources, return true if there are any
|
||||||
|
// required resources available.
|
||||||
|
return requiredResourceCount > 0;
|
||||||
|
} else {
|
||||||
|
return redundantResourceCount - disabledRedundantResourceCount >=
|
||||||
|
minimumRedundantResources;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
|
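A small evaluation sketch of the policy above. It lives in the same package because the class and the CheckableNameNodeResource interface are package-private, and it assumes the interface declares only the two methods used here (isRequired() and isResourceAvailable()); the class name is a placeholder:

    package org.apache.hadoop.hdfs.server.namenode;

    import java.util.ArrayList;
    import java.util.Collection;

    class ResourcePolicySketch {
      // A fixed-state resource, for illustration only.
      private static CheckableNameNodeResource resource(
          final boolean required, final boolean available) {
        return new CheckableNameNodeResource() {
          @Override
          public boolean isRequired() { return required; }
          @Override
          public boolean isResourceAvailable() { return available; }
        };
      }

      static void demo() {
        Collection<CheckableNameNodeResource> resources =
            new ArrayList<CheckableNameNodeResource>();
        resources.add(resource(true, true));   // required and healthy
        resources.add(resource(false, true));  // redundant and healthy
        resources.add(resource(false, false)); // redundant but failed
        // Two redundant resources are configured and one is healthy, so the
        // policy passes with a minimum of 1 and fails with a minimum of 2.
        assert NameNodeResourcePolicy.areResourcesAvailable(resources, 1);
        assert !NameNodeResourcePolicy.areResourcesAvailable(resources, 2);
      }
    }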
@ -0,0 +1,122 @@
|
||||||
|
/**
|
||||||
|
* Licensed to the Apache Software Foundation (ASF) under one
|
||||||
|
* or more contributor license agreements. See the NOTICE file
|
||||||
|
* distributed with this work for additional information
|
||||||
|
* regarding copyright ownership. The ASF licenses this file
|
||||||
|
* to you under the Apache License, Version 2.0 (the
|
||||||
|
* "License"); you may not use this file except in compliance
|
||||||
|
* with the License. You may obtain a copy of the License at
|
||||||
|
*
|
||||||
|
* http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
*
|
||||||
|
* Unless required by applicable law or agreed to in writing, software
|
||||||
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
* See the License for the specific language governing permissions and
|
||||||
|
* limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
// This file contains protocol buffers used by HDFS clients to communicate
|
||||||
|
// directly with datanodes.
|
||||||
|
|
||||||
|
option java_package = "org.apache.hadoop.hdfs.protocol.proto";
|
||||||
|
option java_outer_classname = "ClientDatanodeProtocolProtos";
|
||||||
|
option java_generic_services = true;
|
||||||
|
option java_generate_equals_and_hash = true;
|
||||||
|
|
||||||
|
import "hdfs.proto";
|
||||||
|
|
||||||
|
/**
|
||||||
|
* block - block for which visible length is requested
|
||||||
|
*/
|
||||||
|
message GetReplicaVisibleLengthRequestProto {
|
||||||
|
required ExtendedBlockProto block = 1;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* length - visible length of the block
|
||||||
|
*/
|
||||||
|
message GetReplicaVisibleLengthResponseProto {
|
||||||
|
required uint64 length = 1;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* void request
|
||||||
|
*/
|
||||||
|
message RefreshNamenodesRequestProto {
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* void response
|
||||||
|
*/
|
||||||
|
message RefreshNamenodesResponseProto {
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* blockPool - block pool to be deleted
|
||||||
|
* force - if false, delete the block pool only if it is empty.
|
||||||
|
* if true, delete the block pool even if it has blocks.
|
||||||
|
*/
|
||||||
|
message DeleteBlockPoolRequestProto {
|
||||||
|
required string blockPool = 1;
|
||||||
|
required bool force = 2;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* void response
|
||||||
|
*/
|
||||||
|
message DeleteBlockPoolResponseProto {
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Gets the file information where the block and its metadata are stored
|
||||||
|
* block - block for which path information is being requested
|
||||||
|
* token - block token
|
||||||
|
*/
|
||||||
|
message GetBlockLocalPathInfoRequestProto {
|
||||||
|
required ExtendedBlockProto block = 1;
|
||||||
|
required BlockTokenIdentifierProto token = 2;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* block - block for which file path information is being returned
|
||||||
|
* localPath - file path where the block data is stored
|
||||||
|
* localMetaPath - file path where the block meta data is stored
|
||||||
|
*/
|
||||||
|
message GetBlockLocalPathInfoResponseProto {
|
||||||
|
required ExtendedBlockProto block = 1;
|
||||||
|
required string localPath = 2;
|
||||||
|
required string localMetaPath = 3;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Protocol used from client to the Datanode.
|
||||||
|
* See the request and response for details of rpc call.
|
||||||
|
*/
|
||||||
|
service ClientDatanodeProtocolService {
|
||||||
|
/**
|
||||||
|
* Returns the visible length of the replica
|
||||||
|
*/
|
||||||
|
rpc getReplicaVisibleLength(GetReplicaVisibleLengthRequestProto)
|
||||||
|
returns(GetReplicaVisibleLengthResponseProto);
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Refresh the list of federated namenodes from updated configuration.
|
||||||
|
* Adds new namenodes and stops the deleted namenodes.
|
||||||
|
*/
|
||||||
|
rpc refreshNamenode(RefreshNamenodesRequestProto)
|
||||||
|
returns(RefreshNamenodesResponseProto);
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Delete the block pool from the datanode.
|
||||||
|
*/
|
||||||
|
rpc deleteBlockPool(DeleteBlockPoolRequestProto)
|
||||||
|
returns(DeleteBlockPoolResponseProto);
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Retrieves the path names of the block file and metadata file stored on the
|
||||||
|
* local file system.
|
||||||
|
*/
|
||||||
|
rpc getBlockLocalPathInfo(GetBlockLocalPathInfoRequestProto)
|
||||||
|
returns(GetBlockLocalPathInfoResponseProto);
|
||||||
|
}
|
|
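The stubs generated from the options above land in org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos and follow the usual protobuf-java builder conventions. A small sketch of building the visible-length request and reading the response; it assumes the caller already has the block in its protobuf form (for example via a PBHelper conversion):

    import org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthRequestProto;
    import org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthResponseProto;
    import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto;

    public class VisibleLengthSketch {
      // Wraps an already-converted block into the request message.
      static GetReplicaVisibleLengthRequestProto buildRequest(ExtendedBlockProto block) {
        return GetReplicaVisibleLengthRequestProto.newBuilder()
            .setBlock(block)
            .build();
      }

      // Pulls the single field out of the response message.
      static long visibleLength(GetReplicaVisibleLengthResponseProto response) {
        return response.getLength();
      }
    }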
@ -38,7 +38,7 @@ message InitReplicaRecoveryRequestProto {
|
||||||
* Replica recovery information
|
* Replica recovery information
|
||||||
*/
|
*/
|
||||||
message InitReplicaRecoveryResponseProto {
|
message InitReplicaRecoveryResponseProto {
|
||||||
required ReplicaState state = 1; // State fo the replica
|
required ReplicaStateProto state = 1; // State of the replica
|
||||||
required BlockProto block = 2; // block information
|
required BlockProto block = 2; // block information
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
@ -42,7 +42,7 @@ message GetBlocksRequestProto {
|
||||||
* blocks - List of returned blocks
|
* blocks - List of returned blocks
|
||||||
*/
|
*/
|
||||||
message GetBlocksResponseProto {
|
message GetBlocksResponseProto {
|
||||||
required BlockWithLocationsProto blocks = 1; // List of blocks
|
required BlocksWithLocationsProto blocks = 1; // List of blocks
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
|
@ -85,12 +85,25 @@ message RollEditLogResponseProto {
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* registartion - Namenode reporting the error
|
* void request
|
||||||
|
*/
|
||||||
|
message VersionRequestProto {
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* info - namespace information of the responding namenode
|
||||||
|
*/
|
||||||
|
message VersionResponseProto {
|
||||||
|
required NamespaceInfoProto info = 1;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* registration - Namenode reporting the error
|
||||||
* errorCode - error code indicating the error
|
* errorCode - error code indicating the error
|
||||||
* msg - Free text description of the error
|
* msg - Free text description of the error
|
||||||
*/
|
*/
|
||||||
message ErrorReportRequestProto {
|
message ErrorReportRequestProto {
|
||||||
required NamenodeRegistrationProto registartion = 1; // Registartion info
|
required NamenodeRegistrationProto registration = 1; // Registration info
|
||||||
required uint32 errorCode = 2; // Error code
|
required uint32 errorCode = 2; // Error code
|
||||||
required string msg = 3; // Error message
|
required string msg = 3; // Error message
|
||||||
}
|
}
|
||||||
|
@ -193,6 +206,11 @@ service NamenodeProtocolService {
|
||||||
*/
|
*/
|
||||||
rpc rollEditLog(RollEditLogRequestProto) returns(RollEditLogResponseProto);
|
rpc rollEditLog(RollEditLogRequestProto) returns(RollEditLogResponseProto);
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Request the version and namespace information of the active namenode
|
||||||
|
*/
|
||||||
|
rpc versionRequest(VersionRequestProto) returns(VersionResponseProto);
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Report from a sub-ordinate namenode of an error to the active namenode.
|
* Report from a sub-ordinate namenode of an error to the active namenode.
|
||||||
* Active namenode may decide to unregister the reporting namenode
|
* Active namenode may decide to unregister the reporting namenode
|
||||||
|
|
|
@ -270,8 +270,8 @@ message BlockProto {
|
||||||
* Block and datanodes where it is located
|
* Block and datanodes where it is located
|
||||||
*/
|
*/
|
||||||
message BlockWithLocationsProto {
|
message BlockWithLocationsProto {
|
||||||
required BlockProto block = 1; // Block
|
required BlockProto block = 1; // Block
|
||||||
repeated DatanodeIDProto datanodeIDs = 2; // Datanodes with replicas of the block
|
repeated string datanodeIDs = 2; // Datanodes with replicas of the block
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
|
@ -329,7 +329,7 @@ message ExportedBlockKeysProto {
|
||||||
/**
|
/**
|
||||||
* State of a block replica at a datanode
|
* State of a block replica at a datanode
|
||||||
*/
|
*/
|
||||||
enum ReplicaState {
|
enum ReplicaStateProto {
|
||||||
FINALIZED = 0; // State of a replica when it is not modified
|
FINALIZED = 0; // State of a replica when it is not modified
|
||||||
RBW = 1; // State of replica that is being written to
|
RBW = 1; // State of replica that is being written to
|
||||||
RWR = 2; // State of replica that is waiting to be recovered
|
RWR = 2; // State of replica that is waiting to be recovered
|
||||||
|
|
|
@ -19,12 +19,46 @@ package org.apache.hadoop.hdfs.protocolPB;
|
||||||
|
|
||||||
import static junit.framework.Assert.*;
|
import static junit.framework.Assert.*;
|
||||||
|
|
||||||
|
import java.util.ArrayList;
|
||||||
|
import java.util.Arrays;
|
||||||
|
import java.util.EnumSet;
|
||||||
|
import java.util.List;
|
||||||
|
|
||||||
|
import org.apache.hadoop.hdfs.protocol.Block;
|
||||||
|
import org.apache.hadoop.hdfs.protocol.DatanodeID;
|
||||||
|
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
|
||||||
|
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
|
||||||
|
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockKeyProto;
|
||||||
|
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto;
|
||||||
|
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenIdentifierProto;
|
||||||
|
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockWithLocationsProto;
|
||||||
|
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlocksWithLocationsProto;
|
||||||
|
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointCommandProto;
|
||||||
|
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointSignatureProto;
|
||||||
|
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto;
|
||||||
|
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExportedBlockKeysProto;
|
||||||
|
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto;
|
||||||
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto;
|
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto;
|
||||||
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto.NamenodeRoleProto;
|
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto.NamenodeRoleProto;
|
||||||
|
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RecoveringBlockProto;
|
||||||
|
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProto;
|
||||||
|
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogProto;
|
||||||
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto;
|
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto;
|
||||||
|
import org.apache.hadoop.hdfs.security.token.block.BlockKey;
|
||||||
|
import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
|
||||||
|
import org.apache.hadoop.hdfs.security.token.block.ExportedBlockKeys;
|
||||||
|
import org.apache.hadoop.hdfs.security.token.block.BlockTokenSecretManager.AccessMode;
|
||||||
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NamenodeRole;
|
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NamenodeRole;
|
||||||
import org.apache.hadoop.hdfs.server.common.StorageInfo;
|
import org.apache.hadoop.hdfs.server.common.StorageInfo;
|
||||||
|
import org.apache.hadoop.hdfs.server.namenode.CheckpointSignature;
|
||||||
|
import org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringBlock;
|
||||||
|
import org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations;
|
||||||
|
import org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations.BlockWithLocations;
|
||||||
import org.apache.hadoop.hdfs.server.protocol.NamenodeRegistration;
|
import org.apache.hadoop.hdfs.server.protocol.NamenodeRegistration;
|
||||||
|
import org.apache.hadoop.hdfs.server.protocol.RemoteEditLog;
|
||||||
|
import org.apache.hadoop.hdfs.server.protocol.RemoteEditLogManifest;
|
||||||
|
import org.apache.hadoop.io.Text;
|
||||||
|
import org.apache.hadoop.security.token.Token;
|
||||||
import org.junit.Test;
|
import org.junit.Test;
|
||||||
|
|
||||||
/**
|
/**
|
||||||
|
@ -47,9 +81,13 @@ public class TestPBHelper {
|
||||||
PBHelper.convert(NamenodeRoleProto.NAMENODE));
|
PBHelper.convert(NamenodeRoleProto.NAMENODE));
|
||||||
}
|
}
|
||||||
|
|
||||||
|
private static StorageInfo getStorageInfo() {
|
||||||
|
return new StorageInfo(1, 2, "cid", 3);
|
||||||
|
}
|
||||||
|
|
||||||
@Test
|
@Test
|
||||||
public void testConvertStoragInfo() {
|
public void testConvertStoragInfo() {
|
||||||
StorageInfo info = new StorageInfo(1, 2, "cid", 3);
|
StorageInfo info = getStorageInfo();
|
||||||
StorageInfoProto infoProto = PBHelper.convert(info);
|
StorageInfoProto infoProto = PBHelper.convert(info);
|
||||||
StorageInfo info2 = PBHelper.convert(infoProto);
|
StorageInfo info2 = PBHelper.convert(infoProto);
|
||||||
assertEquals(info.getClusterID(), info2.getClusterID());
|
assertEquals(info.getClusterID(), info2.getClusterID());
|
||||||
|
@ -60,7 +98,7 @@ public class TestPBHelper {
|
||||||
|
|
||||||
@Test
|
@Test
|
||||||
public void testConvertNamenodeRegistration() {
|
public void testConvertNamenodeRegistration() {
|
||||||
StorageInfo info = new StorageInfo(1, 2, "cid", 3);
|
StorageInfo info = getStorageInfo();
|
||||||
NamenodeRegistration reg = new NamenodeRegistration("address:999",
|
NamenodeRegistration reg = new NamenodeRegistration("address:999",
|
||||||
"http:1000", info, NamenodeRole.NAMENODE);
|
"http:1000", info, NamenodeRole.NAMENODE);
|
||||||
NamenodeRegistrationProto regProto = PBHelper.convert(reg);
|
NamenodeRegistrationProto regProto = PBHelper.convert(reg);
|
||||||
|
@ -76,4 +114,215 @@ public class TestPBHelper {
|
||||||
assertEquals(reg.getVersion(), reg2.getVersion());
|
assertEquals(reg.getVersion(), reg2.getVersion());
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@Test
|
||||||
|
public void testConvertDatanodeID() {
|
||||||
|
DatanodeID dn = new DatanodeID("node", "sid", 1, 2);
|
||||||
|
DatanodeIDProto dnProto = PBHelper.convert(dn);
|
||||||
|
DatanodeID dn2 = PBHelper.convert(dnProto);
|
||||||
|
assertEquals(dn.getHost(), dn2.getHost());
|
||||||
|
assertEquals(dn.getInfoPort(), dn2.getInfoPort());
|
||||||
|
assertEquals(dn.getIpcPort(), dn2.getIpcPort());
|
||||||
|
assertEquals(dn.getName(), dn2.getName());
|
||||||
|
assertEquals(dn.getPort(), dn2.getPort());
|
||||||
|
assertEquals(dn.getStorageID(), dn2.getStorageID());
|
||||||
|
}
|
||||||
|
|
||||||
|
@Test
|
||||||
|
public void testConvertBlock() {
|
||||||
|
Block b = new Block(1, 100, 3);
|
||||||
|
BlockProto bProto = PBHelper.convert(b);
|
||||||
|
Block b2 = PBHelper.convert(bProto);
|
||||||
|
assertEquals(b, b2);
|
||||||
|
}
|
||||||
|
|
||||||
|
private static BlockWithLocations getBlockWithLocations(int bid) {
|
||||||
|
return new BlockWithLocations(new Block(bid, 0, 1), new String[] { "dn1",
|
||||||
|
"dn2", "dn3" });
|
||||||
|
}
|
||||||
|
|
||||||
|
private void compare(BlockWithLocations locs1, BlockWithLocations locs2) {
|
||||||
|
assertEquals(locs1.getBlock(), locs2.getBlock());
|
||||||
|
assertTrue(Arrays.equals(locs1.getDatanodes(), locs2.getDatanodes()));
|
||||||
|
}
|
||||||
|
|
||||||
|
@Test
|
||||||
|
public void testConvertBlockWithLocations() {
|
||||||
|
BlockWithLocations locs = getBlockWithLocations(1);
|
||||||
|
BlockWithLocationsProto locsProto = PBHelper.convert(locs);
|
||||||
|
BlockWithLocations locs2 = PBHelper.convert(locsProto);
|
||||||
|
compare(locs, locs2);
|
||||||
|
}
|
||||||
|
|
||||||
|
@Test
|
||||||
|
public void testConvertBlocksWithLocations() {
|
||||||
|
BlockWithLocations[] list = new BlockWithLocations[] {
|
||||||
|
getBlockWithLocations(1), getBlockWithLocations(2) };
|
||||||
|
BlocksWithLocations locs = new BlocksWithLocations(list);
|
||||||
|
BlocksWithLocationsProto locsProto = PBHelper.convert(locs);
|
||||||
|
BlocksWithLocations locs2 = PBHelper.convert(locsProto);
|
||||||
|
BlockWithLocations[] blocks = locs.getBlocks();
|
||||||
|
BlockWithLocations[] blocks2 = locs2.getBlocks();
|
||||||
|
assertEquals(blocks.length, blocks2.length);
|
||||||
|
for (int i = 0; i < blocks.length; i++) {
|
||||||
|
compare(blocks[i], blocks2[i]);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
private static BlockKey getBlockKey(int keyId) {
|
||||||
|
return new BlockKey(keyId, 10, "encodedKey".getBytes());
|
||||||
|
}
|
||||||
|
|
||||||
|
private void compare(BlockKey k1, BlockKey k2) {
|
||||||
|
assertEquals(k1.getExpiryDate(), k2.getExpiryDate());
|
||||||
|
assertEquals(k1.getKeyId(), k2.getKeyId());
|
||||||
|
assertTrue(Arrays.equals(k1.getEncodedKey(), k2.getEncodedKey()));
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
@Test
|
||||||
|
public void testConvertBlockKey() {
|
||||||
|
BlockKey key = getBlockKey(1);
|
||||||
|
BlockKeyProto keyProto = PBHelper.convert(key);
|
||||||
|
BlockKey key1 = PBHelper.convert(keyProto);
|
||||||
|
compare(key, key1);
|
||||||
|
}
|
||||||
|
|
||||||
|
@Test
|
||||||
|
public void testConvertExportedBlockKeys() {
|
||||||
|
BlockKey[] keys = new BlockKey[] { getBlockKey(2), getBlockKey(3) };
|
||||||
|
ExportedBlockKeys expKeys = new ExportedBlockKeys(true, 9, 10,
|
||||||
|
getBlockKey(1), keys);
|
||||||
|
ExportedBlockKeysProto expKeysProto = PBHelper.convert(expKeys);
|
||||||
|
ExportedBlockKeys expKeys1 = PBHelper.convert(expKeysProto);
|
||||||
|
|
||||||
|
BlockKey[] allKeys = expKeys.getAllKeys();
|
||||||
|
BlockKey[] allKeys1 = expKeys1.getAllKeys();
|
||||||
|
assertEquals(allKeys.length, allKeys1.length);
|
||||||
|
for (int i = 0; i < allKeys.length; i++) {
|
||||||
|
compare(allKeys[i], allKeys1[i]);
|
||||||
|
}
|
||||||
|
compare(expKeys.getCurrentKey(), expKeys1.getCurrentKey());
|
||||||
|
assertEquals(expKeys.getKeyUpdateInterval(),
|
||||||
|
expKeys1.getKeyUpdateInterval());
|
||||||
|
assertEquals(expKeys.getTokenLifetime(), expKeys1.getTokenLifetime());
|
||||||
|
}
|
||||||
|
|
||||||
|
@Test
|
||||||
|
public void testConvertCheckpointSignature() {
|
||||||
|
CheckpointSignature s = new CheckpointSignature(getStorageInfo(), "bpid",
|
||||||
|
100, 1);
|
||||||
|
CheckpointSignatureProto sProto = PBHelper.convert(s);
|
||||||
|
CheckpointSignature s1 = PBHelper.convert(sProto);
|
||||||
|
assertEquals(s.getBlockpoolID(), s1.getBlockpoolID());
|
||||||
|
assertEquals(s.getClusterID(), s1.getClusterID());
|
||||||
|
assertEquals(s.getCTime(), s1.getCTime());
|
||||||
|
assertEquals(s.getCurSegmentTxId(), s1.getCurSegmentTxId());
|
||||||
|
assertEquals(s.getLayoutVersion(), s1.getLayoutVersion());
|
||||||
|
assertEquals(s.getMostRecentCheckpointTxId(),
|
||||||
|
s1.getMostRecentCheckpointTxId());
|
||||||
|
assertEquals(s.getNamespaceID(), s1.getNamespaceID());
|
||||||
|
}
|
||||||
|
|
||||||
|
private static void compare(RemoteEditLog l1, RemoteEditLog l2) {
|
||||||
|
assertEquals(l1.getEndTxId(), l2.getEndTxId());
|
||||||
|
assertEquals(l1.getStartTxId(), l2.getStartTxId());
|
||||||
|
}
|
||||||
|
|
||||||
|
@Test
|
||||||
|
public void testConvertRemoteEditLog() {
|
||||||
|
RemoteEditLog l = new RemoteEditLog(1, 100);
|
||||||
|
RemoteEditLogProto lProto = PBHelper.convert(l);
|
||||||
|
RemoteEditLog l1 = PBHelper.convert(lProto);
|
||||||
|
compare(l, l1);
|
||||||
|
}
|
||||||
|
|
||||||
|
@Test
|
||||||
|
public void testConvertRemoteEditLogManifest() {
|
||||||
|
List<RemoteEditLog> logs = new ArrayList<RemoteEditLog>();
|
||||||
|
logs.add(new RemoteEditLog(1, 10));
|
||||||
|
logs.add(new RemoteEditLog(11, 20));
|
||||||
|
RemoteEditLogManifest m = new RemoteEditLogManifest(logs);
|
||||||
|
RemoteEditLogManifestProto mProto = PBHelper.convert(m);
|
||||||
|
RemoteEditLogManifest m1 = PBHelper.convert(mProto);
|
||||||
|
|
||||||
|
List<RemoteEditLog> logs1 = m1.getLogs();
|
||||||
|
assertEquals(logs.size(), logs1.size());
|
||||||
|
for (int i = 0; i < logs.size(); i++) {
|
||||||
|
compare(logs.get(i), logs1.get(i));
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
public ExtendedBlock getExtendedBlock() {
|
||||||
|
return new ExtendedBlock("bpid", 1, 100, 2);
|
||||||
|
}
|
||||||
|
|
||||||
|
public DatanodeInfo getDNInfo() {
|
||||||
|
return new DatanodeInfo(new DatanodeID("node", "sid", 1, 2));
|
||||||
|
}
|
||||||
|
|
||||||
|
private void compare(DatanodeInfo dn1, DatanodeInfo dn2) {
|
||||||
|
assertEquals(dn1.getAdminState(), dn2.getAdminState());
|
||||||
|
assertEquals(dn1.getBlockPoolUsed(), dn2.getBlockPoolUsed());
|
||||||
|
assertEquals(dn1.getBlockPoolUsedPercent(), dn2.getBlockPoolUsedPercent());
|
||||||
|
assertEquals(dn1.getCapacity(), dn2.getCapacity());
|
||||||
|
assertEquals(dn1.getDatanodeReport(), dn2.getDatanodeReport());
|
||||||
|
assertEquals(dn1.getDfsUsed(), dn2.getDfsUsed());
|
||||||
|
assertEquals(dn1.getDfsUsedPercent(), dn2.getDfsUsedPercent());
|
||||||
|
assertEquals(dn1.getHost(), dn2.getHost());
|
||||||
|
assertEquals(dn1.getHostName(), dn2.getHostName());
|
||||||
|
assertEquals(dn1.getInfoPort(), dn2.getInfoPort());
|
||||||
|
assertEquals(dn1.getIpcPort(), dn2.getIpcPort());
|
||||||
|
assertEquals(dn1.getLastUpdate(), dn2.getLastUpdate());
|
||||||
|
assertEquals(dn1.getLevel(), dn2.getLevel());
|
||||||
|
assertEquals(dn1.getNetworkLocation(), dn2.getNetworkLocation());
|
||||||
|
}
|
||||||
|
|
||||||
|
@Test
|
||||||
|
public void testConvertExtendedBlock() {
|
||||||
|
ExtendedBlock b = getExtendedBlock();
|
||||||
|
ExtendedBlockProto bProto = PBHelper.convert(b);
|
||||||
|
ExtendedBlock b1 = PBHelper.convert(bProto);
|
||||||
|
assertEquals(b, b1);
|
||||||
|
|
||||||
|
b.setBlockId(-1);
|
||||||
|
bProto = PBHelper.convert(b);
|
||||||
|
b1 = PBHelper.convert(bProto);
|
||||||
|
assertEquals(b, b1);
|
||||||
|
}
|
||||||
|
|
||||||
|
@Test
|
||||||
|
public void testConvertRecoveringBlock() {
|
||||||
|
DatanodeInfo[] dnInfo = new DatanodeInfo[] { getDNInfo(), getDNInfo() };
|
||||||
|
RecoveringBlock b = new RecoveringBlock(getExtendedBlock(), dnInfo, 3);
|
||||||
|
RecoveringBlockProto bProto = PBHelper.convert(b);
|
||||||
|
RecoveringBlock b1 = PBHelper.convert(bProto);
|
||||||
|
assertEquals(b.getBlock(), b1.getBlock());
|
||||||
|
DatanodeInfo[] dnInfo1 = b1.getLocations();
|
||||||
|
assertEquals(dnInfo.length, dnInfo1.length);
|
||||||
|
for (int i=0; i < dnInfo.length; i++) {
|
||||||
|
compare(dnInfo[i], dnInfo1[i]);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
@Test
|
||||||
|
public void testConvertText() {
|
||||||
|
Text t = new Text("abc".getBytes());
|
||||||
|
String s = t.toString();
|
||||||
|
Text t1 = new Text(s);
|
||||||
|
assertEquals(t, t1);
|
||||||
|
}
|
||||||
|
|
||||||
|
@Test
|
||||||
|
public void testBlockTokenIdentifier() {
|
||||||
|
Token<BlockTokenIdentifier> token = new Token<BlockTokenIdentifier>(
|
||||||
|
"identifier".getBytes(), "password".getBytes(), new Text("kind"),
|
||||||
|
new Text("service"));
|
||||||
|
BlockTokenIdentifierProto tokenProto = PBHelper.convert(token);
|
||||||
|
Token<BlockTokenIdentifier> token2 = PBHelper.convert(tokenProto);
|
||||||
|
assertTrue(Arrays.equals(token.getIdentifier(), token2.getIdentifier()));
|
||||||
|
assertTrue(Arrays.equals(token.getPassword(), token2.getPassword()));
|
||||||
|
assertEquals(token.getKind(), token2.getKind());
|
||||||
|
assertEquals(token.getService(), token2.getService());
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
|
@ -47,7 +47,7 @@ public class TestClusterId {
|
||||||
private String getClusterId(Configuration config) throws IOException {
|
private String getClusterId(Configuration config) throws IOException {
|
||||||
// Check that the cluster id is not empty.
|
// Check that the cluster id is not empty.
|
||||||
Collection<URI> dirsToFormat = FSNamesystem.getNamespaceDirs(config);
|
Collection<URI> dirsToFormat = FSNamesystem.getNamespaceDirs(config);
|
||||||
Collection<URI> editsToFormat = new ArrayList<URI>(0);
|
Collection<URI> editsToFormat = FSNamesystem.getNamespaceEditsDirs(config);
|
||||||
FSImage fsImage = new FSImage(config, dirsToFormat, editsToFormat);
|
FSImage fsImage = new FSImage(config, dirsToFormat, editsToFormat);
|
||||||
|
|
||||||
Iterator<StorageDirectory> sdit =
|
Iterator<StorageDirectory> sdit =
|
||||||
|
|
|
@ -21,28 +21,32 @@ import static org.junit.Assert.assertFalse;
|
||||||
import static org.junit.Assert.assertTrue;
|
import static org.junit.Assert.assertTrue;
|
||||||
import static org.mockito.Matchers.anyInt;
|
import static org.mockito.Matchers.anyInt;
|
||||||
import static org.mockito.Matchers.any;
|
import static org.mockito.Matchers.any;
|
||||||
|
import static org.mockito.Mockito.atLeast;
|
||||||
import static org.mockito.Mockito.doNothing;
|
import static org.mockito.Mockito.doNothing;
|
||||||
import static org.mockito.Mockito.doThrow;
|
import static org.mockito.Mockito.doThrow;
|
||||||
import static org.mockito.Mockito.spy;
|
import static org.mockito.Mockito.spy;
|
||||||
import static org.mockito.Mockito.times;
|
import static org.mockito.Mockito.times;
|
||||||
import static org.mockito.Mockito.verify;
|
import static org.mockito.Mockito.verify;
|
||||||
|
|
||||||
|
import java.io.File;
|
||||||
import java.io.IOException;
|
import java.io.IOException;
|
||||||
|
|
||||||
|
import org.apache.commons.lang.StringUtils;
|
||||||
import org.apache.hadoop.conf.Configuration;
|
import org.apache.hadoop.conf.Configuration;
|
||||||
import org.apache.hadoop.fs.FileSystem;
|
import org.apache.hadoop.fs.FileSystem;
|
||||||
import org.apache.hadoop.fs.Path;
|
import org.apache.hadoop.fs.Path;
|
||||||
|
import org.apache.hadoop.hdfs.DFSConfigKeys;
|
||||||
import org.apache.hadoop.hdfs.HdfsConfiguration;
|
import org.apache.hadoop.hdfs.HdfsConfiguration;
|
||||||
import org.apache.hadoop.hdfs.MiniDFSCluster;
|
import org.apache.hadoop.hdfs.MiniDFSCluster;
|
||||||
import org.apache.hadoop.hdfs.server.namenode.JournalSet.JournalAndStream;
|
import org.apache.hadoop.hdfs.server.namenode.JournalSet.JournalAndStream;
|
||||||
import org.junit.After;
|
import org.junit.After;
|
||||||
import org.junit.Before;
|
import org.junit.Before;
|
||||||
import org.junit.Test;
|
import org.junit.Test;
|
||||||
|
import org.mockito.verification.VerificationMode;
|
||||||
|
|
||||||
public class TestEditLogJournalFailures {
|
public class TestEditLogJournalFailures {
|
||||||
|
|
||||||
private int editsPerformed = 0;
|
private int editsPerformed = 0;
|
||||||
private Configuration conf;
|
|
||||||
private MiniDFSCluster cluster;
|
private MiniDFSCluster cluster;
|
||||||
private FileSystem fs;
|
private FileSystem fs;
|
||||||
private Runtime runtime;
|
private Runtime runtime;
|
||||||
|
@ -53,8 +57,13 @@ public class TestEditLogJournalFailures {
|
||||||
*/
|
*/
|
||||||
@Before
|
@Before
|
||||||
public void setUpMiniCluster() throws IOException {
|
public void setUpMiniCluster() throws IOException {
|
||||||
conf = new HdfsConfiguration();
|
setUpMiniCluster(new HdfsConfiguration(), true);
|
||||||
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
|
}
|
||||||
|
|
||||||
|
public void setUpMiniCluster(Configuration conf, boolean manageNameDfsDirs)
|
||||||
|
throws IOException {
|
||||||
|
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0)
|
||||||
|
.manageNameDfsDirs(manageNameDfsDirs).build();
|
||||||
cluster.waitActive();
|
cluster.waitActive();
|
||||||
fs = cluster.getFileSystem();
|
fs = cluster.getFileSystem();
|
||||||
|
|
||||||
|
@ -67,8 +76,10 @@ public class TestEditLogJournalFailures {
|
||||||
|
|
||||||
@After
|
@After
|
||||||
public void shutDownMiniCluster() throws IOException {
|
public void shutDownMiniCluster() throws IOException {
|
||||||
fs.close();
|
if (fs != null)
|
||||||
cluster.shutdown();
|
fs.close();
|
||||||
|
if (cluster != null)
|
||||||
|
cluster.shutdown();
|
||||||
}
|
}
|
||||||
|
|
||||||
@Test
|
@Test
|
||||||
|
@ -109,7 +120,7 @@ public class TestEditLogJournalFailures {
|
||||||
assertTrue(doAnEdit());
|
assertTrue(doAnEdit());
|
||||||
// The previous edit could not be synced to any persistent storage, should
|
// The previous edit could not be synced to any persistent storage, should
|
||||||
// have halted the NN.
|
// have halted the NN.
|
||||||
assertExitInvocations(1);
|
assertExitInvocations(atLeast(1));
|
||||||
}
|
}
|
||||||
|
|
||||||
@Test
|
@Test
|
||||||
|
@ -125,6 +136,80 @@ public class TestEditLogJournalFailures {
|
||||||
assertFalse(cluster.getNameNode().isInSafeMode());
|
assertFalse(cluster.getNameNode().isInSafeMode());
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@Test
|
||||||
|
public void testSingleRequiredFailedEditsDirOnSetReadyToFlush()
|
||||||
|
throws IOException {
|
||||||
|
// Set one of the edits dirs to be required.
|
||||||
|
String[] editsDirs = cluster.getConfiguration(0).getTrimmedStrings(
|
||||||
|
DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY);
|
||||||
|
shutDownMiniCluster();
|
||||||
|
Configuration conf = new HdfsConfiguration();
|
||||||
|
conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_REQUIRED_KEY, editsDirs[1]);
|
||||||
|
conf.setInt(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_MINIMUM_KEY, 0);
|
||||||
|
conf.setInt(DFSConfigKeys.DFS_NAMENODE_CHECKED_VOLUMES_MINIMUM_KEY, 0);
|
||||||
|
setUpMiniCluster(conf, true);
|
||||||
|
|
||||||
|
assertTrue(doAnEdit());
|
||||||
|
// Invalidate the one required edits journal.
|
||||||
|
invalidateEditsDirAtIndex(1, false, false);
|
||||||
|
// Make sure runtime.exit(...) hasn't been called at all yet.
|
||||||
|
assertExitInvocations(0);
|
||||||
|
|
||||||
|
// This will actually return true in the tests, since the NN will not in
|
||||||
|
// fact call Runtime.exit();
|
||||||
|
doAnEdit();
|
||||||
|
|
||||||
|
// A single failure of a required journal should result in a call to
|
||||||
|
// runtime.exit(...).
|
||||||
|
assertExitInvocations(atLeast(1));
|
||||||
|
}
|
||||||
|
|
||||||
|
@Test
|
||||||
|
public void testMultipleRedundantFailedEditsDirOnSetReadyToFlush()
|
||||||
|
throws IOException {
|
||||||
|
// Set up 4 name/edits dirs.
|
||||||
|
shutDownMiniCluster();
|
||||||
|
Configuration conf = new HdfsConfiguration();
|
||||||
|
String[] nameDirs = new String[4];
|
||||||
|
for (int i = 0; i < nameDirs.length; i++) {
|
||||||
|
File nameDir = new File(System.getProperty("test.build.data"),
|
||||||
|
"name-dir" + i);
|
||||||
|
nameDir.mkdirs();
|
||||||
|
nameDirs[i] = nameDir.getAbsolutePath();
|
||||||
|
}
|
||||||
|
conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,
|
||||||
|
StringUtils.join(nameDirs, ","));
|
||||||
|
|
||||||
|
// Keep running unless fewer than 2 edits dirs remain.
|
||||||
|
conf.setInt(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_MINIMUM_KEY, 2);
|
||||||
|
setUpMiniCluster(conf, false);
|
||||||
|
|
||||||
|
// All journals active.
|
||||||
|
assertTrue(doAnEdit());
|
||||||
|
assertExitInvocations(0);
|
||||||
|
|
||||||
|
// Invalidate 1/4 of the redundant journals.
|
||||||
|
invalidateEditsDirAtIndex(0, false, false);
|
||||||
|
assertTrue(doAnEdit());
|
||||||
|
assertExitInvocations(0);
|
||||||
|
|
||||||
|
// Invalidate 2/4 of the redundant journals.
|
||||||
|
invalidateEditsDirAtIndex(1, false, false);
|
||||||
|
assertTrue(doAnEdit());
|
||||||
|
assertExitInvocations(0);
|
||||||
|
|
||||||
|
// Invalidate 3/4 of the redundant journals.
|
||||||
|
invalidateEditsDirAtIndex(2, false, false);
|
||||||
|
|
||||||
|
// This will actually return true in the tests, since the NN will not in
|
||||||
|
// fact call Runtime.exit();
|
||||||
|
doAnEdit();
|
||||||
|
|
||||||
|
// Failing enough journals that fewer than the configured minimum number of
|
||||||
|
// redundant journals remain should result in a call to runtime.exit(...).
|
||||||
|
assertExitInvocations(atLeast(1));
|
||||||
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Replace the journal at index <code>index</code> with one that throws an
|
* Replace the journal at index <code>index</code> with one that throws an
|
||||||
* exception on flush.
|
* exception on flush.
|
||||||
|
@ -182,13 +267,24 @@ public class TestEditLogJournalFailures {
|
||||||
return fs.mkdirs(new Path("/tmp", Integer.toString(editsPerformed++)));
|
return fs.mkdirs(new Path("/tmp", Integer.toString(editsPerformed++)));
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Make sure that Runtime.exit(...) has been called exactly
|
||||||
|
* <code>expectedExits</code> number of times.
|
||||||
|
*
|
||||||
|
* @param expectedExits the exact number of times Runtime.exit(...) should
|
||||||
|
* have been called.
|
||||||
|
*/
|
||||||
|
private void assertExitInvocations(int expectedExits) {
|
||||||
|
assertExitInvocations(times(expectedExits));
|
||||||
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Make sure that Runtime.exit(...) has been called
|
* Make sure that Runtime.exit(...) has been called
|
||||||
* <code>expectedExits</code> number of times.
|
* <code>expectedExits</code> number of times.
|
||||||
*
|
*
|
||||||
* @param expectedExits the number of times Runtime.exit(...) should have been called.
|
* @param expectedExits the number of times Runtime.exit(...) should have been called.
|
||||||
*/
|
*/
|
||||||
private void assertExitInvocations(int expectedExits) {
|
private void assertExitInvocations(VerificationMode expectedExits) {
|
||||||
verify(runtime, times(expectedExits)).exit(anyInt());
|
verify(runtime, expectedExits).exit(anyInt());
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
|
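The two new tests exercise separate knobs: a list of edits directories whose failure must abort the NameNode, and a floor on how many of the remaining redundant directories must stay writable. A minimal configuration sketch using the DFSConfigKeys constants referenced in the tests above; the NFS path is a placeholder:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.DFSConfigKeys;
    import org.apache.hadoop.hdfs.HdfsConfiguration;

    public class EditsDirPolicySketch {
      public static Configuration buildConf() {
        Configuration conf = new HdfsConfiguration();
        // An edits directory whose failure should halt the NameNode immediately.
        conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_REQUIRED_KEY, "/mnt/nfs/hdfs/edits");
        // Halt once fewer than this many non-required edits dirs remain writable.
        conf.setInt(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_MINIMUM_KEY, 1);
        return conf;
      }
    }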
@ -0,0 +1,177 @@
|
||||||
|
/**
|
||||||
|
* Licensed to the Apache Software Foundation (ASF) under one
|
||||||
|
* or more contributor license agreements. See the NOTICE file
|
||||||
|
* distributed with this work for additional information
|
||||||
|
* regarding copyright ownership. The ASF licenses this file
|
||||||
|
* to you under the Apache License, Version 2.0 (the
|
||||||
|
* "License"); you may not use this file except in compliance
|
||||||
|
* with the License. You may obtain a copy of the License at
|
||||||
|
*
|
||||||
|
* http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
*
|
||||||
|
* Unless required by applicable law or agreed to in writing, software
|
||||||
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
* See the License for the specific language governing permissions and
|
||||||
|
* limitations under the License.
|
||||||
|
*/
|
||||||
|
package org.apache.hadoop.hdfs.server.namenode;
|
||||||
|
|
||||||
|
import org.junit.AfterClass;
|
||||||
|
import org.junit.BeforeClass;
|
||||||
|
import org.junit.Test;
|
||||||
|
|
||||||
|
import static org.mockito.Mockito.mock;
|
||||||
|
import static org.junit.Assert.*;
|
||||||
|
import org.apache.hadoop.hdfs.MiniDFSCluster;
|
||||||
|
import org.apache.hadoop.conf.Configuration;
|
||||||
|
import org.apache.hadoop.hdfs.DFSConfigKeys;
|
||||||
|
import org.apache.hadoop.io.Writable;
|
||||||
|
|
||||||
|
import java.net.URI;
|
||||||
|
import java.io.IOException;
|
||||||
|
|
||||||
|
public class TestGenericJournalConf {
|
||||||
|
/**
|
||||||
|
* Test that an exception is thrown if a journal class doesn't exist
|
||||||
|
* in the configuration
|
||||||
|
*/
|
||||||
|
@Test(expected=IllegalArgumentException.class)
|
||||||
|
public void testNotConfigured() throws Exception {
|
||||||
|
MiniDFSCluster cluster = null;
|
||||||
|
Configuration conf = new Configuration();
|
||||||
|
|
||||||
|
conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY,
|
||||||
|
"dummy://test");
|
||||||
|
try {
|
||||||
|
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
|
||||||
|
cluster.waitActive();
|
||||||
|
} finally {
|
||||||
|
if (cluster != null) {
|
||||||
|
cluster.shutdown();
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Test that an exception is thrown if a journal class doesn't
|
||||||
|
* exist in the classloader.
|
||||||
|
*/
|
||||||
|
@Test(expected=IllegalArgumentException.class)
|
||||||
|
public void testClassDoesntExist() throws Exception {
|
||||||
|
MiniDFSCluster cluster = null;
|
||||||
|
Configuration conf = new Configuration();
|
||||||
|
|
||||||
|
conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_PLUGIN_PREFIX + ".dummy",
|
||||||
|
"org.apache.hadoop.nonexistent");
|
||||||
|
conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY,
|
||||||
|
"dummy://test");
|
||||||
|
|
||||||
|
try {
|
||||||
|
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
|
||||||
|
cluster.waitActive();
|
||||||
|
} finally {
|
||||||
|
if (cluster != null) {
|
||||||
|
cluster.shutdown();
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Test that an implementation of JournalManager without a
|
||||||
|
* (Configuration,URI) constructor throws an exception
|
||||||
|
*/
|
||||||
|
@Test
|
||||||
|
public void testBadConstructor() throws Exception {
|
||||||
|
MiniDFSCluster cluster = null;
|
||||||
|
Configuration conf = new Configuration();
|
||||||
|
|
||||||
|
conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_PLUGIN_PREFIX + ".dummy",
|
||||||
|
BadConstructorJournalManager.class.getName());
|
||||||
|
conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY,
|
||||||
|
"dummy://test");
|
||||||
|
try {
|
||||||
|
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
|
||||||
|
cluster.waitActive();
|
||||||
|
fail("Should have failed before this point");
|
||||||
|
} catch (IllegalArgumentException iae) {
|
||||||
|
if (!iae.getMessage().contains("Unable to construct journal")) {
|
||||||
|
fail("Should have failed with unable to construct exception");
|
||||||
|
}
|
||||||
|
} finally {
|
||||||
|
if (cluster != null) {
|
||||||
|
cluster.shutdown();
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Test that a dummy implementation of JournalManager can
|
||||||
|
* be initialized on startup
|
||||||
|
*/
|
||||||
|
@Test
|
||||||
|
public void testDummyJournalManager() throws Exception {
|
||||||
|
MiniDFSCluster cluster = null;
|
||||||
|
Configuration conf = new Configuration();
|
||||||
|
|
||||||
|
conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_PLUGIN_PREFIX + ".dummy",
|
||||||
|
DummyJournalManager.class.getName());
|
||||||
|
conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY,
|
||||||
|
"dummy://test");
|
||||||
|
conf.setInt(DFSConfigKeys.DFS_NAMENODE_CHECKED_VOLUMES_MINIMUM_KEY, 0);
|
||||||
|
try {
|
||||||
|
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
|
||||||
|
cluster.waitActive();
|
||||||
|
} finally {
|
||||||
|
if (cluster != null) {
|
||||||
|
cluster.shutdown();
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
public static class DummyJournalManager implements JournalManager {
|
||||||
|
public DummyJournalManager(Configuration conf, URI u) {}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public EditLogOutputStream startLogSegment(long txId) throws IOException {
|
||||||
|
return mock(EditLogOutputStream.class);
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public void finalizeLogSegment(long firstTxId, long lastTxId)
|
||||||
|
throws IOException {
|
||||||
|
// noop
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public EditLogInputStream getInputStream(long fromTxnId)
|
||||||
|
throws IOException {
|
||||||
|
return null;
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public long getNumberOfTransactions(long fromTxnId)
|
||||||
|
throws IOException {
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public void setOutputBufferCapacity(int size) {}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public void purgeLogsOlderThan(long minTxIdToKeep)
|
||||||
|
throws IOException {}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public void recoverUnfinalizedSegments() throws IOException {}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public void close() throws IOException {}
|
||||||
|
}
|
||||||
|
|
||||||
|
public static class BadConstructorJournalManager extends DummyJournalManager {
|
||||||
|
public BadConstructorJournalManager() {
|
||||||
|
super(null, null);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
|
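The plugin lookup exercised above keys off the URI scheme of each edits directory: the class registered under DFS_NAMENODE_EDITS_PLUGIN_PREFIX plus the scheme is instantiated through its (Configuration, URI) constructor. A minimal wiring sketch; the scheme name, URI, and journal class are placeholders:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.DFSConfigKeys;

    public class CustomJournalWiring {
      // journalClass must implement JournalManager and declare a public
      // (Configuration, URI) constructor, as testBadConstructor demonstrates.
      public static void register(Configuration conf, Class<?> journalClass) {
        conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_PLUGIN_PREFIX + ".myjournal",
            journalClass.getName());
        conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY, "myjournal://node1/ns1");
      }
    }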
@ -19,21 +19,20 @@ package org.apache.hadoop.hdfs.server.namenode;
|
||||||
|
|
||||||
import java.io.File;
|
import java.io.File;
|
||||||
import java.io.IOException;
|
import java.io.IOException;
|
||||||
|
import java.net.URISyntaxException;
|
||||||
import java.util.HashMap;
|
import java.util.HashMap;
|
||||||
import java.util.Map;
|
import java.util.Map;
|
||||||
import java.util.Set;
|
import java.util.Set;
|
||||||
|
|
||||||
import org.apache.hadoop.conf.Configuration;
|
import org.apache.hadoop.conf.Configuration;
|
||||||
import org.apache.hadoop.fs.DF;
|
|
||||||
import org.apache.hadoop.hdfs.DFSConfigKeys;
|
import org.apache.hadoop.hdfs.DFSConfigKeys;
|
||||||
import org.apache.hadoop.hdfs.MiniDFSCluster;
|
import org.apache.hadoop.hdfs.MiniDFSCluster;
|
||||||
import org.apache.hadoop.hdfs.server.namenode.FSNamesystem.NameNodeResourceMonitor;
|
import org.apache.hadoop.hdfs.server.namenode.FSNamesystem.NameNodeResourceMonitor;
|
||||||
|
import org.apache.hadoop.hdfs.server.namenode.NameNodeResourceChecker.CheckedVolume;
|
||||||
import org.junit.Before;
|
import org.junit.Before;
|
||||||
import org.junit.Test;
|
import org.junit.Test;
|
||||||
import org.mockito.Mockito;
|
import org.mockito.Mockito;
|
||||||
|
|
||||||
import com.google.common.collect.Lists;
|
|
||||||
|
|
||||||
import static org.junit.Assert.assertEquals;
|
import static org.junit.Assert.assertEquals;
|
||||||
import static org.junit.Assert.assertFalse;
|
import static org.junit.Assert.assertFalse;
|
||||||
import static org.junit.Assert.assertTrue;
|
import static org.junit.Assert.assertTrue;
|
||||||
|
@ -49,7 +48,7 @@ public class TestNameNodeResourceChecker {
|
||||||
baseDir = new File(System.getProperty("test.build.data"));
|
baseDir = new File(System.getProperty("test.build.data"));
|
||||||
nameDir = new File(baseDir, "resource-check-name-dir");
|
nameDir = new File(baseDir, "resource-check-name-dir");
|
||||||
nameDir.mkdirs();
|
nameDir.mkdirs();
|
||||||
conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY, nameDir.getAbsolutePath());
|
conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY, nameDir.getAbsolutePath());
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
|
@ -90,7 +89,7 @@ public class TestNameNodeResourceChecker {
|
||||||
throws IOException, InterruptedException {
|
throws IOException, InterruptedException {
|
||||||
MiniDFSCluster cluster = null;
|
MiniDFSCluster cluster = null;
|
||||||
try {
|
try {
|
||||||
conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY, nameDir.getAbsolutePath());
|
conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY, nameDir.getAbsolutePath());
|
||||||
conf.setLong(DFSConfigKeys.DFS_NAMENODE_RESOURCE_CHECK_INTERVAL_KEY, 1);
|
conf.setLong(DFSConfigKeys.DFS_NAMENODE_RESOURCE_CHECK_INTERVAL_KEY, 1);
|
||||||
|
|
||||||
cluster = new MiniDFSCluster.Builder(conf)
|
cluster = new MiniDFSCluster.Builder(conf)
|
||||||
|
@ -145,7 +144,7 @@ public class TestNameNodeResourceChecker {
|
||||||
File nameDir2 = new File(System.getProperty("test.build.data"), "name-dir2");
|
File nameDir2 = new File(System.getProperty("test.build.data"), "name-dir2");
|
||||||
nameDir1.mkdirs();
|
nameDir1.mkdirs();
|
||||||
nameDir2.mkdirs();
|
nameDir2.mkdirs();
|
||||||
conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,
|
conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY,
|
||||||
nameDir1.getAbsolutePath() + "," + nameDir2.getAbsolutePath());
|
nameDir1.getAbsolutePath() + "," + nameDir2.getAbsolutePath());
|
||||||
conf.setLong(DFSConfigKeys.DFS_NAMENODE_DU_RESERVED_KEY, Long.MAX_VALUE);
|
conf.setLong(DFSConfigKeys.DFS_NAMENODE_DU_RESERVED_KEY, Long.MAX_VALUE);
|
||||||
|
|
||||||
|
@ -164,7 +163,7 @@ public class TestNameNodeResourceChecker {
|
||||||
Configuration conf = new Configuration();
|
Configuration conf = new Configuration();
|
||||||
File nameDir = new File(System.getProperty("test.build.data"), "name-dir");
|
File nameDir = new File(System.getProperty("test.build.data"), "name-dir");
|
||||||
nameDir.mkdirs();
|
nameDir.mkdirs();
|
||||||
conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY, nameDir.getAbsolutePath());
|
conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY, nameDir.getAbsolutePath());
|
||||||
conf.set(DFSConfigKeys.DFS_NAMENODE_CHECKED_VOLUMES_KEY, nameDir.getAbsolutePath());
|
conf.set(DFSConfigKeys.DFS_NAMENODE_CHECKED_VOLUMES_KEY, nameDir.getAbsolutePath());
|
||||||
conf.setLong(DFSConfigKeys.DFS_NAMENODE_DU_RESERVED_KEY, Long.MAX_VALUE);
|
conf.setLong(DFSConfigKeys.DFS_NAMENODE_DU_RESERVED_KEY, Long.MAX_VALUE);
|
||||||
|
|
||||||
|
@ -176,38 +175,70 @@ public class TestNameNodeResourceChecker {
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Test that the NN is considered to be out of resources only once all
|
* Test that the NN is considered to be out of resources only once all
|
||||||
* configured volumes are low on resources.
|
* redundant configured volumes are low on resources, or when any required
|
||||||
|
* volume is low on resources.
|
||||||
*/
|
*/
|
||||||
@Test
|
@Test
|
||||||
public void testLowResourceVolumePolicy() throws IOException {
|
public void testLowResourceVolumePolicy() throws IOException, URISyntaxException {
|
||||||
Configuration conf = new Configuration();
|
Configuration conf = new Configuration();
|
||||||
File nameDir1 = new File(System.getProperty("test.build.data"), "name-dir1");
|
File nameDir1 = new File(System.getProperty("test.build.data"), "name-dir1");
|
||||||
File nameDir2 = new File(System.getProperty("test.build.data"), "name-dir2");
|
File nameDir2 = new File(System.getProperty("test.build.data"), "name-dir2");
|
||||||
nameDir1.mkdirs();
|
nameDir1.mkdirs();
|
||||||
nameDir2.mkdirs();
|
nameDir2.mkdirs();
|
||||||
|
|
||||||
conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,
|
conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY,
|
||||||
nameDir1.getAbsolutePath() + "," + nameDir2.getAbsolutePath());
|
nameDir1.getAbsolutePath() + "," + nameDir2.getAbsolutePath());
|
||||||
|
conf.setInt(DFSConfigKeys.DFS_NAMENODE_CHECKED_VOLUMES_MINIMUM_KEY, 2);
|
||||||
|
|
||||||
NameNodeResourceChecker nnrc = new NameNodeResourceChecker(conf);
|
NameNodeResourceChecker nnrc = new NameNodeResourceChecker(conf);
|
||||||
|
|
||||||
// For the purpose of this test, we need to force the name dirs to appear to
|
// For the purpose of this test, we need to force the name dirs to appear to
|
||||||
// be on different volumes.
|
// be on different volumes.
|
||||||
Map<String, DF> volumes = new HashMap<String, DF>();
|
Map<String, CheckedVolume> volumes = new HashMap<String, CheckedVolume>();
|
||||||
volumes.put("volume1", new DF(nameDir1, conf));
|
CheckedVolume volume1 = Mockito.mock(CheckedVolume.class);
|
||||||
volumes.put("volume2", new DF(nameDir2, conf));
|
CheckedVolume volume2 = Mockito.mock(CheckedVolume.class);
|
||||||
|
CheckedVolume volume3 = Mockito.mock(CheckedVolume.class);
|
||||||
|
CheckedVolume volume4 = Mockito.mock(CheckedVolume.class);
|
||||||
|
CheckedVolume volume5 = Mockito.mock(CheckedVolume.class);
|
||||||
|
Mockito.when(volume1.isResourceAvailable()).thenReturn(true);
|
||||||
|
Mockito.when(volume2.isResourceAvailable()).thenReturn(true);
|
||||||
|
Mockito.when(volume3.isResourceAvailable()).thenReturn(true);
|
||||||
|
Mockito.when(volume4.isResourceAvailable()).thenReturn(true);
|
||||||
|
Mockito.when(volume5.isResourceAvailable()).thenReturn(true);
|
||||||
|
|
||||||
|
// Make volumes 4 and 5 required.
|
||||||
|
Mockito.when(volume4.isRequired()).thenReturn(true);
|
||||||
|
Mockito.when(volume5.isRequired()).thenReturn(true);
|
||||||
|
|
||||||
|
volumes.put("volume1", volume1);
|
||||||
|
volumes.put("volume2", volume2);
|
||||||
|
volumes.put("volume3", volume3);
|
||||||
|
volumes.put("volume4", volume4);
|
||||||
|
volumes.put("volume5", volume5);
|
||||||
nnrc.setVolumes(volumes);
|
nnrc.setVolumes(volumes);
|
||||||
|
|
||||||
NameNodeResourceChecker spyNnrc = Mockito.spy(nnrc);
|
// Initially all dirs have space.
|
||||||
|
assertTrue(nnrc.hasAvailableDiskSpace());
|
||||||
|
|
||||||
Mockito.when(spyNnrc.getVolumesLowOnSpace()).thenReturn(
|
// 1/3 redundant dir is low on space.
|
||||||
Lists.newArrayList("volume1"));
|
Mockito.when(volume1.isResourceAvailable()).thenReturn(false);
|
||||||
|
assertTrue(nnrc.hasAvailableDiskSpace());
|
||||||
|
|
||||||
assertTrue(spyNnrc.hasAvailableDiskSpace());
|
// 2/3 redundant dirs are low on space.
|
||||||
|
Mockito.when(volume2.isResourceAvailable()).thenReturn(false);
|
||||||
|
assertFalse(nnrc.hasAvailableDiskSpace());
|
||||||
|
|
||||||
Mockito.when(spyNnrc.getVolumesLowOnSpace()).thenReturn(
|
// Lower the minimum number of redundant volumes that must be available.
|
||||||
Lists.newArrayList("volume1", "volume2"));
|
nnrc.setMinimumReduntdantVolumes(1);
|
||||||
|
assertTrue(nnrc.hasAvailableDiskSpace());
|
||||||
|
|
||||||
assertFalse(spyNnrc.hasAvailableDiskSpace());
|
// Now all of the redundant dirs are low on space.
|
||||||
|
Mockito.when(volume3.isResourceAvailable()).thenReturn(false);
|
||||||
|
assertFalse(nnrc.hasAvailableDiskSpace());
|
||||||
|
|
||||||
|
// A required dir is low on space, even though enough redundant dirs have space.
|
||||||
|
Mockito.when(volume3.isResourceAvailable()).thenReturn(true);
|
||||||
|
Mockito.when(volume4.isResourceAvailable()).thenReturn(false);
|
||||||
|
assertFalse(nnrc.hasAvailableDiskSpace());
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
|
@ -0,0 +1,107 @@
|
||||||
|
/**
|
||||||
|
* Licensed to the Apache Software Foundation (ASF) under one
|
||||||
|
* or more contributor license agreements. See the NOTICE file
|
||||||
|
* distributed with this work for additional information
|
||||||
|
* regarding copyright ownership. The ASF licenses this file
|
||||||
|
* to you under the Apache License, Version 2.0 (the
|
||||||
|
* "License"); you may not use this file except in compliance
|
||||||
|
* with the License. You may obtain a copy of the License at
|
||||||
|
*
|
||||||
|
* http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
*
|
||||||
|
* Unless required by applicable law or agreed to in writing, software
|
||||||
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
* See the License for the specific language governing permissions and
|
||||||
|
* limitations under the License.
|
||||||
|
*/
|
||||||
|
package org.apache.hadoop.hdfs.server.namenode;
|
||||||
|
|
||||||
|
import static org.junit.Assert.*;
|
||||||
|
import static org.mockito.Mockito.mock;
|
||||||
|
import static org.mockito.Mockito.when;
|
||||||
|
|
||||||
|
import java.util.ArrayList;
|
||||||
|
import java.util.Collection;
|
||||||
|
|
||||||
|
import org.junit.Test;
|
||||||
|
|
||||||
|
public class TestNameNodeResourcePolicy {
|
||||||
|
|
||||||
|
@Test
|
||||||
|
public void testSingleRedundantResource() {
|
||||||
|
assertTrue(testResourceScenario(1, 0, 0, 0, 1));
|
||||||
|
assertFalse(testResourceScenario(1, 0, 1, 0, 1));
|
||||||
|
}
|
||||||
|
|
||||||
|
@Test
|
||||||
|
public void testSingleRequiredResource() {
|
||||||
|
assertTrue(testResourceScenario(0, 1, 0, 0, 0));
|
||||||
|
assertFalse(testResourceScenario(0, 1, 0, 1, 0));
|
||||||
|
}
|
||||||
|
|
||||||
|
@Test
|
||||||
|
public void testMultipleRedundantResources() {
|
||||||
|
assertTrue(testResourceScenario(4, 0, 0, 0, 4));
|
||||||
|
assertFalse(testResourceScenario(4, 0, 1, 0, 4));
|
||||||
|
assertTrue(testResourceScenario(4, 0, 1, 0, 3));
|
||||||
|
assertFalse(testResourceScenario(4, 0, 2, 0, 3));
|
||||||
|
assertTrue(testResourceScenario(4, 0, 2, 0, 2));
|
||||||
|
assertFalse(testResourceScenario(4, 0, 3, 0, 2));
|
||||||
|
assertTrue(testResourceScenario(4, 0, 3, 0, 1));
|
||||||
|
assertFalse(testResourceScenario(4, 0, 4, 0, 1));
|
||||||
|
try {
|
||||||
|
testResourceScenario(1, 0, 0, 0, 2);
|
||||||
|
fail("Should fail if there are more minimum redundant resources than " +
|
||||||
|
"total redundant resources");
|
||||||
|
} catch (RuntimeException rte) {
|
||||||
|
assertTrue(rte.getMessage().startsWith("Need a minimum"));
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
@Test
|
||||||
|
public void testMultipleRequiredResources() {
|
||||||
|
assertTrue(testResourceScenario(0, 3, 0, 0, 0));
|
||||||
|
assertFalse(testResourceScenario(0, 3, 0, 1, 0));
|
||||||
|
assertFalse(testResourceScenario(0, 3, 0, 2, 0));
|
||||||
|
assertFalse(testResourceScenario(0, 3, 0, 3, 0));
|
||||||
|
}
|
||||||
|
|
||||||
|
@Test
|
||||||
|
public void testRedundantWithRequiredResources() {
|
||||||
|
assertTrue(testResourceScenario(2, 2, 0, 0, 1));
|
||||||
|
assertTrue(testResourceScenario(2, 2, 1, 0, 1));
|
||||||
|
assertFalse(testResourceScenario(2, 2, 2, 0, 1));
|
||||||
|
assertFalse(testResourceScenario(2, 2, 0, 1, 1));
|
||||||
|
assertFalse(testResourceScenario(2, 2, 1, 1, 1));
|
||||||
|
assertFalse(testResourceScenario(2, 2, 2, 1, 1));
|
||||||
|
}
|
||||||
|
|
||||||
|
private static boolean testResourceScenario(
|
||||||
|
int numRedundantResources,
|
||||||
|
int numRequiredResources,
|
||||||
|
int numFailedRedundantResources,
|
||||||
|
int numFailedRequiredResources,
|
||||||
|
int minimumRedundantResources) {
|
||||||
|
|
||||||
|
Collection<CheckableNameNodeResource> resources =
|
||||||
|
new ArrayList<CheckableNameNodeResource>();
|
||||||
|
|
||||||
|
for (int i = 0; i < numRedundantResources; i++) {
|
||||||
|
CheckableNameNodeResource r = mock(CheckableNameNodeResource.class);
|
||||||
|
when(r.isRequired()).thenReturn(false);
|
||||||
|
when(r.isResourceAvailable()).thenReturn(i >= numFailedRedundantResources);
|
||||||
|
resources.add(r);
|
||||||
|
}
|
||||||
|
|
||||||
|
for (int i = 0; i < numRequiredResources; i++) {
|
||||||
|
CheckableNameNodeResource r = mock(CheckableNameNodeResource.class);
|
||||||
|
when(r.isRequired()).thenReturn(true);
|
||||||
|
when(r.isResourceAvailable()).thenReturn(i >= numFailedRequiredResources);
|
||||||
|
resources.add(r);
|
||||||
|
}
|
||||||
|
|
||||||
|
return NameNodeResourcePolicy.areResourcesAvailable(resources,
|
||||||
|
minimumRedundantResources);
|
||||||
|
}
|
||||||
|
}
|
|
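NameNodeResourcePolicy itself is not included in this excerpt, so the sketch below is only an inference from the assertions above, not code from this commit: every required resource must be available, at least minimumRedundantResources of the redundant ones must be available, and asking for a higher minimum than the number of configured redundant resources is a programming error.

// Hypothetical sketch inferred from the tests above; the real
// NameNodeResourcePolicy implementation is not shown in this diff.
static boolean areResourcesAvailable(
    Collection<? extends CheckableNameNodeResource> resources,
    int minimumRedundantResources) {

  int redundantResourceCount = 0;
  int availableRedundantResourceCount = 0;

  for (CheckableNameNodeResource resource : resources) {
    if (resource.isRequired()) {
      // A single unavailable required resource fails the whole check.
      if (!resource.isResourceAvailable()) {
        return false;
      }
    } else {
      redundantResourceCount++;
      if (resource.isResourceAvailable()) {
        availableRedundantResourceCount++;
      }
    }
  }

  if (redundantResourceCount < minimumRedundantResources) {
    // Mirrors the "Need a minimum" RuntimeException asserted in
    // testMultipleRedundantResources.
    throw new RuntimeException("Need a minimum of " + minimumRedundantResources
        + " redundant resources, but only " + redundantResourceCount
        + " are configured.");
  }

  return availableRedundantResourceCount >= minimumRedundantResources;
}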
@@ -40,9 +40,9 @@ Trunk (unreleased changes)
     MAPREDUCE-3415. improve MiniMRYarnCluster & DistributedShell JAR resolution.
     (tucu)
 
     HADOOP-7862 MR changes to work with HADOOP 7862:
     Move the support for multiple protocols to lower layer so that Writable,
     PB and Avro can all use it (Sanjay)
 
   BUG FIXES
     MAPREDUCE-3412. Fix 'ant docs'. (amarrk)
@@ -74,6 +74,9 @@ Trunk (unreleased changes)
 
     MAPREDUCE-3500. MRJobConfig creates an LD_LIBRARY_PATH using the platform ARCH. (tucu)
 
+    MAPREDUCE-3389. MRApps loads the 'mrapp-generated-classpath' file with
+    classpath from the build machine. (tucu)
+
 Release 0.23.1 - Unreleased
 
   INCOMPATIBLE CHANGES
@@ -243,6 +246,12 @@ Release 0.23.1 - Unreleased
     MAPREDUCE-3485. DISKS_FAILED -101 error code should be defined in same location as
     ABORTED_CONTAINER_EXIT_STATUS. (Ravi Gummadi via mahadev)
 
+    MAPREDUCE-3496. Fixed client to print queue acls in consistent order.
+    (Jonathan Eagles via acmurthy)
+
+    MAPREDUCE-3147. Handle leaf queues with the same name properly. (Ravi Prakash via
+    mahadev)
+
 Release 0.23.0 - 2011-11-01
 
   INCOMPATIBLE CHANGES
@@ -22,6 +22,7 @@ import static org.apache.hadoop.yarn.util.StringHelper._join;
 import static org.apache.hadoop.yarn.util.StringHelper._split;
 
 import java.io.BufferedReader;
+import java.io.File;
 import java.io.IOException;
 import java.io.InputStream;
 import java.io.InputStreamReader;
@@ -180,18 +181,23 @@ public class MRApps extends Apps {
       String mrAppGeneratedClasspathFile = "mrapp-generated-classpath";
       classpathFileStream =
           thisClassLoader.getResourceAsStream(mrAppGeneratedClasspathFile);
+      // Put the file itself on classpath for tasks.
+      String classpathElement = thisClassLoader.getResource(mrAppGeneratedClasspathFile).getFile();
+      if (classpathElement.contains("!")) {
+        classpathElement = classpathElement.substring(0, classpathElement.indexOf("!"));
+      }
+      else {
+        classpathElement = new File(classpathElement).getParent();
+      }
+      Apps.addToEnvironment(
+          environment,
+          Environment.CLASSPATH.name(), classpathElement);
+
       reader = new BufferedReader(new InputStreamReader(classpathFileStream));
       String cp = reader.readLine();
       if (cp != null) {
         Apps.addToEnvironment(environment, Environment.CLASSPATH.name(), cp.trim());
       }
-      // Put the file itself on classpath for tasks.
-      Apps.addToEnvironment(
-          environment,
-          Environment.CLASSPATH.name(),
-          thisClassLoader.getResource(mrAppGeneratedClasspathFile).getFile()
-            .split("!")[0]);
 
       // Add standard Hadoop classes
       for (String c : ApplicationConstants.APPLICATION_CLASSPATH) {
         Apps.addToEnvironment(environment, Environment.CLASSPATH.name(), c);
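The replacement block above derives one extra classpath element from wherever the mrapp-generated-classpath resource was loaded: if the resource sits inside a jar, getResource(...).getFile() yields a value of the form file:/path/app.jar!/mrapp-generated-classpath and everything from the '!' onward is trimmed; if it is a plain file on disk, its parent directory is used instead. A standalone illustration of just that trimming logic, using made-up paths rather than anything from the real build:

import java.io.File;

public class ClasspathElementDemo {
  // Same trimming logic as the MRApps change above, applied to two
  // hypothetical resource locations.
  static String toClasspathElement(String resourcePath) {
    if (resourcePath.contains("!")) {
      // Resource inside a jar: keep only the jar's own path.
      return resourcePath.substring(0, resourcePath.indexOf("!"));
    }
    // Resource on disk: use its containing directory.
    return new File(resourcePath).getParent();
  }

  public static void main(String[] args) {
    System.out.println(toClasspathElement(
        "file:/tmp/mr-app.jar!/mrapp-generated-classpath"));  // file:/tmp/mr-app.jar
    System.out.println(toClasspathElement(
        "/tmp/classes/mrapp-generated-classpath"));           // /tmp/classes
  }
}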
@@ -22,6 +22,7 @@ import java.io.PrintWriter;
 import java.io.Writer;
 import java.util.List;
 import java.util.ArrayList;
+import java.util.Arrays;
 
 import org.apache.hadoop.conf.Configured;
 import org.apache.hadoop.security.UserGroupInformation;
@@ -200,6 +201,7 @@ class JobQueueClient extends Configured implements Tool {
     for (QueueAclsInfo queueInfo : queueAclsInfoList) {
       System.out.print(queueInfo.getQueueName() + " ");
       String[] ops = queueInfo.getOperations();
+      Arrays.sort(ops);
       int max = ops.length - 1;
       for (int j = 0; j < ops.length; j++) {
         System.out.print(ops[j].replaceFirst("acl-", ""));
@@ -214,7 +214,8 @@ implements ResourceScheduler, CapacitySchedulerContext {
   private static final QueueHook noop = new QueueHook();
 
   @Lock(CapacityScheduler.class)
-  private void initializeQueues(CapacitySchedulerConfiguration conf) {
+  private void initializeQueues(CapacitySchedulerConfiguration conf)
+      throws IOException {
     root =
         parseQueue(this, conf, null, ROOT, queues, queues,
             queueComparator, applicationComparator, noop);
@@ -283,7 +284,7 @@ implements ResourceScheduler, CapacitySchedulerContext {
       Map<String, CSQueue> oldQueues,
       Comparator<CSQueue> queueComparator,
       Comparator<SchedulerApp> applicationComparator,
-      QueueHook hook) {
+      QueueHook hook) throws IOException {
     CSQueue queue;
     String[] childQueueNames =
         conf.getQueues((parent == null) ?
@@ -316,6 +317,11 @@ implements ResourceScheduler, CapacitySchedulerContext {
       parentQueue.setChildQueues(childQueues);
     }
 
+    if(queue instanceof LeafQueue == true && queues.containsKey(queueName)
+        && queues.get(queueName) instanceof LeafQueue == true) {
+      throw new IOException("Two leaf queues were named " + queueName
+          + ". Leaf queue names must be distinct");
+    }
     queues.put(queueName, queue);
 
     LOG.info("Initialized queue: " + queue);
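One note on the guard just added: instanceof already evaluates to a boolean, so the "== true" comparisons are redundant. An equivalent, more idiomatic form with unchanged behaviour would be:

// Equivalent to the check added in parseQueue above, minus the
// redundant "== true" comparisons.
if (queue instanceof LeafQueue && queues.containsKey(queueName)
    && queues.get(queueName) instanceof LeafQueue) {
  throw new IOException("Two leaf queues were named " + queueName
      + ". Leaf queue names must be distinct");
}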
@@ -20,6 +20,7 @@ package org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity;
 import static org.junit.Assert.*;
 import static org.mockito.Mockito.*;
 
+import java.io.IOException;
 import java.util.HashMap;
 import java.util.Map;
 
@@ -44,7 +45,7 @@ public class TestApplicationLimits {
   LeafQueue queue;
 
   @Before
-  public void setUp() {
+  public void setUp() throws IOException {
     CapacitySchedulerConfiguration csConf =
         new CapacitySchedulerConfiguration();
     setupQueueConfiguration(csConf);
@@ -240,4 +240,21 @@ public class TestCapacityScheduler {
     node.checkResourceUsage();
   }
+
+  /** Test that parseQueue throws an exception when two leaf queues have the
+   * same name
+   * @throws IOException
+   */
+  @Test(expected=IOException.class)
+  public void testParseQueue() throws IOException {
+    CapacityScheduler cs = new CapacityScheduler();
+
+    CapacitySchedulerConfiguration conf = new CapacitySchedulerConfiguration();
+    setupQueueConfiguration(conf);
+    conf.setQueues(CapacityScheduler.ROOT + ".a.a1", new String[] {"b1"} );
+    conf.setCapacity(CapacityScheduler.ROOT + ".a.a1.b1", 100);
+    conf.setUserLimitFactor(CapacityScheduler.ROOT + ".a.a1.b1", 100.0f);
+
+    cs.reinitialize(conf, null, null);
+  }
 
 }
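For context on why testParseQueue expects an IOException: setupQueueConfiguration is not shown in this hunk, but the test presumably relies on it already defining a leaf queue named b1 elsewhere under root, so adding root.a.a1.b1 creates a second leaf queue with the same short name and the new check in parseQueue rejects the configuration during reinitialize.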