Merge remote-tracking branch 'apache/trunk' into HDFS-7285

Zhe Zhang 2015-08-27 16:23:41 -07:00
commit ab56fcdb12
346 changed files with 9613 additions and 3828 deletions

View File

@ -145,6 +145,13 @@ public class AuthenticationFilter implements Filter {
public static final String SIGNATURE_SECRET_FILE = SIGNATURE_SECRET + ".file";
/**
* Constant for the configuration property
* that indicates the max inactive interval of the generated token.
*/
public static final String
AUTH_TOKEN_MAX_INACTIVE_INTERVAL = "token.MaxInactiveInterval";
/**
* Constant for the configuration property that indicates the validity of the generated token.
*/
@ -190,6 +197,7 @@ public class AuthenticationFilter implements Filter {
private Signer signer;
private SignerSecretProvider secretProvider;
private AuthenticationHandler authHandler;
private long maxInactiveInterval;
private long validity;
private String cookieDomain;
private String cookiePath;
@ -227,6 +235,8 @@ public class AuthenticationFilter implements Filter {
authHandlerClassName = authHandlerName;
}
maxInactiveInterval = Long.parseLong(config.getProperty(
AUTH_TOKEN_MAX_INACTIVE_INTERVAL, "1800")) * 1000; // 30 minutes;
validity = Long.parseLong(config.getProperty(AUTH_TOKEN_VALIDITY, "36000"))
* 1000; //10 hours
initializeSecretProvider(filterConfig);
@ -354,6 +364,15 @@ public class AuthenticationFilter implements Filter {
.class;
}
/**
* Returns the max inactive interval time of the generated tokens.
*
* @return the max inactive interval time of the generated tokens in seconds.
*/
protected long getMaxInactiveInterval() {
return maxInactiveInterval / 1000;
}
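As an aside, here is a minimal sketch of the conversion performed above, assuming the filter's configuration Properties carry the "token.MaxInactiveInterval" key named by AUTH_TOKEN_MAX_INACTIVE_INTERVAL; the Properties object and the value 600 are hypothetical.

import java.util.Properties;

// Minimal sketch, not the filter itself: mirrors the seconds-to-milliseconds
// conversion done in init() and reversed by getMaxInactiveInterval().
public class MaxInactiveIntervalSketch {
  public static void main(String[] args) {
    Properties config = new Properties();                    // hypothetical config
    config.setProperty("token.MaxInactiveInterval", "600");  // 10 minutes, in seconds
    // init(): default is 1800 seconds (30 minutes), stored internally in ms.
    long maxInactiveInterval = Long.parseLong(
        config.getProperty("token.MaxInactiveInterval", "1800")) * 1000;
    // getMaxInactiveInterval(): reported back in seconds.
    System.out.println(maxInactiveInterval / 1000);          // prints 600
  }
}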
/**
* Returns the validity time of the generated tokens.
*
@ -510,8 +529,10 @@ public class AuthenticationFilter implements Filter {
* @throws ServletException thrown if a processing error occurred.
*/
@Override
public void doFilter(ServletRequest request, ServletResponse response, FilterChain filterChain)
throws IOException, ServletException {
public void doFilter(ServletRequest request,
ServletResponse response,
FilterChain filterChain)
throws IOException, ServletException {
boolean unauthorizedResponse = true;
int errCode = HttpServletResponse.SC_UNAUTHORIZED;
AuthenticationException authenticationEx = null;
@ -533,19 +554,27 @@ public class AuthenticationFilter implements Filter {
if (authHandler.managementOperation(token, httpRequest, httpResponse)) {
if (token == null) {
if (LOG.isDebugEnabled()) {
LOG.debug("Request [{}] triggering authentication", getRequestURL(httpRequest));
LOG.debug("Request [{}] triggering authentication",
getRequestURL(httpRequest));
}
token = authHandler.authenticate(httpRequest, httpResponse);
if (token != null && token.getExpires() != 0 &&
token != AuthenticationToken.ANONYMOUS) {
token.setExpires(System.currentTimeMillis() + getValidity() * 1000);
if (token != null && token != AuthenticationToken.ANONYMOUS) {
if (token.getMaxInactives() != 0) {
token.setMaxInactives(System.currentTimeMillis()
+ getMaxInactiveInterval() * 1000);
}
if (token.getExpires() != 0) {
token.setExpires(System.currentTimeMillis()
+ getValidity() * 1000);
}
}
newToken = true;
}
if (token != null) {
unauthorizedResponse = false;
if (LOG.isDebugEnabled()) {
LOG.debug("Request [{}] user [{}] authenticated", getRequestURL(httpRequest), token.getUserName());
LOG.debug("Request [{}] user [{}] authenticated",
getRequestURL(httpRequest), token.getUserName());
}
final AuthenticationToken authToken = token;
httpRequest = new HttpServletRequestWrapper(httpRequest) {
@ -562,10 +591,22 @@ public class AuthenticationFilter implements Filter {
@Override
public Principal getUserPrincipal() {
return (authToken != AuthenticationToken.ANONYMOUS) ? authToken : null;
return (authToken != AuthenticationToken.ANONYMOUS) ?
authToken : null;
}
};
if (newToken && !token.isExpired() && token != AuthenticationToken.ANONYMOUS) {
// If cookie persistence is configured to false,
// it means the cookie will be a session cookie.
// If the token is an old one, renew its maxInactiveInterval.
if (!newToken && !isCookiePersistent()
&& getMaxInactiveInterval() > 0) {
token.setMaxInactives(System.currentTimeMillis()
+ getMaxInactiveInterval() * 1000);
newToken = true;
}
if (newToken && !token.isExpired()
&& token != AuthenticationToken.ANONYMOUS) {
String signedToken = signer.sign(token.toString());
createAuthCookie(httpResponse, signedToken, getCookieDomain(),
getCookiePath(), token.getExpires(),
@ -628,12 +669,10 @@ public class AuthenticationFilter implements Filter {
* @param resp the response object.
* @param token authentication token for the cookie.
* @param domain the cookie domain.
* @param path the cokie path.
* @param path the cookie path.
* @param expires UNIX timestamp that indicates the expire date of the
* cookie. It has no effect if its value < 0.
* @param isSecure is the cookie secure?
* @param token the token.
* @param expires the cookie expiration time.
* @param isCookiePersistent whether the cookie is persistent or not.
*
* XXX the following code duplicate some logic in Jetty / Servlet API,

View File

@ -42,6 +42,7 @@ public class AuthenticationToken extends AuthToken {
private AuthenticationToken(AuthToken token) {
super(token.getUserName(), token.getName(), token.getType());
setMaxInactives(token.getMaxInactives());
setExpires(token.getExpires());
}
@ -58,6 +59,17 @@ public class AuthenticationToken extends AuthToken {
super(userName, principal, type);
}
/**
* Sets the max inactive time of the token.
*
* @param maxInactives max inactive time of the token in milliseconds since the epoch.
*/
public void setMaxInactives(long maxInactives) {
if (this != AuthenticationToken.ANONYMOUS) {
super.setMaxInactives(maxInactives);
}
}
/**
* Sets the expiration of the token.
*

View File

@ -34,15 +34,18 @@ public class AuthToken implements Principal {
private static final String ATTR_SEPARATOR = "&";
private static final String USER_NAME = "u";
private static final String PRINCIPAL = "p";
private static final String MAX_INACTIVES = "i";
private static final String EXPIRES = "e";
private static final String TYPE = "t";
private final static Set<String> ATTRIBUTES =
new HashSet<String>(Arrays.asList(USER_NAME, PRINCIPAL, EXPIRES, TYPE));
new HashSet<String>(Arrays.asList(USER_NAME, PRINCIPAL,
MAX_INACTIVES, EXPIRES, TYPE));
private String userName;
private String principal;
private String type;
private long maxInactives;
private long expires;
private String tokenStr;
@ -50,6 +53,7 @@ public class AuthToken implements Principal {
userName = null;
principal = null;
type = null;
maxInactives = -1;
expires = -1;
tokenStr = "ANONYMOUS";
generateToken();
@ -73,6 +77,7 @@ public class AuthToken implements Principal {
this.userName = userName;
this.principal = principal;
this.type = type;
this.maxInactives = -1;
this.expires = -1;
}
@ -88,6 +93,16 @@ public class AuthToken implements Principal {
}
}
/**
* Sets the max inactive interval of the token.
*
* @param interval the max inactive time of the token, expressed in
* milliseconds since the epoch.
*/
public void setMaxInactives(long interval) {
this.maxInactives = interval;
}
/**
* Sets the expiration of the token.
*
@ -104,7 +119,10 @@ public class AuthToken implements Principal {
* @return true if the token has expired.
*/
public boolean isExpired() {
return getExpires() != -1 && System.currentTimeMillis() > getExpires();
return (getMaxInactives() != -1 &&
System.currentTimeMillis() > getMaxInactives())
|| (getExpires() != -1 &&
System.currentTimeMillis() > getExpires());
}
/**
@ -115,6 +133,8 @@ public class AuthToken implements Principal {
sb.append(USER_NAME).append("=").append(getUserName()).append(ATTR_SEPARATOR);
sb.append(PRINCIPAL).append("=").append(getName()).append(ATTR_SEPARATOR);
sb.append(TYPE).append("=").append(getType()).append(ATTR_SEPARATOR);
sb.append(MAX_INACTIVES).append("=")
.append(getMaxInactives()).append(ATTR_SEPARATOR);
sb.append(EXPIRES).append("=").append(getExpires());
tokenStr = sb.toString();
}
@ -147,6 +167,15 @@ public class AuthToken implements Principal {
return type;
}
/**
* Returns the max inactive time of the token.
*
* @return the max inactive time of the token, in milliseconds since the epoch.
*/
public long getMaxInactives() {
return maxInactives;
}
/**
* Returns the expiration time of the token.
*
@ -183,8 +212,10 @@ public class AuthToken implements Principal {
if (!map.keySet().equals(ATTRIBUTES)) {
throw new AuthenticationException("Invalid token string, missing attributes");
}
long maxInactives = Long.parseLong(map.get(MAX_INACTIVES));
long expires = Long.parseLong(map.get(EXPIRES));
AuthToken token = new AuthToken(map.get(USER_NAME), map.get(PRINCIPAL), map.get(TYPE));
token.setMaxInactives(maxInactives);
token.setExpires(expires);
return token;
}
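For reference, generateToken() above serializes the token as u=&lt;user&gt;&amp;p=&lt;principal&gt;&amp;t=&lt;type&gt;&amp;i=&lt;maxInactives&gt;&amp;e=&lt;expires&gt;, and parse() reverses it. The sketch below splits such a string by hand with hypothetical values; it only mirrors the format and is not the AuthToken implementation.

import java.util.HashMap;
import java.util.Map;

// Illustrative sketch of the wire format produced by generateToken() and
// consumed by parse(); the sample values are hypothetical.
public class AuthTokenFormatSketch {
  public static void main(String[] args) {
    String tokenStr =
        "u=alice&p=alice@EXAMPLE.COM&t=kerberos&i=1440003600000&e=1440036000000";
    Map<String, String> map = new HashMap<>();
    for (String attr : tokenStr.split("&")) {   // ATTR_SEPARATOR
      String[] kv = attr.split("=", 2);
      map.put(kv[0], kv[1]);
    }
    // "i" is the max inactive deadline, "e" the expiry; both are absolute ms timestamps.
    System.out.println(map.get("u") + " inactive-deadline=" + map.get("i")
        + " expires=" + map.get("e"));
  }
}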

View File

@ -18,11 +18,10 @@ import java.io.FileWriter;
import java.io.IOException;
import java.io.Writer;
import java.net.HttpCookie;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Enumeration;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Properties;
import java.util.Vector;
@ -53,6 +52,7 @@ import static org.junit.Assert.assertThat;
public class TestAuthenticationFilter {
private static final long TOKEN_VALIDITY_SEC = 1000;
private static final long TOKEN_MAX_INACTIVE_INTERVAL = 1000;
@Test
public void testGetConfiguration() throws Exception {
@ -595,7 +595,7 @@ public class TestAuthenticationFilter {
HttpServletResponse response = Mockito.mock(HttpServletResponse.class);
FilterChain chain = Mockito.mock(FilterChain.class);
final HashMap<String, String> cookieMap = new HashMap<String, String>();
final Map<String, String> cookieMap = new HashMap<String, String>();
Mockito.doAnswer(new Answer<Object>() {
@Override
public Object answer(InvocationOnMock invocation) throws Throwable {
@ -644,7 +644,7 @@ public class TestAuthenticationFilter {
}
}
private static void parseCookieMap(String cookieHeader, HashMap<String,
private static void parseCookieMap(String cookieHeader, Map<String,
String> cookieMap) {
List<HttpCookie> cookies = HttpCookie.parse(cookieHeader);
for (HttpCookie cookie : cookies) {
@ -761,7 +761,7 @@ public class TestAuthenticationFilter {
FilterChain chain = Mockito.mock(FilterChain.class);
final HashMap<String, String> cookieMap = new HashMap<String, String>();
final Map<String, String> cookieMap = new HashMap<String, String>();
Mockito.doAnswer(
new Answer<Object>() {
@Override
@ -844,13 +844,164 @@ public class TestAuthenticationFilter {
}
}
@Test
public void
testDoFilterAuthenticationAuthorized() throws Exception {
// Both expired period and MaxInActiveInterval are not reached.
long maxInactives = System.currentTimeMillis()
+ TOKEN_MAX_INACTIVE_INTERVAL;
long expires = System.currentTimeMillis() + TOKEN_VALIDITY_SEC;
boolean authorized = true;
_testDoFilterAuthenticationMaxInactiveInterval(maxInactives,
expires,
authorized);
}
@Test
public void
testDoFilterAuthenticationUnauthorizedExpired() throws Exception {
// Expired period is reached, MaxInActiveInterval is not reached.
long maxInactives = System.currentTimeMillis()
+ TOKEN_MAX_INACTIVE_INTERVAL;
long expires = System.currentTimeMillis() - TOKEN_VALIDITY_SEC;
boolean authorized = false;
_testDoFilterAuthenticationMaxInactiveInterval(maxInactives,
expires,
authorized);
}
@Test
public void
testDoFilterAuthenticationUnauthorizedInactived() throws Exception {
// Expired period is not reached, MaxInActiveInterval is reached.
long maxInactives = System.currentTimeMillis()
- TOKEN_MAX_INACTIVE_INTERVAL;
long expires = System.currentTimeMillis() + TOKEN_VALIDITY_SEC;
boolean authorized = false;
_testDoFilterAuthenticationMaxInactiveInterval(maxInactives,
expires,
authorized);
}
@Test
public void
testDoFilterAuthenticationUnauthorizedInactivedExpired()
throws Exception {
// Both the expiry time and MaxInActiveInterval are reached.
long maxInactives = System.currentTimeMillis()
- TOKEN_MAX_INACTIVE_INTERVAL;
long expires = System.currentTimeMillis() - TOKEN_VALIDITY_SEC;
boolean authorized = false;
_testDoFilterAuthenticationMaxInactiveInterval(maxInactives,
expires,
authorized);
}
private void
_testDoFilterAuthenticationMaxInactiveInterval(long maxInactives,
long expires,
boolean authorized)
throws Exception {
String secret = "secret";
AuthenticationFilter filter = new AuthenticationFilter();
try {
FilterConfig config = Mockito.mock(FilterConfig.class);
Mockito.when(config.getInitParameter("management.operation.return")).
thenReturn("true");
Mockito.when(config.getInitParameter(
AuthenticationFilter.AUTH_TYPE)).thenReturn(
DummyAuthenticationHandler.class.getName());
Mockito.when(config.getInitParameter(
AuthenticationFilter.SIGNATURE_SECRET)).thenReturn(secret);
Mockito.when(config.getInitParameterNames()).thenReturn(
new Vector<String>(
Arrays.asList(AuthenticationFilter.AUTH_TYPE,
AuthenticationFilter.SIGNATURE_SECRET,
"management.operation.return")).elements());
getMockedServletContextWithStringSigner(config);
filter.init(config);
HttpServletRequest request = Mockito.mock(HttpServletRequest.class);
Mockito.when(request.getRequestURL()).thenReturn(
new StringBuffer("http://foo:8080/bar"));
AuthenticationToken token = new AuthenticationToken("u", "p",
DummyAuthenticationHandler.TYPE);
token.setMaxInactives(maxInactives);
token.setExpires(expires);
SignerSecretProvider secretProvider =
StringSignerSecretProviderCreator.newStringSignerSecretProvider();
Properties secretProviderProps = new Properties();
secretProviderProps.setProperty(
AuthenticationFilter.SIGNATURE_SECRET, secret);
secretProvider.init(secretProviderProps, null, TOKEN_VALIDITY_SEC);
Signer signer = new Signer(secretProvider);
String tokenSigned = signer.sign(token.toString());
Cookie cookie = new Cookie(AuthenticatedURL.AUTH_COOKIE, tokenSigned);
Mockito.when(request.getCookies()).thenReturn(new Cookie[]{cookie});
HttpServletResponse response = Mockito.mock(HttpServletResponse.class);
Mockito.when(response.containsHeader("WWW-Authenticate"))
.thenReturn(true);
FilterChain chain = Mockito.mock(FilterChain.class);
if (authorized) {
verifyAuthorized(filter, request, response, chain);
} else {
verifyUnauthorized(filter, request, response, chain);
}
} finally {
filter.destroy();
}
}
private static void verifyAuthorized(AuthenticationFilter filter,
HttpServletRequest request,
HttpServletResponse response,
FilterChain chain) throws
Exception {
final Map<String, String> cookieMap = new HashMap<>();
Mockito.doAnswer(new Answer<Object>() {
@Override
public Object answer(InvocationOnMock invocation) throws Throwable {
String cookieHeader = (String) invocation.getArguments()[1];
parseCookieMap(cookieHeader, cookieMap);
return null;
}
}).when(response).addHeader(Mockito.eq("Set-Cookie"), Mockito.anyString());
filter.doFilter(request, response, chain);
String v = cookieMap.get(AuthenticatedURL.AUTH_COOKIE);
Assert.assertNotNull("cookie missing", v);
Assert.assertTrue(v.contains("u=") && v.contains("p=") && v.contains
("t=") && v.contains("i=") && v.contains("e=")
&& v.contains("s="));
Mockito.verify(chain).doFilter(Mockito.any(ServletRequest.class),
Mockito.any(ServletResponse.class));
SignerSecretProvider secretProvider =
StringSignerSecretProviderCreator.newStringSignerSecretProvider();
Properties secretProviderProps = new Properties();
secretProviderProps.setProperty(
AuthenticationFilter.SIGNATURE_SECRET, "secret");
secretProvider.init(secretProviderProps, null, TOKEN_VALIDITY_SEC);
Signer signer = new Signer(secretProvider);
String value = signer.verifyAndExtract(v);
AuthenticationToken token = AuthenticationToken.parse(value);
assertThat(token.getMaxInactives(), not(0L));
assertThat(token.getExpires(), not(0L));
Assert.assertFalse("Token is expired.", token.isExpired());
}
private static void verifyUnauthorized(AuthenticationFilter filter,
HttpServletRequest request,
HttpServletResponse response,
FilterChain chain) throws
IOException,
ServletException {
final HashMap<String, String> cookieMap = new HashMap<String, String>();
final Map<String, String> cookieMap = new HashMap<String, String>();
Mockito.doAnswer(new Answer<Object>() {
@Override
public Object answer(InvocationOnMock invocation) throws Throwable {

View File

@ -750,6 +750,12 @@ Release 2.8.0 - UNRELEASED
HADOOP-12295. Improve NetworkTopology#InnerNode#remove logic. (yliu)
HADOOP-12050. Enable MaxInactiveInterval for hadoop http auth token
(hzlu via benoyantony)
HADOOP-12325. RPC Metrics : Add the ability track and log slow RPCs.
(Anu Engineer via xyao)
OPTIMIZATIONS
HADOOP-11785. Reduce the number of listStatus operation in distcp
@ -790,6 +796,9 @@ Release 2.8.0 - UNRELEASED
BUG FIXES
HADOOP-12352. Delay in checkpointing Trash can leave trash for 2 intervals
before deleting (Casey Brotherton via harsh)
HADOOP-11802: DomainSocketWatcher thread terminates sometimes after there
is an I/O error during requestShortCircuitShm (cmccabe)
@ -1063,6 +1072,16 @@ Release 2.8.0 - UNRELEASED
HADOOP-12322. typos in rpcmetrics.java. (Anu Engineer via
Arpit Agarwal)
HADOOP-12317. Applications fail on NM restart on some linux distro
because NM container recovery declares AM container as LOST
(adhoot via rkanter)
HADOOP-12347. Fix mismatch parameter name in javadocs of
AuthToken#setMaxInactives (xyao)
HADOOP-12362. Set hadoop.tmp.dir and hadoop.log.dir in pom.
(Charlie Helin via wang)
Release 2.7.2 - UNRELEASED
INCOMPATIBLE CHANGES
@ -1092,6 +1111,9 @@ Release 2.7.2 - UNRELEASED
HADOOP-11932. MetricsSinkAdapter may hang when being stopped.
(Brahma Reddy Battula via jianhe)
HADOOP-12061. Incorrect command in single cluster setup document.
(Kengo Seki via aajisaka)
Release 2.7.1 - 2015-07-06
INCOMPATIBLE CHANGES
@ -1861,6 +1883,18 @@ Release 2.7.0 - 2015-04-20
HADOOP-11837. AuthenticationFilter should destroy SignerSecretProvider in
Tomcat deployments. (Bowen Zhang via wheat9)
Release 2.6.2 - UNRELEASED
INCOMPATIBLE CHANGES
NEW FEATURES
IMPROVEMENTS
OPTIMIZATIONS
BUG FIXES
Release 2.6.1 - UNRELEASED
INCOMPATIBLE CHANGES

View File

@ -148,6 +148,19 @@ log4j.appender.RFAAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n
log4j.appender.RFAAUDIT.MaxFileSize=${hdfs.audit.log.maxfilesize}
log4j.appender.RFAAUDIT.MaxBackupIndex=${hdfs.audit.log.maxbackupindex}
#
# NameNode metrics logging.
# The default is to retain two namenode-metrics.log files up to 64MB each.
#
log4j.logger.NameNodeMetricsLog=INFO,NNMETRICSRFA
log4j.additivity.NameNodeMetricsLog=false
log4j.appender.NNMETRICSRFA=org.apache.log4j.RollingFileAppender
log4j.appender.NNMETRICSRFA.File=${hadoop.log.dir}/namenode-metrics.log
log4j.appender.NNMETRICSRFA.layout=org.apache.log4j.PatternLayout
log4j.appender.NNMETRICSRFA.layout.ConversionPattern=%d{ISO8601} %m%n
log4j.appender.NNMETRICSRFA.MaxBackupIndex=1
log4j.appender.NNMETRICSRFA.MaxFileSize=64MB
#
# mapred audit logging
#

View File

@ -235,6 +235,11 @@ public class CommonConfigurationKeysPublic {
/** Default value for IPC_SERVER_MAX_CONNECTIONS_KEY */
public static final int IPC_SERVER_MAX_CONNECTIONS_DEFAULT = 0;
/** Logs if a RPC is really slow compared to rest of RPCs. */
public static final String IPC_SERVER_LOG_SLOW_RPC =
"ipc.server.log.slow.rpc";
public static final boolean IPC_SERVER_LOG_SLOW_RPC_DEFAULT = false;
/** See <a href="{@docRoot}/../core-default.html">core-default.xml</a> */
public static final String HADOOP_RPC_SOCKET_FACTORY_CLASS_DEFAULT_KEY =
"hadoop.rpc.socket.factory.class.default";

View File

@ -53,7 +53,7 @@ import org.apache.hadoop.classification.InterfaceStability;
* partial block.</li>
* </ol>
*
* Following combination is not valid and will result in
* Following combinations are not valid and will result in
* {@link HadoopIllegalArgumentException}:
* <ol>
* <li> APPEND|OVERWRITE</li>

View File

@ -161,12 +161,19 @@ public class TrashPolicyDefault extends TrashPolicy {
@SuppressWarnings("deprecation")
@Override
public void createCheckpoint() throws IOException {
createCheckpoint(new Date());
}
@SuppressWarnings("deprecation")
public void createCheckpoint(Date date) throws IOException {
if (!fs.exists(current)) // no trash, no checkpoint
return;
Path checkpointBase;
synchronized (CHECKPOINT) {
checkpointBase = new Path(trash, CHECKPOINT.format(new Date()));
checkpointBase = new Path(trash, CHECKPOINT.format(date));
}
Path checkpoint = checkpointBase;
@ -287,7 +294,7 @@ public class TrashPolicyDefault extends TrashPolicy {
TrashPolicyDefault trash = new TrashPolicyDefault(
fs, home.getPath(), conf);
trash.deleteCheckpoint();
trash.createCheckpoint();
trash.createCheckpoint(new Date(now));
} catch (IOException e) {
LOG.warn("Trash caught: "+e+". Skipping "+home.getPath()+".");
}

View File

@ -567,7 +567,7 @@ public class ProtobufRpcEngine implements RpcEngine {
/**
* This is a server side method, which is invoked over RPC. On success
* the return response has protobuf response payload. On failure, the
* exception name and the stack trace are return in the resposne.
* exception name and the stack trace are returned in the response.
* See {@link HadoopRpcResponseProto}
*
* In this method there three types of exceptions possible and they are
@ -657,6 +657,9 @@ public class ProtobufRpcEngine implements RpcEngine {
server.rpcMetrics.addRpcProcessingTime(processingTime);
server.rpcDetailedMetrics.addProcessingTime(detailedMetricsName,
processingTime);
if (server.isLogSlowRPC()) {
server.logSlowRpcCalls(methodName, processingTime);
}
}
return new RpcResponseWrapper(result);
}

View File

@ -387,6 +387,62 @@ public abstract class Server {
private Responder responder = null;
private Handler[] handlers = null;
private boolean logSlowRPC = false;
/**
* Checks whether slow RPC logging is enabled.
* @return true if slow RPC logging is enabled.
*/
protected boolean isLogSlowRPC() {
return logSlowRPC;
}
/**
* Sets the slow RPC logging flag.
* @param logSlowRPCFlag true to enable slow RPC logging
*/
@VisibleForTesting
protected void setLogSlowRPC(boolean logSlowRPCFlag) {
this.logSlowRPC = logSlowRPCFlag;
}
/**
* Logs a Slow RPC Request.
*
* @param methodName - RPC Request method name
* @param processingTime - Processing Time.
*
* If this request took too much time relative to other requests,
* we consider it a slow RPC. 3 is a magic number that comes from the
* three-sigma deviation; a very simple explanation can be found by
* searching for the 68-95-99.7 rule. We flag an RPC as slow if and only
* if it falls above 99.7% of requests, and we start this logic only once
* we have a large enough sample size.
*/
void logSlowRpcCalls(String methodName, int processingTime) {
final int deviation = 3;
// 1024 for minSampleSize is just a guess -- not a number computed based on
// sample size analysis. It is chosen with the hope that this
// number is high enough to avoid spurious logging, yet useful
// in practice.
final int minSampleSize = 1024;
final double threeSigma = rpcMetrics.getProcessingMean() +
(rpcMetrics.getProcessingStdDev() * deviation);
if ((rpcMetrics.getProcessingSampleCount() > minSampleSize) &&
(processingTime > threeSigma)) {
if (LOG.isWarnEnabled()) {
String client = CurCall.get().connection.toString();
LOG.warn(
"Slow RPC : " + methodName + " took " + processingTime +
" milliseconds to process from client " + client);
}
rpcMetrics.incrSlowRpc();
}
}
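A quick worked example of the three-sigma threshold above, using hypothetical metric values (mean 20 ms, standard deviation 5 ms, 2000 samples seen):

// Worked example of the threshold used in logSlowRpcCalls(); the metric
// values below are hypothetical, not read from a live server.
public class SlowRpcThresholdSketch {
  public static void main(String[] args) {
    double mean = 20.0, stdDev = 5.0;               // hypothetical rpcMetrics values
    long sampleCount = 2000;                        // > minSampleSize (1024)
    final int deviation = 3;
    double threeSigma = mean + stdDev * deviation;  // 35 ms
    int processingTime = 200;                       // a hypothetical slow call
    boolean flagged = sampleCount > 1024 && processingTime > threeSigma;
    System.out.println("threshold=" + threeSigma + " flagged=" + flagged); // flagged=true
  }
}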
/**
* A convenience method to bind to a given address and report
* better exceptions if the address is not a valid host.
@ -2346,6 +2402,10 @@ public abstract class Server {
CommonConfigurationKeysPublic.IPC_SERVER_TCPNODELAY_KEY,
CommonConfigurationKeysPublic.IPC_SERVER_TCPNODELAY_DEFAULT);
this.setLogSlowRPC(conf.getBoolean(
CommonConfigurationKeysPublic.IPC_SERVER_LOG_SLOW_RPC,
CommonConfigurationKeysPublic.IPC_SERVER_LOG_SLOW_RPC_DEFAULT));
// Create the responder here
responder = new Responder();

View File

@ -551,6 +551,9 @@ public class WritableRpcEngine implements RpcEngine {
server.rpcMetrics.addRpcProcessingTime(processingTime);
server.rpcDetailedMetrics.addProcessingTime(detailedMetricsName,
processingTime);
if (server.isLogSlowRPC()) {
server.logSlowRpcCalls(call.getMethodName(), processingTime);
}
}
}
}

View File

@ -97,6 +97,8 @@ public class RpcMetrics {
MutableCounterLong rpcAuthorizationSuccesses;
@Metric("Number of client backoff requests")
MutableCounterLong rpcClientBackoff;
@Metric("Number of Slow RPC calls")
MutableCounterLong rpcSlowCalls;
@Metric("Number of open connections") public int numOpenConnections() {
return server.getNumOpenConnections();
@ -202,4 +204,50 @@ public class RpcMetrics {
public void incrClientBackoff() {
rpcClientBackoff.incr();
}
/**
* Increments the Slow RPC counter.
*/
public void incrSlowRpc() {
rpcSlowCalls.incr();
}
/**
* Returns the MutableRate metric that tracks RPC processing time.
* @return MutableRate
*/
public MutableRate getRpcProcessingTime() {
return rpcProcessingTime;
}
/**
* Returns the number of samples that we have seen so far.
* @return long
*/
public long getProcessingSampleCount() {
return rpcProcessingTime.lastStat().numSamples();
}
/**
* Returns mean of RPC Processing Times.
* @return double
*/
public double getProcessingMean() {
return rpcProcessingTime.lastStat().mean();
}
/**
* Return Standard Deviation of the Processing Time.
* @return double
*/
public double getProcessingStdDev() {
return rpcProcessingTime.lastStat().stddev();
}
/**
* Returns the number of slow calls.
* @return long
*/
public long getRpcSlowCalls() {
return rpcSlowCalls.value();
}
}

View File

@ -140,7 +140,12 @@ public class MutableStat extends MutableMetric {
}
}
private SampleStat lastStat() {
/**
* Return a SampleStat object that supports
* calls like StdDev and Mean.
* @return SampleStat
*/
public SampleStat lastStat() {
return changed() ? intervalStat : prevStat;
}

View File

@ -18,6 +18,8 @@
package org.apache.hadoop.metrics2.util;
import java.lang.management.ManagementFactory;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import javax.management.InstanceAlreadyExistsException;
import javax.management.MBeanServer;
@ -38,6 +40,13 @@ import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
@InterfaceStability.Stable
public class MBeans {
private static final Log LOG = LogFactory.getLog(MBeans.class);
private static final String DOMAIN_PREFIX = "Hadoop:";
private static final String SERVICE_PREFIX = "service=";
private static final String NAME_PREFIX = "name=";
private static final Pattern MBEAN_NAME_PATTERN = Pattern.compile(
"^" + DOMAIN_PREFIX + SERVICE_PREFIX + "([^,]+)," +
NAME_PREFIX + "(.+)$");
/**
* Register the MBean using our standard MBeanName format
@ -72,6 +81,26 @@ public class MBeans {
return null;
}
public static String getMbeanNameService(final ObjectName objectName) {
Matcher matcher = MBEAN_NAME_PATTERN.matcher(objectName.toString());
if (matcher.matches()) {
return matcher.group(1);
} else {
throw new IllegalArgumentException(
objectName + " is not a valid Hadoop mbean");
}
}
public static String getMbeanNameName(final ObjectName objectName) {
Matcher matcher = MBEAN_NAME_PATTERN.matcher(objectName.toString());
if (matcher.matches()) {
return matcher.group(2);
} else {
throw new IllegalArgumentException(
objectName + " is not a valid Hadoop mbean");
}
}
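A small usage sketch for the two helpers above; the ObjectName is a hypothetical example that follows the Hadoop:service=&lt;service&gt;,name=&lt;name&gt; convention enforced by MBEAN_NAME_PATTERN.

import javax.management.ObjectName;
import org.apache.hadoop.metrics2.util.MBeans;

// Sketch only: the ObjectName below is a hypothetical example.
public class MBeanNameSketch {
  public static void main(String[] args) throws Exception {
    ObjectName name = new ObjectName("Hadoop:service=NameNode,name=NameNodeInfo");
    System.out.println(MBeans.getMbeanNameService(name)); // NameNode
    System.out.println(MBeans.getMbeanNameName(name));    // NameNodeInfo
  }
}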
static public void unregister(ObjectName mbeanName) {
LOG.debug("Unregistering "+ mbeanName);
final MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
@ -88,13 +117,13 @@ public class MBeans {
}
static private ObjectName getMBeanName(String serviceName, String nameName) {
ObjectName name = null;
String nameStr = "Hadoop:service="+ serviceName +",name="+ nameName;
String nameStr = DOMAIN_PREFIX + SERVICE_PREFIX + serviceName + "," +
NAME_PREFIX + nameName;
try {
name = DefaultMetricsSystem.newMBeanName(nameStr);
return DefaultMetricsSystem.newMBeanName(nameStr);
} catch (Exception e) {
LOG.warn("Error creating MBean object name: "+ nameStr, e);
return null;
}
return name;
}
}

View File

@ -80,13 +80,14 @@ public class HostsFileReader {
String[] nodes = line.split("[ \t\n\f\r]+");
if (nodes != null) {
for (int i = 0; i < nodes.length; i++) {
if (nodes[i].trim().startsWith("#")) {
nodes[i] = nodes[i].trim();
if (nodes[i].startsWith("#")) {
// Everything from now on is a comment
break;
}
if (!nodes[i].isEmpty()) {
LOG.info("Adding " + nodes[i] + " to the list of " + type +
" hosts from " + filename);
LOG.info("Adding a node \"" + nodes[i] + "\" to the list of "
+ type + " hosts from " + filename);
set.add(nodes[i]);
}
}

View File

@ -212,13 +212,18 @@ abstract public class Shell {
public static String[] getCheckProcessIsAliveCommand(String pid) {
return Shell.WINDOWS ?
new String[] { Shell.WINUTILS, "task", "isAlive", pid } :
new String[] { "kill", "-0", isSetsidAvailable ? "-" + pid : pid };
isSetsidAvailable ?
new String[] { "kill", "-0", "--", "-" + pid } :
new String[] { "kill", "-0", pid };
}
/** Return a command to send a signal to a given pid */
public static String[] getSignalKillCommand(int code, String pid) {
return Shell.WINDOWS ? new String[] { Shell.WINUTILS, "task", "kill", pid } :
new String[] { "kill", "-" + code, isSetsidAvailable ? "-" + pid : pid };
return Shell.WINDOWS ?
new String[] { Shell.WINUTILS, "task", "kill", pid } :
isSetsidAvailable ?
new String[] { "kill", "-" + code, "--", "-" + pid } :
new String[] { "kill", "-" + code, pid };
}
public static final String ENV_NAME_REGEX = "[A-Za-z_][A-Za-z0-9_]*";

View File

@ -18,7 +18,6 @@
package org.apache.hadoop.util;
import com.google.common.base.Preconditions;
import java.io.PrintWriter;
import java.io.StringWriter;
import java.net.URI;
@ -28,7 +27,6 @@ import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.Date;
import java.util.HashSet;
import java.util.Iterator;
import java.util.LinkedHashSet;
import java.util.List;
@ -45,6 +43,7 @@ import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.net.NetUtils;
import com.google.common.base.Preconditions;
import com.google.common.net.InetAddresses;
/**
@ -316,7 +315,18 @@ public class StringUtils {
* @return the arraylist of the comma separated string values
*/
public static String[] getStrings(String str){
Collection<String> values = getStringCollection(str);
String delim = ",";
return getStrings(str, delim);
}
/**
* Returns an arraylist of strings.
* @param str the string values
* @param delim delimiter to separate the values
* @return the arraylist of the separated string values
*/
public static String[] getStrings(String str, String delim){
Collection<String> values = getStringCollection(str, delim);
if(values.size() == 0) {
return null;
}
@ -379,19 +389,6 @@ public class StringUtils {
return str.trim().split("\\s*,\\s*");
}
/**
* Trims all the strings in a Collection<String> and returns a Set<String>.
* @param strings
* @return
*/
public static Set<String> getTrimmedStrings(Collection<String> strings) {
Set<String> trimmedStrings = new HashSet<String>();
for (String string: strings) {
trimmedStrings.add(string.trim());
}
return trimmedStrings;
}
final public static String[] emptyStringArray = {};
final public static char COMMA = ',';
final public static String COMMA_STR = ",";
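A brief sketch of the new delimiter-aware getStrings overload above, assuming getStringCollection(str, delim) tokenizes on the supplied delimiter; the sample input is hypothetical.

import org.apache.hadoop.util.StringUtils;

// Sketch of the new two-argument overload; a hypothetical semicolon-separated
// value is split instead of the default comma.
public class GetStringsSketch {
  public static void main(String[] args) {
    String[] parts = StringUtils.getStrings("a;b;c", ";");
    for (String p : parts) {
      System.out.println(p);  // a, b, c on separate lines
    }
    // The one-argument form keeps the historical comma behaviour.
    String[] csv = StringUtils.getStrings("x,y");
    System.out.println(csv.length);  // 2
  }
}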

View File

@ -1032,6 +1032,15 @@ for ldap providers in the same way as above does.
</description>
</property>
<property>
<name>ipc.server.log.slow.rpc</name>
<value>false</value>
<description>This setting is useful to troubleshoot performance issues for
various services. If this value is set to true then we log requests that
fall into the 99th percentile as well as increment the RpcSlowCalls counter.
</description>
</property>
<!-- Proxy Configuration -->
<property>

View File

@ -41,13 +41,17 @@ The following properties should be in the `core-site.xml` of all the nodes in th
`hadoop.http.filter.initializers`: add to this property the `org.apache.hadoop.security.AuthenticationFilterInitializer` initializer class.
`hadoop.http.authentication.type`: Defines authentication used for the HTTP web-consoles. The supported values are: `simple` | `kerberos` | `#AUTHENTICATION_HANDLER_CLASSNAME#`. The dfeault value is `simple`.
`hadoop.http.authentication.type`: Defines authentication used for the HTTP web-consoles. The supported values are: `simple` | `kerberos` | `#AUTHENTICATION_HANDLER_CLASSNAME#`. The default value is `simple`.
`hadoop.http.authentication.token.validity`: Indicates how long (in seconds) an authentication token is valid before it has to be renewed. The default value is `36000`.
`hadoop.http.authentication.token.MaxInactiveInterval`: Specifies the maximum time, in seconds, between client requests before the server invalidates the token. The default value is `1800` (30 minutes).
`hadoop.http.authentication.signature.secret.file`: The signature secret file for signing the authentication tokens. The same secret should be used for all nodes in the cluster, JobTracker, NameNode, DataNode and TastTracker. The default value is `$user.home/hadoop-http-auth-signature-secret`. IMPORTANT: This file should be readable only by the Unix user running the daemons.
`hadoop.http.authentication.cookie.domain`: The domain to use for the HTTP cookie that stores the authentication token. In order to authentiation to work correctly across all nodes in the cluster the domain must be correctly set. There is no default value, the HTTP cookie will not have a domain working only with the hostname issuing the HTTP cookie.
`hadoop.http.authentication.cookie.domain`: The domain to use for the HTTP cookie that stores the authentication token. In order for authentication to work correctly across all nodes in the cluster the domain must be correctly set. There is no default value; without a domain the HTTP cookie will only work with the hostname that issued it.
`hadoop.http.authentication.cookie.persistent`: Specifies the persistence of the HTTP cookie. If the value is true, the cookie is a persistent one. Otherwise, it is a session cookie. The default value is `false` (session cookie).
IMPORTANT: when using IP addresses, browsers ignore cookies with domain settings. For this setting to work properly all nodes in the cluster must be configured to generate URLs with `hostname.domain` names on it.
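The settings above normally live in core-site.xml; the sketch below applies equivalent example values programmatically through org.apache.hadoop.conf.Configuration, using the documented key names (the values themselves are illustrative).

import org.apache.hadoop.conf.Configuration;

// Sketch only: the keys are the properties documented above, the values are
// illustrative and would normally be set in core-site.xml.
public class HttpAuthConfigSketch {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    conf.set("hadoop.http.filter.initializers",
        "org.apache.hadoop.security.AuthenticationFilterInitializer");
    conf.set("hadoop.http.authentication.type", "simple");
    conf.setLong("hadoop.http.authentication.token.validity", 36000);
    conf.setLong("hadoop.http.authentication.token.MaxInactiveInterval", 1800);
    conf.setBoolean("hadoop.http.authentication.cookie.persistent", false);
    System.out.println(conf.get("hadoop.http.authentication.type"));
  }
}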

View File

@ -237,6 +237,8 @@ Each metrics record contains tags such as HAState and Hostname as additional inf
| `HAState` | (HA-only) Current state of the NameNode: initializing or active or standby or stopping state |
| `FSState` | Current state of the file system: Safemode or Operational |
| `LockQueueLength` | Number of threads waiting to acquire FSNameSystem lock |
| `TotalSyncCount` | Total number of sync operations performed by edit log |
| `TotalSyncTimes` | Total number of milliseconds spent by various edit logs in sync operation |
JournalNode
-----------

View File

@ -140,7 +140,7 @@ If you cannot ssh to localhost without a passphrase, execute the following comma
$ ssh-keygen -t dsa -P '' -f ~/.ssh/id_dsa
$ cat ~/.ssh/id_dsa.pub >> ~/.ssh/authorized_keys
$ chmod 0700 ~/.ssh/authorized_keys
$ chmod 0600 ~/.ssh/authorized_keys
$H3 Execution

View File

@ -19,6 +19,8 @@ package org.apache.hadoop.ipc;
import static org.apache.hadoop.test.MetricsAsserts.getMetrics;
import static org.apache.hadoop.test.MetricsAsserts.assertCounterGt;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import java.io.IOException;
import java.net.InetSocketAddress;
@ -27,7 +29,9 @@ import java.net.URISyntaxException;
import org.apache.commons.lang.StringUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.ipc.metrics.RpcMetrics;
import org.apache.hadoop.ipc.protobuf.RpcHeaderProtos.RpcResponseHeaderProto.RpcErrorCodeProto;
import org.apache.hadoop.ipc.protobuf.TestProtos;
import org.apache.hadoop.ipc.protobuf.TestProtos.EchoRequestProto;
import org.apache.hadoop.ipc.protobuf.TestProtos.EchoResponseProto;
import org.apache.hadoop.ipc.protobuf.TestProtos.EmptyRequestProto;
@ -41,6 +45,7 @@ import org.junit.Test;
import org.junit.Before;
import org.junit.After;
import com.google.protobuf.BlockingService;
import com.google.protobuf.RpcController;
import com.google.protobuf.ServiceException;
@ -56,7 +61,8 @@ public class TestProtoBufRpc {
private static InetSocketAddress addr;
private static Configuration conf;
private static RPC.Server server;
private final static int SLEEP_DURATION = 1000;
@ProtocolInfo(protocolName = "testProto", protocolVersion = 1)
public interface TestRpcService
extends TestProtobufRpcProto.BlockingInterface {
@ -114,12 +120,23 @@ public class TestProtoBufRpc {
return EchoResponseProto.newBuilder().setMessage(request.getMessage())
.build();
}
@Override
public TestProtos.SleepResponseProto sleep(RpcController controller,
TestProtos.SleepRequestProto request) throws ServiceException {
try{
Thread.sleep(request.getMilliSeconds());
} catch (InterruptedException ex){
}
return TestProtos.SleepResponseProto.newBuilder().build();
}
}
@Before
public void setUp() throws IOException { // Setup server for both protocols
conf = new Configuration();
conf.setInt(CommonConfigurationKeys.IPC_MAXIMUM_DATA_LENGTH, 1024);
conf.setBoolean(CommonConfigurationKeys.IPC_SERVER_LOG_SLOW_RPC, true);
// Set RPC engine to protobuf RPC engine
RPC.setProtocolEngine(conf, TestRpcService.class, ProtobufRpcEngine.class);
@ -257,4 +274,62 @@ public class TestProtoBufRpc {
// expected
}
}
@Test(timeout = 12000)
public void testLogSlowRPC() throws IOException, ServiceException {
TestRpcService2 client = getClient2();
// make 10 K fast calls
for (int x = 0; x < 10000; x++) {
try {
EmptyRequestProto emptyRequest = EmptyRequestProto.newBuilder().build();
client.ping2(null, emptyRequest);
} catch (Exception ex) {
throw ex;
}
}
// Ensure RPC metrics are updated
RpcMetrics rpcMetrics = server.getRpcMetrics();
assertTrue(rpcMetrics.getProcessingSampleCount() > 999L);
long before = rpcMetrics.getRpcSlowCalls();
// make a really slow call that sleeps for 3 * SLEEP_DURATION ms
TestProtos.SleepRequestProto sleepRequest =
TestProtos.SleepRequestProto.newBuilder()
.setMilliSeconds(SLEEP_DURATION * 3).build();
TestProtos.SleepResponseProto Response = client.sleep(null, sleepRequest);
long after = rpcMetrics.getRpcSlowCalls();
// Ensure slow call is logged.
Assert.assertEquals(before + 1L, after);
}
@Test(timeout = 12000)
public void testEnsureNoLogIfDisabled() throws IOException, ServiceException {
// disable slow RPC logging
server.setLogSlowRPC(false);
TestRpcService2 client = getClient2();
// make 10 K fast calls
for (int x = 0; x < 10000; x++) {
EmptyRequestProto emptyRequest = EmptyRequestProto.newBuilder().build();
client.ping2(null, emptyRequest);
}
// Ensure RPC metrics are updated
RpcMetrics rpcMetrics = server.getRpcMetrics();
assertTrue(rpcMetrics.getProcessingSampleCount() > 999L);
long before = rpcMetrics.getRpcSlowCalls();
// make a really slow call. Sleep sleeps for 1000ms
TestProtos.SleepRequestProto sleepRequest =
TestProtos.SleepRequestProto.newBuilder()
.setMilliSeconds(SLEEP_DURATION).build();
TestProtos.SleepResponseProto Response = client.sleep(null, sleepRequest);
long after = rpcMetrics.getRpcSlowCalls();
// make sure we never called into the slow RPC logging routine.
assertEquals(before, after);
}
}

View File

@ -87,7 +87,7 @@ public class MetricsAsserts {
* Call getMetrics on source and get a record builder mock to verify
* @param source the metrics source
* @param all if true, return all metrics even if not changed
* @return the record builder mock to verify
* @return the record builder mock to verify
*/
public static MetricsRecordBuilder getMetrics(MetricsSource source,
boolean all) {

View File

@ -18,6 +18,7 @@
package org.apache.hadoop.util;
import junit.framework.TestCase;
import org.junit.Assert;
import java.io.BufferedReader;
import java.io.File;
@ -150,6 +151,44 @@ public class TestShell extends TestCase {
System.err.println("after: " + timersAfter);
assertEquals(timersBefore, timersAfter);
}
public void testGetCheckProcessIsAliveCommand() throws Exception {
String anyPid = "9999";
String[] checkProcessAliveCommand = Shell.getCheckProcessIsAliveCommand(
anyPid);
String[] expectedCommand;
if (Shell.WINDOWS) {
expectedCommand =
new String[]{ Shell.WINUTILS, "task", "isAlive", anyPid };
} else if (Shell.isSetsidAvailable) {
expectedCommand = new String[]{ "kill", "-0", "--", "-" + anyPid };
} else {
expectedCommand = new String[]{"kill", "-0", anyPid};
}
Assert.assertArrayEquals(expectedCommand, checkProcessAliveCommand);
}
public void testGetSignalKillCommand() throws Exception {
String anyPid = "9999";
int anySignal = 9;
String[] checkProcessAliveCommand = Shell.getSignalKillCommand(anySignal,
anyPid);
String[] expectedCommand;
if (Shell.WINDOWS) {
expectedCommand =
new String[]{ Shell.WINUTILS, "task", "kill", anyPid };
} else if (Shell.isSetsidAvailable) {
expectedCommand =
new String[]{ "kill", "-" + anySignal, "--", "-" + anyPid };
} else {
expectedCommand =
new String[]{ "kill", "-" + anySignal, anyPid };
}
Assert.assertArrayEquals(expectedCommand, checkProcessAliveCommand);
}
private void testInterval(long interval) throws IOException {

View File

@ -34,3 +34,10 @@ message EchoRequestProto {
message EchoResponseProto {
required string message = 1;
}
message SleepRequestProto {
required int32 milliSeconds = 1;
}
message SleepResponseProto {
}

View File

@ -37,4 +37,5 @@ service TestProtobufRpcProto {
service TestProtobufRpc2Proto {
rpc ping2(EmptyRequestProto) returns (EmptyResponseProto);
rpc echo2(EchoRequestProto) returns (EchoResponseProto);
rpc sleep(SleepRequestProto) returns (SleepResponseProto);
}

View File

@ -21,4 +21,14 @@
<Package name="org.apache.hadoop.hdfs.protocol.proto" />
<Bug pattern="SE_BAD_FIELD,MS_SHOULD_BE_FINAL,UCF_USELESS_CONTROL_FLOW" />
</Match>
<Match>
<Class name="org.apache.hadoop.hdfs.shortcircuit.DfsClientShmManager$EndpointShmManager" />
<Method name="allocSlot" />
<Bug pattern="UL_UNRELEASED_LOCK_EXCEPTION_PATH" />
</Match>
<Match>
<Class name="org.apache.hadoop.hdfs.shortcircuit.DfsClientShmManager$EndpointShmManager" />
<Method name="allocSlot" />
<Bug pattern="UL_UNRELEASED_LOCK" />
</Match>
</FindBugsFilter>

View File

@ -36,11 +36,13 @@ import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.UnsupportedEncodingException;
import java.net.InetAddress;
import java.net.InetSocketAddress;
import java.text.SimpleDateFormat;
import java.util.Collection;
import java.util.Collections;
import java.util.Date;
import java.util.HashMap;
import java.util.List;
import java.util.Locale;
import java.util.Map;
@ -429,4 +431,28 @@ public class DFSUtilClient {
new SimpleDateFormat("yyyy-MM-dd'T'HH:mm:ssZ", Locale.ENGLISH);
return df.format(date);
}
private static final Map<String, Boolean> localAddrMap = Collections
.synchronizedMap(new HashMap<String, Boolean>());
public static boolean isLocalAddress(InetSocketAddress targetAddr) {
InetAddress addr = targetAddr.getAddress();
Boolean cached = localAddrMap.get(addr.getHostAddress());
if (cached != null) {
if (LOG.isTraceEnabled()) {
LOG.trace("Address " + targetAddr +
(cached ? " is local" : " is not local"));
}
return cached;
}
boolean local = NetUtils.isLocalAddress(addr);
if (LOG.isTraceEnabled()) {
LOG.trace("Address " + targetAddr +
(local ? " is local" : " is not local"));
}
localAddrMap.put(addr.getHostAddress(), local);
return local;
}
}
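A minimal usage sketch of the cached helper above; the 127.0.0.1:8020 endpoint is a hypothetical local NameNode address.

import java.net.InetSocketAddress;
import org.apache.hadoop.hdfs.DFSUtilClient;

// Sketch only: demonstrates the per-address caching behaviour.
public class LocalAddressSketch {
  public static void main(String[] args) {
    InetSocketAddress addr = new InetSocketAddress("127.0.0.1", 8020);
    // First call resolves via NetUtils and caches; the second call hits the map.
    System.out.println(DFSUtilClient.isLocalAddress(addr));
    System.out.println(DFSUtilClient.isLocalAddress(addr));
  }
}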

View File

@ -0,0 +1,88 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import java.io.IOException;
import java.nio.ByteBuffer;
/**
* The public API for ReplicaAccessor objects.
*/
@InterfaceAudience.Public
@InterfaceStability.Stable
public abstract class ReplicaAccessor {
/**
* Read bytes from the replica.
*
* @param pos The position in the replica to start reading at.
* Must not be negative.
* @param buf The byte array to read into.
* @param off The offset within buf to start reading into.
* @param len The maximum length to read.
*
* @return The number of bytes read. If the read extends past the end
* of the replica, a short read count will be returned. We
* will never return a negative number. We will never
* return a short read count unless EOF is reached.
*/
public abstract int read(long pos, byte[] buf, int off, int len)
throws IOException;
/**
* Read bytes from the replica.
*
* @param pos The position in the replica to start reading at.
* Must not be negative.
* @param buf The byte buffer to read into. The amount to read will be
* dictated by the remaining bytes between the current
* position and the limit. The ByteBuffer may or may not be
* direct.
*
* @return The number of bytes read. If the read extends past the end
* of the replica, a short read count will be returned. We
* will never return a negative number. We will never return
* a short read count unless EOF is reached.
*/
public abstract int read(long pos, ByteBuffer buf) throws IOException;
/**
* Release the resources associated with the ReplicaAccessor.
*
* It is recommended that implementations never throw an IOException. The
* method is declared as throwing IOException in order to remain compatible
* with java.io.Closeable. If an exception is thrown, the ReplicaAccessor
* must still be closed when the function returns in order to prevent a
* resource leak.
*/
public abstract void close() throws IOException;
/**
* Return true if bytes read via this accessor should count towards the
* local byte count statistics.
*/
public abstract boolean isLocal();
/**
* Return true if bytes read via this accessor should count towards the
* short-circuit byte count statistics.
*/
public abstract boolean isShortCircuit();
}
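As an illustration of the read contract documented above, here is a hypothetical ReplicaAccessor backed by an in-memory byte array; it is a sketch, not a real plugin.

import java.io.IOException;
import java.nio.ByteBuffer;
import org.apache.hadoop.hdfs.ReplicaAccessor;

// Hypothetical, minimal ReplicaAccessor backed by an in-memory byte array,
// meant only to illustrate the read contract above.
public class InMemoryReplicaAccessor extends ReplicaAccessor {
  private final byte[] data;

  public InMemoryReplicaAccessor(byte[] data) {
    this.data = data;
  }

  @Override
  public int read(long pos, byte[] buf, int off, int len) throws IOException {
    if (pos >= data.length) {
      return 0; // short read counts are only returned at EOF
    }
    int toCopy = (int) Math.min(len, data.length - pos);
    System.arraycopy(data, (int) pos, buf, off, toCopy);
    return toCopy;
  }

  @Override
  public int read(long pos, ByteBuffer buf) throws IOException {
    if (pos >= data.length) {
      return 0;
    }
    int toCopy = (int) Math.min(buf.remaining(), data.length - pos);
    buf.put(data, (int) pos, toCopy);
    return toCopy;
  }

  @Override
  public void close() throws IOException {
    // nothing to release for an in-memory replica
  }

  @Override
  public boolean isLocal() {
    return true;
  }

  @Override
  public boolean isShortCircuit() {
    return true;
  }
}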

View File

@ -0,0 +1,101 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
/**
* The public API for creating a new ReplicaAccessor.
*/
@InterfaceAudience.Public
@InterfaceStability.Stable
public abstract class ReplicaAccessorBuilder {
/**
* Set the file name which is being opened. Provided for debugging purposes.
*/
public abstract ReplicaAccessorBuilder setFileName(String fileName);
/** Set the block ID and block pool ID which are being opened. */
public abstract ReplicaAccessorBuilder
setBlock(long blockId, String blockPoolId);
/**
* Set whether checksums must be verified. Checksums should be skipped if
* the user has disabled checksum verification in the configuration. Users
* may wish to do this if their software does checksum verification at a
* higher level than HDFS.
*/
public abstract ReplicaAccessorBuilder
setVerifyChecksum(boolean verifyChecksum);
/** Set the name of the HDFS client. Provided for debugging purposes. */
public abstract ReplicaAccessorBuilder setClientName(String clientName);
/**
* Set whether short-circuit is enabled. Short-circuit may be disabled if
* the user has set dfs.client.read.shortcircuit to false, or if the block
* being read is under construction. The fact that this bit is enabled does
* not mean that the user has permission to do short-circuit reads or to
* access the replica-- that must be checked separately by the
* ReplicaAccessorBuilder implementation.
*/
public abstract ReplicaAccessorBuilder
setAllowShortCircuitReads(boolean allowShortCircuit);
/**
* Set the length of the replica which is visible to this client. If bytes
* are added later, they will not be visible to the ReplicaAccessor we are
* building. In order to see more of the replica, the client must re-open
* this HDFS file. The visible length provides an upper bound, but not a
* lower one. If the replica is deleted or truncated, fewer bytes may be
* visible than specified here.
*/
public abstract ReplicaAccessorBuilder setVisibleLength(long visibleLength);
/**
* Set the configuration to use. ReplicaAccessorBuilder subclasses should
* define their own configuration prefix. For example, the foobar plugin
* could look for configuration keys like foo.bar.parameter1,
* foo.bar.parameter2.
*/
public abstract ReplicaAccessorBuilder setConfiguration(Configuration conf);
/**
* Set the block access token to use.
*/
public abstract ReplicaAccessorBuilder setBlockAccessToken(byte[] token);
/**
* Build a new ReplicaAccessor.
*
* The implementation must perform any necessary access checks before
* constructing the ReplicaAccessor. If there is a hardware-level or
* network-level setup operation that could fail, it should be done here. If
* the implementation returns a ReplicaAccessor, we will assume that it works
* and not attempt to construct a normal BlockReader.
*
* If the ReplicaAccessor could not be built, implementations may wish to log
* a message at TRACE level indicating why.
*
* @return null if the ReplicaAccessor could not be built; the
* ReplicaAccessor otherwise.
*/
public abstract ReplicaAccessor build();
}
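A hypothetical builder sketch that satisfies the abstract methods above and simply declines to build an accessor; returning null from build() makes the client fall back to a normal BlockReader, as the contract above allows.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.ReplicaAccessor;
import org.apache.hadoop.hdfs.ReplicaAccessorBuilder;

// Hypothetical no-op builder: accepts every parameter and then declines to
// build an accessor, so the client falls back to a normal BlockReader.
public class NoOpReplicaAccessorBuilder extends ReplicaAccessorBuilder {
  @Override public ReplicaAccessorBuilder setFileName(String fileName) { return this; }
  @Override public ReplicaAccessorBuilder setBlock(long blockId, String blockPoolId) { return this; }
  @Override public ReplicaAccessorBuilder setVerifyChecksum(boolean verifyChecksum) { return this; }
  @Override public ReplicaAccessorBuilder setClientName(String clientName) { return this; }
  @Override public ReplicaAccessorBuilder setAllowShortCircuitReads(boolean allowShortCircuit) { return this; }
  @Override public ReplicaAccessorBuilder setVisibleLength(long visibleLength) { return this; }
  @Override public ReplicaAccessorBuilder setConfiguration(Configuration conf) { return this; }
  @Override public ReplicaAccessorBuilder setBlockAccessToken(byte[] token) { return this; }
  @Override public ReplicaAccessor build() { return null; }
}

A real implementation would presumably be registered through the dfs.client.replica.accessor.builder.classes key (REPLICA_ACCESSOR_BUILDER_CLASSES_KEY) added to HdfsClientConfigKeys later in this commit.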

View File

@ -17,7 +17,12 @@
*/
package org.apache.hadoop.hdfs.client;
import org.apache.hadoop.classification.InterfaceAudience;
import java.util.concurrent.TimeUnit;
/** Client configuration properties */
@InterfaceAudience.Private
public interface HdfsClientConfigKeys {
long SECOND = 1000L;
long MINUTE = 60 * SECOND;
@ -31,7 +36,7 @@ public interface HdfsClientConfigKeys {
String DFS_WEBHDFS_ACL_PERMISSION_PATTERN_DEFAULT =
"^(default:)?(user|group|mask|other):[[A-Za-z_][A-Za-z0-9._-]]*:([rwx-]{3})?(,(default:)?(user|group|mask|other):[[A-Za-z_][A-Za-z0-9._-]]*:([rwx-]{3})?)*$";
static final String PREFIX = "dfs.client.";
String PREFIX = "dfs.client.";
String DFS_NAMESERVICES = "dfs.nameservices";
int DFS_NAMENODE_HTTP_PORT_DEFAULT = 50070;
String DFS_NAMENODE_HTTP_ADDRESS_KEY = "dfs.namenode.http-address";
@ -41,6 +46,75 @@ public interface HdfsClientConfigKeys {
int DFS_NAMENODE_RPC_PORT_DEFAULT = 8020;
String DFS_NAMENODE_KERBEROS_PRINCIPAL_KEY =
"dfs.namenode.kerberos.principal";
String DFS_CLIENT_WRITE_PACKET_SIZE_KEY = "dfs.client-write-packet-size";
int DFS_CLIENT_WRITE_PACKET_SIZE_DEFAULT = 64*1024;
String DFS_CLIENT_SOCKET_TIMEOUT_KEY = "dfs.client.socket-timeout";
String DFS_CLIENT_SOCKET_CACHE_CAPACITY_KEY =
"dfs.client.socketcache.capacity";
int DFS_CLIENT_SOCKET_CACHE_CAPACITY_DEFAULT = 16;
String DFS_CLIENT_SOCKET_CACHE_EXPIRY_MSEC_KEY =
"dfs.client.socketcache.expiryMsec";
long DFS_CLIENT_SOCKET_CACHE_EXPIRY_MSEC_DEFAULT = 3000;
String DFS_CLIENT_USE_DN_HOSTNAME = "dfs.client.use.datanode.hostname";
boolean DFS_CLIENT_USE_DN_HOSTNAME_DEFAULT = false;
String DFS_CLIENT_CACHE_DROP_BEHIND_WRITES =
"dfs.client.cache.drop.behind.writes";
String DFS_CLIENT_CACHE_DROP_BEHIND_READS =
"dfs.client.cache.drop.behind.reads";
String DFS_CLIENT_CACHE_READAHEAD = "dfs.client.cache.readahead";
String DFS_CLIENT_CACHED_CONN_RETRY_KEY = "dfs.client.cached.conn.retry";
int DFS_CLIENT_CACHED_CONN_RETRY_DEFAULT = 3;
String DFS_CLIENT_CONTEXT = "dfs.client.context";
String DFS_CLIENT_CONTEXT_DEFAULT = "default";
String DFS_CLIENT_FILE_BLOCK_STORAGE_LOCATIONS_NUM_THREADS =
"dfs.client.file-block-storage-locations.num-threads";
int DFS_CLIENT_FILE_BLOCK_STORAGE_LOCATIONS_NUM_THREADS_DEFAULT = 10;
String DFS_CLIENT_FILE_BLOCK_STORAGE_LOCATIONS_TIMEOUT_MS =
"dfs.client.file-block-storage-locations.timeout.millis";
int DFS_CLIENT_FILE_BLOCK_STORAGE_LOCATIONS_TIMEOUT_MS_DEFAULT = 1000;
String DFS_CLIENT_USE_LEGACY_BLOCKREADER =
"dfs.client.use.legacy.blockreader";
boolean DFS_CLIENT_USE_LEGACY_BLOCKREADER_DEFAULT = false;
String DFS_CLIENT_USE_LEGACY_BLOCKREADERLOCAL =
"dfs.client.use.legacy.blockreader.local";
boolean DFS_CLIENT_USE_LEGACY_BLOCKREADERLOCAL_DEFAULT = false;
String DFS_CLIENT_DATANODE_RESTART_TIMEOUT_KEY =
"dfs.client.datanode-restart.timeout";
long DFS_CLIENT_DATANODE_RESTART_TIMEOUT_DEFAULT = 30;
// Much code in hdfs is not yet updated to use these keys.
// the initial delay (unit is ms) for locateFollowingBlock, the delay time
// will increase exponentially(double) for each retry.
String DFS_CLIENT_MAX_BLOCK_ACQUIRE_FAILURES_KEY =
"dfs.client.max.block.acquire.failures";
int DFS_CLIENT_MAX_BLOCK_ACQUIRE_FAILURES_DEFAULT = 3;
String DFS_CHECKSUM_TYPE_KEY = "dfs.checksum.type";
String DFS_CHECKSUM_TYPE_DEFAULT = "CRC32C";
String DFS_BYTES_PER_CHECKSUM_KEY = "dfs.bytes-per-checksum";
int DFS_BYTES_PER_CHECKSUM_DEFAULT = 512;
String DFS_DATANODE_SOCKET_WRITE_TIMEOUT_KEY =
"dfs.datanode.socket.write.timeout";
String DFS_CLIENT_DOMAIN_SOCKET_DATA_TRAFFIC =
"dfs.client.domain.socket.data.traffic";
boolean DFS_CLIENT_DOMAIN_SOCKET_DATA_TRAFFIC_DEFAULT = false;
String DFS_DOMAIN_SOCKET_PATH_KEY = "dfs.domain.socket.path";
String DFS_DOMAIN_SOCKET_PATH_DEFAULT = "";
String DFS_SHORT_CIRCUIT_SHARED_MEMORY_WATCHER_INTERRUPT_CHECK_MS =
"dfs.short.circuit.shared.memory.watcher.interrupt.check.ms";
int DFS_SHORT_CIRCUIT_SHARED_MEMORY_WATCHER_INTERRUPT_CHECK_MS_DEFAULT =
60000;
String DFS_CLIENT_SLOW_IO_WARNING_THRESHOLD_KEY =
"dfs.client.slow.io.warning.threshold.ms";
long DFS_CLIENT_SLOW_IO_WARNING_THRESHOLD_DEFAULT = 30000;
String DFS_CLIENT_KEY_PROVIDER_CACHE_EXPIRY_MS =
"dfs.client.key.provider.cache.expiry";
long DFS_CLIENT_KEY_PROVIDER_CACHE_EXPIRY_DEFAULT =
TimeUnit.DAYS.toMillis(10); // 10 days
String DFS_HDFS_BLOCKS_METADATA_ENABLED =
"dfs.datanode.hdfs-blocks-metadata.enabled";
boolean DFS_HDFS_BLOCKS_METADATA_ENABLED_DEFAULT = false;
String REPLICA_ACCESSOR_BUILDER_CLASSES_KEY =
PREFIX + "replica.accessor.builder.classes";
/** dfs.client.retry configuration properties */
interface Retry {

View File

@ -17,50 +17,80 @@
*/
package org.apache.hadoop.hdfs.client.impl;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BLOCK_SIZE_DEFAULT;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BLOCK_SIZE_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_DEFAULT;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_CACHED_CONN_RETRY_DEFAULT;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_CACHED_CONN_RETRY_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_DATANODE_RESTART_TIMEOUT_DEFAULT;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_DATANODE_RESTART_TIMEOUT_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_MAX_BLOCK_ACQUIRE_FAILURES_DEFAULT;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_MAX_BLOCK_ACQUIRE_FAILURES_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_SOCKET_CACHE_CAPACITY_DEFAULT;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_SOCKET_CACHE_CAPACITY_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_SOCKET_CACHE_EXPIRY_MSEC_DEFAULT;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_SOCKET_CACHE_EXPIRY_MSEC_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_SOCKET_TIMEOUT_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_USE_DN_HOSTNAME;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_USE_DN_HOSTNAME_DEFAULT;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_SOCKET_WRITE_TIMEOUT_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_REPLICATION_DEFAULT;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_REPLICATION_KEY;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Preconditions;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.HadoopIllegalArgumentException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
import org.apache.hadoop.fs.Options.ChecksumOpt;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.BlockReaderFactory;
import org.apache.hadoop.hdfs.DFSClient;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.ReplicaAccessorBuilder;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
import org.apache.hadoop.hdfs.util.ByteArrayManager;
import org.apache.hadoop.ipc.Client;
import org.apache.hadoop.util.DataChecksum;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.google.common.annotations.VisibleForTesting;
import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.BlockWrite;
import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_BLOCK_SIZE_DEFAULT;
import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_BLOCK_SIZE_KEY;
import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_BYTES_PER_CHECKSUM_DEFAULT;
import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY;
import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_CHECKSUM_TYPE_DEFAULT;
import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_CHECKSUM_TYPE_KEY;
import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_CLIENT_CACHED_CONN_RETRY_DEFAULT;
import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_CLIENT_CACHED_CONN_RETRY_KEY;
import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_CLIENT_DATANODE_RESTART_TIMEOUT_DEFAULT;
import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_CLIENT_DATANODE_RESTART_TIMEOUT_KEY;
import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_CLIENT_DOMAIN_SOCKET_DATA_TRAFFIC;
import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_CLIENT_DOMAIN_SOCKET_DATA_TRAFFIC_DEFAULT;
import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_CLIENT_KEY_PROVIDER_CACHE_EXPIRY_DEFAULT;
import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_CLIENT_KEY_PROVIDER_CACHE_EXPIRY_MS;
import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_CLIENT_MAX_BLOCK_ACQUIRE_FAILURES_DEFAULT;
import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_CLIENT_MAX_BLOCK_ACQUIRE_FAILURES_KEY;
import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_CLIENT_SLOW_IO_WARNING_THRESHOLD_DEFAULT;
import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_CLIENT_SLOW_IO_WARNING_THRESHOLD_KEY;
import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_CLIENT_SOCKET_CACHE_CAPACITY_DEFAULT;
import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_CLIENT_SOCKET_CACHE_CAPACITY_KEY;
import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_CLIENT_SOCKET_CACHE_EXPIRY_MSEC_DEFAULT;
import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_CLIENT_SOCKET_CACHE_EXPIRY_MSEC_KEY;
import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_CLIENT_SOCKET_TIMEOUT_KEY;
import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_CLIENT_USE_DN_HOSTNAME;
import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_CLIENT_USE_DN_HOSTNAME_DEFAULT;
import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_CLIENT_USE_LEGACY_BLOCKREADER;
import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_CLIENT_USE_LEGACY_BLOCKREADERLOCAL;
import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_CLIENT_USE_LEGACY_BLOCKREADERLOCAL_DEFAULT;
import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_CLIENT_USE_LEGACY_BLOCKREADER_DEFAULT;
import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_CLIENT_WRITE_PACKET_SIZE_DEFAULT;
import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_CLIENT_WRITE_PACKET_SIZE_KEY;
import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_DATANODE_SOCKET_WRITE_TIMEOUT_KEY;
import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_DOMAIN_SOCKET_PATH_DEFAULT;
import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_DOMAIN_SOCKET_PATH_KEY;
import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_REPLICATION_DEFAULT;
import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_REPLICATION_KEY;
import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_SHORT_CIRCUIT_SHARED_MEMORY_WATCHER_INTERRUPT_CHECK_MS;
import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_SHORT_CIRCUIT_SHARED_MEMORY_WATCHER_INTERRUPT_CHECK_MS_DEFAULT;
import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.Failover;
import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.HedgedRead;
import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.Mmap;
import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.Read;
import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.Retry;
import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.ShortCircuit;
import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.Write;
import java.lang.Class;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
/**
* DFSClient configuration
* DFSClient configuration.
*/
public class DfsClientConf {
private static final Logger LOG = LoggerFactory.getLogger(DfsClientConf
.class);
private final int hdfsTimeout; // timeout value for a DFS operation.
@ -77,7 +107,7 @@ public class DfsClientConf {
private final ByteArrayManager.Conf writeByteArrayManagerConf;
private final int socketTimeout;
private final long excludedNodesCacheExpiry;
/** Wait time window (in msec) if BlockMissingException is caught */
/** Wait time window (in msec) if BlockMissingException is caught. */
private final int timeWindow;
private final int numCachedConnRetry;
private final int numBlockWriteRetry;
@ -89,133 +119,124 @@ public class DfsClientConf {
private final String taskId;
private final FsPermission uMask;
private final boolean connectToDnViaHostname;
private final boolean hdfsBlocksMetadataEnabled;
private final int fileBlockStorageLocationsNumThreads;
private final int fileBlockStorageLocationsTimeoutMs;
private final int retryTimesForGetLastBlockLength;
private final int retryIntervalForGetLastBlockLength;
private final long datanodeRestartTimeout;
private final long slowIoWarningThresholdMs;
private final ShortCircuitConf shortCircuitConf;
private final long hedgedReadThresholdMillis;
private final int hedgedReadThreadpoolSize;
private final List<Class<? extends ReplicaAccessorBuilder>>
replicaAccessorBuilderClasses;
private final int stripedReadThreadpoolSize;
public DfsClientConf(Configuration conf) {
// The hdfsTimeout is currently the same as the ipc timeout
// The hdfsTimeout is currently the same as the ipc timeout
hdfsTimeout = Client.getTimeout(conf);
maxRetryAttempts = conf.getInt(
HdfsClientConfigKeys.Retry.MAX_ATTEMPTS_KEY,
HdfsClientConfigKeys.Retry.MAX_ATTEMPTS_DEFAULT);
Retry.MAX_ATTEMPTS_KEY,
Retry.MAX_ATTEMPTS_DEFAULT);
timeWindow = conf.getInt(
HdfsClientConfigKeys.Retry.WINDOW_BASE_KEY,
HdfsClientConfigKeys.Retry.WINDOW_BASE_DEFAULT);
Retry.WINDOW_BASE_KEY,
Retry.WINDOW_BASE_DEFAULT);
retryTimesForGetLastBlockLength = conf.getInt(
HdfsClientConfigKeys.Retry.TIMES_GET_LAST_BLOCK_LENGTH_KEY,
HdfsClientConfigKeys.Retry.TIMES_GET_LAST_BLOCK_LENGTH_DEFAULT);
Retry.TIMES_GET_LAST_BLOCK_LENGTH_KEY,
Retry.TIMES_GET_LAST_BLOCK_LENGTH_DEFAULT);
retryIntervalForGetLastBlockLength = conf.getInt(
HdfsClientConfigKeys.Retry.INTERVAL_GET_LAST_BLOCK_LENGTH_KEY,
HdfsClientConfigKeys.Retry.INTERVAL_GET_LAST_BLOCK_LENGTH_DEFAULT);
Retry.INTERVAL_GET_LAST_BLOCK_LENGTH_KEY,
Retry.INTERVAL_GET_LAST_BLOCK_LENGTH_DEFAULT);
maxFailoverAttempts = conf.getInt(
HdfsClientConfigKeys.Failover.MAX_ATTEMPTS_KEY,
HdfsClientConfigKeys.Failover.MAX_ATTEMPTS_DEFAULT);
Failover.MAX_ATTEMPTS_KEY,
Failover.MAX_ATTEMPTS_DEFAULT);
failoverSleepBaseMillis = conf.getInt(
HdfsClientConfigKeys.Failover.SLEEPTIME_BASE_KEY,
HdfsClientConfigKeys.Failover.SLEEPTIME_BASE_DEFAULT);
Failover.SLEEPTIME_BASE_KEY,
Failover.SLEEPTIME_BASE_DEFAULT);
failoverSleepMaxMillis = conf.getInt(
HdfsClientConfigKeys.Failover.SLEEPTIME_MAX_KEY,
HdfsClientConfigKeys.Failover.SLEEPTIME_MAX_DEFAULT);
Failover.SLEEPTIME_MAX_KEY,
Failover.SLEEPTIME_MAX_DEFAULT);
maxBlockAcquireFailures = conf.getInt(
DFS_CLIENT_MAX_BLOCK_ACQUIRE_FAILURES_KEY,
DFS_CLIENT_MAX_BLOCK_ACQUIRE_FAILURES_DEFAULT);
datanodeSocketWriteTimeout = conf.getInt(DFS_DATANODE_SOCKET_WRITE_TIMEOUT_KEY,
HdfsServerConstants.WRITE_TIMEOUT);
datanodeSocketWriteTimeout = conf.getInt(
DFS_DATANODE_SOCKET_WRITE_TIMEOUT_KEY,
HdfsConstants.WRITE_TIMEOUT);
ioBufferSize = conf.getInt(
CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_KEY,
CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_DEFAULT);
defaultChecksumOpt = getChecksumOptFromConf(conf);
socketTimeout = conf.getInt(DFS_CLIENT_SOCKET_TIMEOUT_KEY,
HdfsServerConstants.READ_TIMEOUT);
HdfsConstants.READ_TIMEOUT);
/** dfs.write.packet.size is an internal config variable */
writePacketSize = conf.getInt(
DFSConfigKeys.DFS_CLIENT_WRITE_PACKET_SIZE_KEY,
DFSConfigKeys.DFS_CLIENT_WRITE_PACKET_SIZE_DEFAULT);
DFS_CLIENT_WRITE_PACKET_SIZE_KEY,
DFS_CLIENT_WRITE_PACKET_SIZE_DEFAULT);
writeMaxPackets = conf.getInt(
HdfsClientConfigKeys.Write.MAX_PACKETS_IN_FLIGHT_KEY,
HdfsClientConfigKeys.Write.MAX_PACKETS_IN_FLIGHT_DEFAULT);
Write.MAX_PACKETS_IN_FLIGHT_KEY,
Write.MAX_PACKETS_IN_FLIGHT_DEFAULT);
final boolean byteArrayManagerEnabled = conf.getBoolean(
HdfsClientConfigKeys.Write.ByteArrayManager.ENABLED_KEY,
HdfsClientConfigKeys.Write.ByteArrayManager.ENABLED_DEFAULT);
Write.ByteArrayManager.ENABLED_KEY,
Write.ByteArrayManager.ENABLED_DEFAULT);
if (!byteArrayManagerEnabled) {
writeByteArrayManagerConf = null;
} else {
final int countThreshold = conf.getInt(
HdfsClientConfigKeys.Write.ByteArrayManager.COUNT_THRESHOLD_KEY,
HdfsClientConfigKeys.Write.ByteArrayManager.COUNT_THRESHOLD_DEFAULT);
Write.ByteArrayManager.COUNT_THRESHOLD_KEY,
Write.ByteArrayManager.COUNT_THRESHOLD_DEFAULT);
final int countLimit = conf.getInt(
HdfsClientConfigKeys.Write.ByteArrayManager.COUNT_LIMIT_KEY,
HdfsClientConfigKeys.Write.ByteArrayManager.COUNT_LIMIT_DEFAULT);
Write.ByteArrayManager.COUNT_LIMIT_KEY,
Write.ByteArrayManager.COUNT_LIMIT_DEFAULT);
final long countResetTimePeriodMs = conf.getLong(
HdfsClientConfigKeys.Write.ByteArrayManager.COUNT_RESET_TIME_PERIOD_MS_KEY,
HdfsClientConfigKeys.Write.ByteArrayManager.COUNT_RESET_TIME_PERIOD_MS_DEFAULT);
Write.ByteArrayManager.COUNT_RESET_TIME_PERIOD_MS_KEY,
Write.ByteArrayManager.COUNT_RESET_TIME_PERIOD_MS_DEFAULT);
writeByteArrayManagerConf = new ByteArrayManager.Conf(
countThreshold, countLimit, countResetTimePeriodMs);
countThreshold, countLimit, countResetTimePeriodMs);
}
defaultBlockSize = conf.getLongBytes(DFS_BLOCK_SIZE_KEY,
DFS_BLOCK_SIZE_DEFAULT);
defaultReplication = (short) conf.getInt(
DFS_REPLICATION_KEY, DFS_REPLICATION_DEFAULT);
taskId = conf.get("mapreduce.task.attempt.id", "NONMAPREDUCE");
excludedNodesCacheExpiry = conf.getLong(
HdfsClientConfigKeys.Write.EXCLUDE_NODES_CACHE_EXPIRY_INTERVAL_KEY,
HdfsClientConfigKeys.Write.EXCLUDE_NODES_CACHE_EXPIRY_INTERVAL_DEFAULT);
prefetchSize = conf.getLong(HdfsClientConfigKeys.Read.PREFETCH_SIZE_KEY,
Write.EXCLUDE_NODES_CACHE_EXPIRY_INTERVAL_KEY,
Write.EXCLUDE_NODES_CACHE_EXPIRY_INTERVAL_DEFAULT);
prefetchSize = conf.getLong(Read.PREFETCH_SIZE_KEY,
10 * defaultBlockSize);
numCachedConnRetry = conf.getInt(DFS_CLIENT_CACHED_CONN_RETRY_KEY,
DFS_CLIENT_CACHED_CONN_RETRY_DEFAULT);
numBlockWriteRetry = conf.getInt(
HdfsClientConfigKeys.BlockWrite.RETRIES_KEY,
HdfsClientConfigKeys.BlockWrite.RETRIES_DEFAULT);
BlockWrite.RETRIES_KEY,
BlockWrite.RETRIES_DEFAULT);
numBlockWriteLocateFollowingRetry = conf.getInt(
HdfsClientConfigKeys.BlockWrite.LOCATEFOLLOWINGBLOCK_RETRIES_KEY,
HdfsClientConfigKeys.BlockWrite.LOCATEFOLLOWINGBLOCK_RETRIES_DEFAULT);
BlockWrite.LOCATEFOLLOWINGBLOCK_RETRIES_KEY,
BlockWrite.LOCATEFOLLOWINGBLOCK_RETRIES_DEFAULT);
blockWriteLocateFollowingInitialDelayMs = conf.getInt(
HdfsClientConfigKeys.BlockWrite.LOCATEFOLLOWINGBLOCK_INITIAL_DELAY_MS_KEY,
HdfsClientConfigKeys.BlockWrite.LOCATEFOLLOWINGBLOCK_INITIAL_DELAY_MS_DEFAULT);
BlockWrite.LOCATEFOLLOWINGBLOCK_INITIAL_DELAY_MS_KEY,
BlockWrite.LOCATEFOLLOWINGBLOCK_INITIAL_DELAY_MS_DEFAULT);
uMask = FsPermission.getUMask(conf);
connectToDnViaHostname = conf.getBoolean(DFS_CLIENT_USE_DN_HOSTNAME,
DFS_CLIENT_USE_DN_HOSTNAME_DEFAULT);
hdfsBlocksMetadataEnabled = conf.getBoolean(
DFSConfigKeys.DFS_HDFS_BLOCKS_METADATA_ENABLED,
DFSConfigKeys.DFS_HDFS_BLOCKS_METADATA_ENABLED_DEFAULT);
fileBlockStorageLocationsNumThreads = conf.getInt(
DFSConfigKeys.DFS_CLIENT_FILE_BLOCK_STORAGE_LOCATIONS_NUM_THREADS,
DFSConfigKeys.DFS_CLIENT_FILE_BLOCK_STORAGE_LOCATIONS_NUM_THREADS_DEFAULT);
fileBlockStorageLocationsTimeoutMs = conf.getInt(
DFSConfigKeys.DFS_CLIENT_FILE_BLOCK_STORAGE_LOCATIONS_TIMEOUT_MS,
DFSConfigKeys.DFS_CLIENT_FILE_BLOCK_STORAGE_LOCATIONS_TIMEOUT_MS_DEFAULT);
datanodeRestartTimeout = conf.getLong(
DFS_CLIENT_DATANODE_RESTART_TIMEOUT_KEY,
DFS_CLIENT_DATANODE_RESTART_TIMEOUT_DEFAULT) * 1000;
slowIoWarningThresholdMs = conf.getLong(
DFSConfigKeys.DFS_CLIENT_SLOW_IO_WARNING_THRESHOLD_KEY,
DFSConfigKeys.DFS_CLIENT_SLOW_IO_WARNING_THRESHOLD_DEFAULT);
DFS_CLIENT_SLOW_IO_WARNING_THRESHOLD_KEY,
DFS_CLIENT_SLOW_IO_WARNING_THRESHOLD_DEFAULT);
shortCircuitConf = new ShortCircuitConf(conf);
hedgedReadThresholdMillis = conf.getLong(
HdfsClientConfigKeys.HedgedRead.THRESHOLD_MILLIS_KEY,
HdfsClientConfigKeys.HedgedRead.THRESHOLD_MILLIS_DEFAULT);
HedgedRead.THRESHOLD_MILLIS_KEY,
HedgedRead.THRESHOLD_MILLIS_DEFAULT);
hedgedReadThreadpoolSize = conf.getInt(
HdfsClientConfigKeys.HedgedRead.THREADPOOL_SIZE_KEY,
HdfsClientConfigKeys.HedgedRead.THREADPOOL_SIZE_DEFAULT);
@ -226,19 +247,45 @@ public class DfsClientConf {
Preconditions.checkArgument(stripedReadThreadpoolSize > 0, "The value of " +
HdfsClientConfigKeys.StripedRead.THREADPOOL_SIZE_KEY +
" must be greater than 0.");
replicaAccessorBuilderClasses = loadReplicaAccessorBuilderClasses(conf);
}
@SuppressWarnings("unchecked")
private List<Class<? extends ReplicaAccessorBuilder>>
loadReplicaAccessorBuilderClasses(Configuration conf)
{
String classNames[] = conf.getTrimmedStrings(
HdfsClientConfigKeys.REPLICA_ACCESSOR_BUILDER_CLASSES_KEY);
if (classNames.length == 0) {
return Collections.emptyList();
}
ArrayList<Class<? extends ReplicaAccessorBuilder>> classes =
new ArrayList<>();
ClassLoader classLoader = Thread.currentThread().getContextClassLoader();
for (String className: classNames) {
try {
Class<? extends ReplicaAccessorBuilder> cls =
(Class<? extends ReplicaAccessorBuilder>)
classLoader.loadClass(className);
classes.add(cls);
} catch (Throwable t) {
LOG.warn("Unable to load " + className, t);
}
}
return classes;
}
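loadReplicaAccessorBuilderClasses resolves each configured class name with the thread context class loader and skips, with a warning, any name it cannot load. A hedged sketch of wiring in a custom builder; com.example.MyReplicaAccessorBuilder is a hypothetical implementation of ReplicaAccessorBuilder:

Configuration conf = new Configuration();
conf.set(HdfsClientConfigKeys.REPLICA_ACCESSOR_BUILDER_CLASSES_KEY,
    "com.example.MyReplicaAccessorBuilder"); // hypothetical class name
DfsClientConf clientConf = new DfsClientConf(conf);
// Unloadable names are logged and skipped rather than failing construction.
System.out.println(clientConf.getReplicaAccessorBuilderClasses());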
private DataChecksum.Type getChecksumType(Configuration conf) {
final String checksum = conf.get(
DFSConfigKeys.DFS_CHECKSUM_TYPE_KEY,
DFSConfigKeys.DFS_CHECKSUM_TYPE_DEFAULT);
DFS_CHECKSUM_TYPE_KEY,
DFS_CHECKSUM_TYPE_DEFAULT);
try {
return DataChecksum.Type.valueOf(checksum);
} catch(IllegalArgumentException iae) {
DFSClient.LOG.warn("Bad checksum type: " + checksum + ". Using default "
+ DFSConfigKeys.DFS_CHECKSUM_TYPE_DEFAULT);
LOG.warn("Bad checksum type: {}. Using default {}", checksum,
DFS_CHECKSUM_TYPE_DEFAULT);
return DataChecksum.Type.valueOf(
DFSConfigKeys.DFS_CHECKSUM_TYPE_DEFAULT);
DFS_CHECKSUM_TYPE_DEFAULT);
}
}
@ -439,27 +486,6 @@ public class DfsClientConf {
return connectToDnViaHostname;
}
/**
* @return the hdfsBlocksMetadataEnabled
*/
public boolean isHdfsBlocksMetadataEnabled() {
return hdfsBlocksMetadataEnabled;
}
/**
* @return the fileBlockStorageLocationsNumThreads
*/
public int getFileBlockStorageLocationsNumThreads() {
return fileBlockStorageLocationsNumThreads;
}
/**
* @return the getFileBlockStorageLocationsTimeoutMs
*/
public int getFileBlockStorageLocationsTimeoutMs() {
return fileBlockStorageLocationsTimeoutMs;
}
/**
* @return the retryTimesForGetLastBlockLength
*/
@ -509,6 +535,14 @@ public class DfsClientConf {
return stripedReadThreadpoolSize;
}
/**
* @return the replicaAccessorBuilderClasses
*/
public List<Class<? extends ReplicaAccessorBuilder>>
getReplicaAccessorBuilderClasses() {
return replicaAccessorBuilderClasses;
}
/**
* @return the shortCircuitConf
*/
@ -516,8 +550,11 @@ public class DfsClientConf {
return shortCircuitConf;
}
/**
* Configuration for short-circuit reads.
*/
public static class ShortCircuitConf {
private static final Log LOG = LogFactory.getLog(ShortCircuitConf.class);
private static final Logger LOG = DfsClientConf.LOG;
private final int socketCacheCapacity;
private final long socketCacheExpiry;
@ -531,9 +568,9 @@ public class DfsClientConf {
private final boolean shortCircuitLocalReads;
private final boolean domainSocketDataTraffic;
private final int shortCircuitStreamsCacheSize;
private final long shortCircuitStreamsCacheExpiryMs;
private final long shortCircuitStreamsCacheExpiryMs;
private final int shortCircuitSharedMemoryWatcherInterruptCheckMs;
private final boolean shortCircuitMmapEnabled;
private final int shortCircuitMmapCacheSize;
private final long shortCircuitMmapCacheExpiryMs;
@ -542,10 +579,6 @@ public class DfsClientConf {
private final long keyProviderCacheExpiryMs;
@VisibleForTesting
public BlockReaderFactory.FailureInjector brfFailureInjector =
new BlockReaderFactory.FailureInjector();
public ShortCircuitConf(Configuration conf) {
socketCacheCapacity = conf.getInt(
DFS_CLIENT_SOCKET_CACHE_CAPACITY_KEY,
@ -555,66 +588,64 @@ public class DfsClientConf {
DFS_CLIENT_SOCKET_CACHE_EXPIRY_MSEC_DEFAULT);
useLegacyBlockReader = conf.getBoolean(
DFSConfigKeys.DFS_CLIENT_USE_LEGACY_BLOCKREADER,
DFSConfigKeys.DFS_CLIENT_USE_LEGACY_BLOCKREADER_DEFAULT);
DFS_CLIENT_USE_LEGACY_BLOCKREADER,
DFS_CLIENT_USE_LEGACY_BLOCKREADER_DEFAULT);
useLegacyBlockReaderLocal = conf.getBoolean(
DFSConfigKeys.DFS_CLIENT_USE_LEGACY_BLOCKREADERLOCAL,
DFSConfigKeys.DFS_CLIENT_USE_LEGACY_BLOCKREADERLOCAL_DEFAULT);
DFS_CLIENT_USE_LEGACY_BLOCKREADERLOCAL,
DFS_CLIENT_USE_LEGACY_BLOCKREADERLOCAL_DEFAULT);
shortCircuitLocalReads = conf.getBoolean(
HdfsClientConfigKeys.Read.ShortCircuit.KEY,
HdfsClientConfigKeys.Read.ShortCircuit.DEFAULT);
Read.ShortCircuit.KEY,
Read.ShortCircuit.DEFAULT);
domainSocketDataTraffic = conf.getBoolean(
DFSConfigKeys.DFS_CLIENT_DOMAIN_SOCKET_DATA_TRAFFIC,
DFSConfigKeys.DFS_CLIENT_DOMAIN_SOCKET_DATA_TRAFFIC_DEFAULT);
DFS_CLIENT_DOMAIN_SOCKET_DATA_TRAFFIC,
DFS_CLIENT_DOMAIN_SOCKET_DATA_TRAFFIC_DEFAULT);
domainSocketPath = conf.getTrimmed(
DFSConfigKeys.DFS_DOMAIN_SOCKET_PATH_KEY,
DFSConfigKeys.DFS_DOMAIN_SOCKET_PATH_DEFAULT);
DFS_DOMAIN_SOCKET_PATH_KEY,
DFS_DOMAIN_SOCKET_PATH_DEFAULT);
if (LOG.isDebugEnabled()) {
LOG.debug(DFSConfigKeys.DFS_CLIENT_USE_LEGACY_BLOCKREADERLOCAL
+ " = " + useLegacyBlockReaderLocal);
LOG.debug(HdfsClientConfigKeys.Read.ShortCircuit.KEY
+ " = " + shortCircuitLocalReads);
LOG.debug(DFSConfigKeys.DFS_CLIENT_DOMAIN_SOCKET_DATA_TRAFFIC
+ " = " + domainSocketDataTraffic);
LOG.debug(DFSConfigKeys.DFS_DOMAIN_SOCKET_PATH_KEY
+ " = " + domainSocketPath);
}
LOG.debug(DFS_CLIENT_USE_LEGACY_BLOCKREADERLOCAL
+ " = {}", useLegacyBlockReaderLocal);
LOG.debug(Read.ShortCircuit.KEY
+ " = {}", shortCircuitLocalReads);
LOG.debug(DFS_CLIENT_DOMAIN_SOCKET_DATA_TRAFFIC
+ " = {}", domainSocketDataTraffic);
LOG.debug(DFS_DOMAIN_SOCKET_PATH_KEY
+ " = {}", domainSocketPath);
skipShortCircuitChecksums = conf.getBoolean(
HdfsClientConfigKeys.Read.ShortCircuit.SKIP_CHECKSUM_KEY,
HdfsClientConfigKeys.Read.ShortCircuit.SKIP_CHECKSUM_DEFAULT);
Read.ShortCircuit.SKIP_CHECKSUM_KEY,
Read.ShortCircuit.SKIP_CHECKSUM_DEFAULT);
shortCircuitBufferSize = conf.getInt(
HdfsClientConfigKeys.Read.ShortCircuit.BUFFER_SIZE_KEY,
HdfsClientConfigKeys.Read.ShortCircuit.BUFFER_SIZE_DEFAULT);
Read.ShortCircuit.BUFFER_SIZE_KEY,
Read.ShortCircuit.BUFFER_SIZE_DEFAULT);
shortCircuitStreamsCacheSize = conf.getInt(
HdfsClientConfigKeys.Read.ShortCircuit.STREAMS_CACHE_SIZE_KEY,
HdfsClientConfigKeys.Read.ShortCircuit.STREAMS_CACHE_SIZE_DEFAULT);
Read.ShortCircuit.STREAMS_CACHE_SIZE_KEY,
Read.ShortCircuit.STREAMS_CACHE_SIZE_DEFAULT);
shortCircuitStreamsCacheExpiryMs = conf.getLong(
HdfsClientConfigKeys.Read.ShortCircuit.STREAMS_CACHE_EXPIRY_MS_KEY,
HdfsClientConfigKeys.Read.ShortCircuit.STREAMS_CACHE_EXPIRY_MS_DEFAULT);
Read.ShortCircuit.STREAMS_CACHE_EXPIRY_MS_KEY,
Read.ShortCircuit.STREAMS_CACHE_EXPIRY_MS_DEFAULT);
shortCircuitMmapEnabled = conf.getBoolean(
HdfsClientConfigKeys.Mmap.ENABLED_KEY,
HdfsClientConfigKeys.Mmap.ENABLED_DEFAULT);
Mmap.ENABLED_KEY,
Mmap.ENABLED_DEFAULT);
shortCircuitMmapCacheSize = conf.getInt(
HdfsClientConfigKeys.Mmap.CACHE_SIZE_KEY,
HdfsClientConfigKeys.Mmap.CACHE_SIZE_DEFAULT);
Mmap.CACHE_SIZE_KEY,
Mmap.CACHE_SIZE_DEFAULT);
shortCircuitMmapCacheExpiryMs = conf.getLong(
HdfsClientConfigKeys.Mmap.CACHE_TIMEOUT_MS_KEY,
HdfsClientConfigKeys.Mmap.CACHE_TIMEOUT_MS_DEFAULT);
Mmap.CACHE_TIMEOUT_MS_KEY,
Mmap.CACHE_TIMEOUT_MS_DEFAULT);
shortCircuitMmapCacheRetryTimeout = conf.getLong(
HdfsClientConfigKeys.Mmap.RETRY_TIMEOUT_MS_KEY,
HdfsClientConfigKeys.Mmap.RETRY_TIMEOUT_MS_DEFAULT);
Mmap.RETRY_TIMEOUT_MS_KEY,
Mmap.RETRY_TIMEOUT_MS_DEFAULT);
shortCircuitCacheStaleThresholdMs = conf.getLong(
HdfsClientConfigKeys.ShortCircuit.REPLICA_STALE_THRESHOLD_MS_KEY,
HdfsClientConfigKeys.ShortCircuit.REPLICA_STALE_THRESHOLD_MS_DEFAULT);
ShortCircuit.REPLICA_STALE_THRESHOLD_MS_KEY,
ShortCircuit.REPLICA_STALE_THRESHOLD_MS_DEFAULT);
shortCircuitSharedMemoryWatcherInterruptCheckMs = conf.getInt(
DFSConfigKeys.DFS_SHORT_CIRCUIT_SHARED_MEMORY_WATCHER_INTERRUPT_CHECK_MS,
DFSConfigKeys.DFS_SHORT_CIRCUIT_SHARED_MEMORY_WATCHER_INTERRUPT_CHECK_MS_DEFAULT);
DFS_SHORT_CIRCUIT_SHARED_MEMORY_WATCHER_INTERRUPT_CHECK_MS,
DFS_SHORT_CIRCUIT_SHARED_MEMORY_WATCHER_INTERRUPT_CHECK_MS_DEFAULT);
keyProviderCacheExpiryMs = conf.getLong(
DFSConfigKeys.DFS_CLIENT_KEY_PROVIDER_CACHE_EXPIRY_MS,
DFSConfigKeys.DFS_CLIENT_KEY_PROVIDER_CACHE_EXPIRY_DEFAULT);
DFS_CLIENT_KEY_PROVIDER_CACHE_EXPIRY_MS,
DFS_CLIENT_KEY_PROVIDER_CACHE_EXPIRY_DEFAULT);
}
/**
@ -731,35 +762,33 @@ public class DfsClientConf {
}
public String confAsString() {
StringBuilder builder = new StringBuilder();
builder.append("shortCircuitStreamsCacheSize = ").
append(shortCircuitStreamsCacheSize).
append(", shortCircuitStreamsCacheExpiryMs = ").
append(shortCircuitStreamsCacheExpiryMs).
append(", shortCircuitMmapCacheSize = ").
append(shortCircuitMmapCacheSize).
append(", shortCircuitMmapCacheExpiryMs = ").
append(shortCircuitMmapCacheExpiryMs).
append(", shortCircuitMmapCacheRetryTimeout = ").
append(shortCircuitMmapCacheRetryTimeout).
append(", shortCircuitCacheStaleThresholdMs = ").
append(shortCircuitCacheStaleThresholdMs).
append(", socketCacheCapacity = ").
append(socketCacheCapacity).
append(", socketCacheExpiry = ").
append(socketCacheExpiry).
append(", shortCircuitLocalReads = ").
append(shortCircuitLocalReads).
append(", useLegacyBlockReaderLocal = ").
append(useLegacyBlockReaderLocal).
append(", domainSocketDataTraffic = ").
append(domainSocketDataTraffic).
append(", shortCircuitSharedMemoryWatcherInterruptCheckMs = ").
append(shortCircuitSharedMemoryWatcherInterruptCheckMs).
append(", keyProviderCacheExpiryMs = ").
append(keyProviderCacheExpiryMs);
return builder.toString();
return "shortCircuitStreamsCacheSize = "
+ shortCircuitStreamsCacheSize
+ ", shortCircuitStreamsCacheExpiryMs = "
+ shortCircuitStreamsCacheExpiryMs
+ ", shortCircuitMmapCacheSize = "
+ shortCircuitMmapCacheSize
+ ", shortCircuitMmapCacheExpiryMs = "
+ shortCircuitMmapCacheExpiryMs
+ ", shortCircuitMmapCacheRetryTimeout = "
+ shortCircuitMmapCacheRetryTimeout
+ ", shortCircuitCacheStaleThresholdMs = "
+ shortCircuitCacheStaleThresholdMs
+ ", socketCacheCapacity = "
+ socketCacheCapacity
+ ", socketCacheExpiry = "
+ socketCacheExpiry
+ ", shortCircuitLocalReads = "
+ shortCircuitLocalReads
+ ", useLegacyBlockReaderLocal = "
+ useLegacyBlockReaderLocal
+ ", domainSocketDataTraffic = "
+ domainSocketDataTraffic
+ ", shortCircuitSharedMemoryWatcherInterruptCheckMs = "
+ shortCircuitSharedMemoryWatcherInterruptCheckMs
+ ", keyProviderCacheExpiryMs = "
+ keyProviderCacheExpiryMs;
}
}
}
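The refactored confAsString() above assembles the same "key = value" summary with plain concatenation instead of an explicit StringBuilder. A minimal sketch of exercising it through the public getters, assuming default configuration:

Configuration conf = new Configuration();
DfsClientConf clientConf = new DfsClientConf(conf);
// Prints the short-circuit settings in the comma-separated form assembled above.
System.out.println(clientConf.getShortCircuitConf().confAsString());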
View File
@ -15,15 +15,4 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.logaggregation;
import org.apache.hadoop.classification.InterfaceAudience.Private;
@Private
/**
* This API is not exposed to end-users yet.
*/
public enum ContainerLogsRetentionPolicy {
APPLICATION_MASTER_ONLY, AM_AND_FAILED_CONTAINERS_ONLY, ALL_CONTAINERS
}
package org.apache.hadoop.hdfs.client.impl;
View File
@ -89,6 +89,13 @@ public final class HdfsConstants {
// The chunk size for striped block which is used by erasure coding
public static final int BLOCK_STRIPED_CELL_SIZE = 64 * 1024;
// Timeouts for communicating with DataNode for streaming writes/reads
public static final int READ_TIMEOUT = 60 * 1000;
public static final int READ_TIMEOUT_EXTENSION = 5 * 1000;
public static final int WRITE_TIMEOUT = 8 * 60 * 1000;
//for write pipeline
public static final int WRITE_TIMEOUT_EXTENSION = 5 * 1000;
// SafeMode actions
public enum SafeModeAction {
SAFEMODE_LEAVE, SAFEMODE_ENTER, SAFEMODE_GET
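The streaming read/write timeouts added to HdfsConstants above are base values. A hedged sketch of how the extension constants are typically combined with them; the per-node scaling below is an assumption about usage, not code from this commit:

int pipelineNodes = 3; // illustrative pipeline length
// Base write timeout plus one extension per datanode in the pipeline.
int writeTimeoutMs = HdfsConstants.WRITE_TIMEOUT
    + HdfsConstants.WRITE_TIMEOUT_EXTENSION * pipelineNodes;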
View File
@ -21,8 +21,6 @@ import java.io.IOException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.hdfs.net.Peer;
import org.apache.hadoop.hdfs.protocol.DatanodeID;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto;
import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto;
@ -32,7 +30,7 @@ import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationH
import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto;
import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ChecksumTypeProto;
import org.apache.hadoop.hdfs.protocolPB.PBHelper;
import org.apache.hadoop.hdfs.protocolPB.PBHelperClient;
import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
import org.apache.hadoop.hdfs.security.token.block.InvalidBlockTokenException;
import org.apache.hadoop.security.token.Token;
@ -60,7 +58,7 @@ public abstract class DataTransferProtoUtil {
}
public static ChecksumProto toProto(DataChecksum checksum) {
ChecksumTypeProto type = PBHelper.convert(checksum.getChecksumType());
ChecksumTypeProto type = PBHelperClient.convert(checksum.getChecksumType());
// ChecksumType#valueOf never returns null
return ChecksumProto.newBuilder()
.setBytesPerChecksum(checksum.getBytesPerChecksum())
@ -72,7 +70,7 @@ public abstract class DataTransferProtoUtil {
if (proto == null) return null;
int bytesPerChecksum = proto.getBytesPerChecksum();
DataChecksum.Type type = PBHelper.convert(proto.getType());
DataChecksum.Type type = PBHelperClient.convert(proto.getType());
return DataChecksum.newDataChecksum(type, bytesPerChecksum);
}
@ -89,8 +87,8 @@ public abstract class DataTransferProtoUtil {
static BaseHeaderProto buildBaseHeader(ExtendedBlock blk,
Token<BlockTokenIdentifier> blockToken) {
BaseHeaderProto.Builder builder = BaseHeaderProto.newBuilder()
.setBlock(PBHelper.convert(blk))
.setToken(PBHelper.convert(blockToken));
.setBlock(PBHelperClient.convert(blk))
.setToken(PBHelperClient.convert(blockToken));
if (Trace.isTracing()) {
Span s = Trace.currentSpan();
builder.setTraceInfo(DataTransferTraceInfoProto.newBuilder()
View File
@ -19,8 +19,6 @@ package org.apache.hadoop.hdfs.protocol.datatransfer;
import java.io.IOException;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.fs.StorageType;
@ -32,13 +30,16 @@ import org.apache.hadoop.hdfs.shortcircuit.ShortCircuitShm.SlotId;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.util.DataChecksum;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* Transfer data to/from datanode using a streaming protocol.
*/
@InterfaceAudience.Private
@InterfaceStability.Evolving
public interface DataTransferProtocol {
public static final Log LOG = LogFactory.getLog(DataTransferProtocol.class);
public static final Logger LOG = LoggerFactory.getLogger(DataTransferProtocol.class);
/** Version for data transfers between clients and datanodes
* This should change when serialization of DatanodeInfo, not just
View File
@ -37,7 +37,8 @@ public enum Op {
TRANSFER_BLOCK((byte)86),
REQUEST_SHORT_CIRCUIT_FDS((byte)87),
RELEASE_SHORT_CIRCUIT_FDS((byte)88),
REQUEST_SHORT_CIRCUIT_SHM((byte)89);
REQUEST_SHORT_CIRCUIT_SHM((byte)89),
CUSTOM((byte)127);
/** The code for this operation. */
public final byte code;
View File
@ -41,7 +41,7 @@ import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpTransferBlockP
import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto;
import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReleaseShortCircuitAccessRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmRequestProto;
import org.apache.hadoop.hdfs.protocolPB.PBHelper;
import org.apache.hadoop.hdfs.protocolPB.PBHelperClient;
import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
import org.apache.hadoop.hdfs.server.datanode.CachingStrategy;
import org.apache.hadoop.hdfs.shortcircuit.ShortCircuitShm.SlotId;
@ -140,9 +140,9 @@ public class Sender implements DataTransferProtocol {
OpWriteBlockProto.Builder proto = OpWriteBlockProto.newBuilder()
.setHeader(header)
.setStorageType(PBHelper.convertStorageType(storageType))
.addAllTargets(PBHelper.convert(targets, 1))
.addAllTargetStorageTypes(PBHelper.convertStorageTypes(targetStorageTypes, 1))
.setStorageType(PBHelperClient.convertStorageType(storageType))
.addAllTargets(PBHelperClient.convert(targets, 1))
.addAllTargetStorageTypes(PBHelperClient.convertStorageTypes(targetStorageTypes, 1))
.setStage(toProto(stage))
.setPipelineSize(pipelineSize)
.setMinBytesRcvd(minBytesRcvd)
@ -152,10 +152,10 @@ public class Sender implements DataTransferProtocol {
.setCachingStrategy(getCachingStrategy(cachingStrategy))
.setAllowLazyPersist(allowLazyPersist)
.setPinning(pinning)
.addAllTargetPinnings(PBHelper.convert(targetPinnings, 1));
.addAllTargetPinnings(PBHelperClient.convert(targetPinnings, 1));
if (source != null) {
proto.setSource(PBHelper.convertDatanodeInfo(source));
proto.setSource(PBHelperClient.convertDatanodeInfo(source));
}
send(out, Op.WRITE_BLOCK, proto.build());
@ -171,8 +171,8 @@ public class Sender implements DataTransferProtocol {
OpTransferBlockProto proto = OpTransferBlockProto.newBuilder()
.setHeader(DataTransferProtoUtil.buildClientHeader(
blk, clientName, blockToken))
.addAllTargets(PBHelper.convert(targets))
.addAllTargetStorageTypes(PBHelper.convertStorageTypes(targetStorageTypes))
.addAllTargets(PBHelperClient.convert(targets))
.addAllTargetStorageTypes(PBHelperClient.convertStorageTypes(targetStorageTypes))
.build();
send(out, Op.TRANSFER_BLOCK, proto);
@ -188,7 +188,7 @@ public class Sender implements DataTransferProtocol {
.setHeader(DataTransferProtoUtil.buildBaseHeader(
blk, blockToken)).setMaxVersion(maxVersion);
if (slotId != null) {
builder.setSlotId(PBHelper.convert(slotId));
builder.setSlotId(PBHelperClient.convert(slotId));
}
builder.setSupportsReceiptVerification(supportsReceiptVerification);
OpRequestShortCircuitAccessProto proto = builder.build();
@ -199,7 +199,7 @@ public class Sender implements DataTransferProtocol {
public void releaseShortCircuitFds(SlotId slotId) throws IOException {
ReleaseShortCircuitAccessRequestProto.Builder builder =
ReleaseShortCircuitAccessRequestProto.newBuilder().
setSlotId(PBHelper.convert(slotId));
setSlotId(PBHelperClient.convert(slotId));
if (Trace.isTracing()) {
Span s = Trace.currentSpan();
builder.setTraceInfo(DataTransferTraceInfoProto.newBuilder()
@ -231,9 +231,9 @@ public class Sender implements DataTransferProtocol {
final DatanodeInfo source) throws IOException {
OpReplaceBlockProto proto = OpReplaceBlockProto.newBuilder()
.setHeader(DataTransferProtoUtil.buildBaseHeader(blk, blockToken))
.setStorageType(PBHelper.convertStorageType(storageType))
.setStorageType(PBHelperClient.convertStorageType(storageType))
.setDelHint(delHint)
.setSource(PBHelper.convertDatanodeInfo(source))
.setSource(PBHelperClient.convertDatanodeInfo(source))
.build();
send(out, Op.REPLACE_BLOCK, proto);
View File
@ -0,0 +1,254 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.protocolPB;
import com.google.common.collect.Lists;
import com.google.protobuf.ByteString;
import com.google.protobuf.CodedInputStream;
import org.apache.hadoop.fs.StorageType;
import org.apache.hadoop.hdfs.protocol.DatanodeID;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProto;
import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmSlotProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto;
import org.apache.hadoop.hdfs.shortcircuit.ShortCircuitShm.ShmId;
import org.apache.hadoop.hdfs.shortcircuit.ShortCircuitShm.SlotId;
import org.apache.hadoop.hdfs.util.ExactSizeInputStream;
import org.apache.hadoop.security.proto.SecurityProtos.TokenProto;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.util.DataChecksum;
import java.io.EOFException;
import java.io.IOException;
import java.io.InputStream;
import java.util.ArrayList;
import java.util.List;
/**
* Utilities for converting protobuf classes to and from implementation classes
* and other helper utilities to help in dealing with protobuf.
*
* Note that when converting from an internal type to a protobuf type, the
* converter never returns null for the protobuf type. The check for the
* internal type being null must be done before calling the convert() method.
*/
public class PBHelperClient {
private PBHelperClient() {
/** Hidden constructor */
}
public static ByteString getByteString(byte[] bytes) {
return ByteString.copyFrom(bytes);
}
public static ShmId convert(ShortCircuitShmIdProto shmId) {
return new ShmId(shmId.getHi(), shmId.getLo());
}
public static DataChecksum.Type convert(HdfsProtos.ChecksumTypeProto type) {
return DataChecksum.Type.valueOf(type.getNumber());
}
public static HdfsProtos.ChecksumTypeProto convert(DataChecksum.Type type) {
return HdfsProtos.ChecksumTypeProto.valueOf(type.id);
}
public static ExtendedBlockProto convert(final ExtendedBlock b) {
if (b == null) return null;
return ExtendedBlockProto.newBuilder().
setPoolId(b.getBlockPoolId()).
setBlockId(b.getBlockId()).
setNumBytes(b.getNumBytes()).
setGenerationStamp(b.getGenerationStamp()).
build();
}
public static TokenProto convert(Token<?> tok) {
return TokenProto.newBuilder().
setIdentifier(ByteString.copyFrom(tok.getIdentifier())).
setPassword(ByteString.copyFrom(tok.getPassword())).
setKind(tok.getKind().toString()).
setService(tok.getService().toString()).build();
}
public static ShortCircuitShmIdProto convert(ShmId shmId) {
return ShortCircuitShmIdProto.newBuilder().
setHi(shmId.getHi()).
setLo(shmId.getLo()).
build();
}
public static ShortCircuitShmSlotProto convert(SlotId slotId) {
return ShortCircuitShmSlotProto.newBuilder().
setShmId(convert(slotId.getShmId())).
setSlotIdx(slotId.getSlotIdx()).
build();
}
public static DatanodeIDProto convert(DatanodeID dn) {
// For wire compatibility with older versions we transmit the StorageID
// which is the same as the DatanodeUuid. Since StorageID is a required
// field we pass the empty string if the DatanodeUuid is not yet known.
return DatanodeIDProto.newBuilder()
.setIpAddr(dn.getIpAddr())
.setHostName(dn.getHostName())
.setXferPort(dn.getXferPort())
.setDatanodeUuid(dn.getDatanodeUuid() != null ? dn.getDatanodeUuid() : "")
.setInfoPort(dn.getInfoPort())
.setInfoSecurePort(dn.getInfoSecurePort())
.setIpcPort(dn.getIpcPort()).build();
}
public static DatanodeInfoProto.AdminState convert(
final DatanodeInfo.AdminStates inAs) {
switch (inAs) {
case NORMAL: return DatanodeInfoProto.AdminState.NORMAL;
case DECOMMISSION_INPROGRESS:
return DatanodeInfoProto.AdminState.DECOMMISSION_INPROGRESS;
case DECOMMISSIONED: return DatanodeInfoProto.AdminState.DECOMMISSIONED;
default: return DatanodeInfoProto.AdminState.NORMAL;
}
}
public static DatanodeInfoProto convert(DatanodeInfo info) {
DatanodeInfoProto.Builder builder = DatanodeInfoProto.newBuilder();
if (info.getNetworkLocation() != null) {
builder.setLocation(info.getNetworkLocation());
}
builder
.setId(convert((DatanodeID) info))
.setCapacity(info.getCapacity())
.setDfsUsed(info.getDfsUsed())
.setRemaining(info.getRemaining())
.setBlockPoolUsed(info.getBlockPoolUsed())
.setCacheCapacity(info.getCacheCapacity())
.setCacheUsed(info.getCacheUsed())
.setLastUpdate(info.getLastUpdate())
.setLastUpdateMonotonic(info.getLastUpdateMonotonic())
.setXceiverCount(info.getXceiverCount())
.setAdminState(convert(info.getAdminState()))
.build();
return builder.build();
}
public static List<? extends HdfsProtos.DatanodeInfoProto> convert(
DatanodeInfo[] dnInfos) {
return convert(dnInfos, 0);
}
/**
* Convert the entries of {@code dnInfos}, starting at {@code startIdx}, into a
* list of protos.
*/
public static List<? extends HdfsProtos.DatanodeInfoProto> convert(
DatanodeInfo[] dnInfos, int startIdx) {
if (dnInfos == null)
return null;
ArrayList<HdfsProtos.DatanodeInfoProto> protos = Lists
.newArrayListWithCapacity(dnInfos.length);
for (int i = startIdx; i < dnInfos.length; i++) {
protos.add(convert(dnInfos[i]));
}
return protos;
}
public static List<Boolean> convert(boolean[] targetPinnings, int idx) {
List<Boolean> pinnings = new ArrayList<>();
if (targetPinnings == null) {
pinnings.add(Boolean.FALSE);
} else {
for (; idx < targetPinnings.length; ++idx) {
pinnings.add(targetPinnings[idx]);
}
}
return pinnings;
}
static public DatanodeInfoProto convertDatanodeInfo(DatanodeInfo di) {
if (di == null) return null;
return convert(di);
}
public static StorageTypeProto convertStorageType(StorageType type) {
switch(type) {
case DISK:
return StorageTypeProto.DISK;
case SSD:
return StorageTypeProto.SSD;
case ARCHIVE:
return StorageTypeProto.ARCHIVE;
case RAM_DISK:
return StorageTypeProto.RAM_DISK;
default:
throw new IllegalStateException(
"BUG: StorageType not found, type=" + type);
}
}
public static StorageType convertStorageType(StorageTypeProto type) {
switch(type) {
case DISK:
return StorageType.DISK;
case SSD:
return StorageType.SSD;
case ARCHIVE:
return StorageType.ARCHIVE;
case RAM_DISK:
return StorageType.RAM_DISK;
default:
throw new IllegalStateException(
"BUG: StorageTypeProto not found, type=" + type);
}
}
public static List<StorageTypeProto> convertStorageTypes(
StorageType[] types) {
return convertStorageTypes(types, 0);
}
public static List<StorageTypeProto> convertStorageTypes(
StorageType[] types, int startIdx) {
if (types == null) {
return null;
}
final List<StorageTypeProto> protos = new ArrayList<>(
types.length);
for (int i = startIdx; i < types.length; ++i) {
protos.add(PBHelperClient.convertStorageType(types[i]));
}
return protos;
}
public static InputStream vintPrefixed(final InputStream input)
throws IOException {
final int firstByte = input.read();
if (firstByte == -1) {
throw new EOFException("Premature EOF: no length prefix available");
}
int size = CodedInputStream.readRawVarint32(firstByte, input);
assert size >= 0;
return new ExactSizeInputStream(input, size);
}
}
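PBHelperClient collects the client-side protobuf conversions that the data-transfer classes above now call in place of PBHelper. A minimal round-trip sketch, assuming ExtendedBlock's (blockPoolId, blockId, numBytes, generationStamp) constructor and the imports already listed at the top of this file:

ExtendedBlock block = new ExtendedBlock("BP-1", 1073741825L, 4096L, 1001L);
// Internal type -> protobuf message; the null check stays with the caller.
ExtendedBlockProto blockProto = PBHelperClient.convert(block);

// Storage types for a two-node pipeline, converted in one call.
List<StorageTypeProto> typeProtos = PBHelperClient.convertStorageTypes(
    new StorageType[] { StorageType.DISK, StorageType.SSD });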
View File
@ -29,17 +29,15 @@ import java.io.RandomAccessFile;
import java.nio.ByteBuffer;
import java.nio.channels.FileChannel;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.util.DataChecksum;
import com.google.common.annotations.VisibleForTesting;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
@ -50,7 +48,8 @@ import com.google.common.annotations.VisibleForTesting;
@InterfaceAudience.Private
@InterfaceStability.Evolving
public class BlockMetadataHeader {
private static final Log LOG = LogFactory.getLog(BlockMetadataHeader.class);
private static final Logger LOG = LoggerFactory.getLogger(
BlockMetadataHeader.class);
public static final short VERSION = 1;
@ -62,8 +61,6 @@ public class BlockMetadataHeader {
private final short version;
private DataChecksum checksum = null;
private static final HdfsConfiguration conf = new HdfsConfiguration();
@VisibleForTesting
public BlockMetadataHeader(short version, DataChecksum checksum) {
this.checksum = checksum;
@ -84,11 +81,12 @@ public class BlockMetadataHeader {
* Read the checksum header from the meta file.
* @return the data checksum obtained from the header.
*/
public static DataChecksum readDataChecksum(File metaFile) throws IOException {
public static DataChecksum readDataChecksum(File metaFile, int bufSize)
throws IOException {
DataInputStream in = null;
try {
in = new DataInputStream(new BufferedInputStream(
new FileInputStream(metaFile), DFSUtil.getIoFileBufferSize(conf)));
new FileInputStream(metaFile), bufSize));
return readDataChecksum(in, metaFile);
} finally {
IOUtils.closeStream(in);
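readDataChecksum(File, int) now takes the read buffer size from the caller instead of consulting a static HdfsConfiguration. A minimal sketch of the updated call; the meta-file path is illustrative and the IOException still has to be handled by the caller:

File metaFile = new File("/data/dfs/current/blk_1073741825_1001.meta"); // illustrative
int bufSize = 4096; // buffer size is now an explicit argument
DataChecksum checksum = BlockMetadataHeader.readDataChecksum(metaFile, bufSize);
System.out.println("bytes per checksum = " + checksum.getBytesPerChecksum());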
View File
@ -22,15 +22,15 @@ import org.apache.hadoop.classification.InterfaceAudience;
import java.io.Closeable;
import java.nio.MappedByteBuffer;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* A reference to a memory-mapped region used by an HDFS client.
*/
@InterfaceAudience.Private
public class ClientMmap implements Closeable {
static final Log LOG = LogFactory.getLog(ClientMmap.class);
static final Logger LOG = LoggerFactory.getLogger(ClientMmap.class);
/**
* A reference to the block replica which this mmap relates to.
View File
@ -30,8 +30,6 @@ import java.util.concurrent.locks.Condition;
import java.util.concurrent.locks.ReentrantLock;
import org.apache.commons.lang.mutable.MutableBoolean;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.hdfs.ExtendedBlockId;
import org.apache.hadoop.hdfs.net.DomainPeer;
@ -39,17 +37,18 @@ import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtocol;
import org.apache.hadoop.hdfs.protocol.datatransfer.Sender;
import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmResponseProto;
import org.apache.hadoop.hdfs.protocolPB.PBHelper;
import org.apache.hadoop.hdfs.server.datanode.ShortCircuitRegistry;
import org.apache.hadoop.hdfs.protocolPB.PBHelperClient;
import org.apache.hadoop.hdfs.shortcircuit.ShortCircuitShm.ShmId;
import org.apache.hadoop.hdfs.shortcircuit.ShortCircuitShm.Slot;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.net.unix.DomainSocket;
import org.apache.hadoop.net.unix.DomainSocketWatcher;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Preconditions;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* Manages short-circuit memory segments for an HDFS client.
*
@ -63,7 +62,8 @@ import com.google.common.base.Preconditions;
*/
@InterfaceAudience.Private
public class DfsClientShmManager implements Closeable {
private static final Log LOG = LogFactory.getLog(DfsClientShmManager.class);
private static final Logger LOG = LoggerFactory.getLogger(
DfsClientShmManager.class);
/**
* Manages short-circuit memory segments that pertain to a given DataNode.
@ -168,7 +168,7 @@ public class DfsClientShmManager implements Closeable {
new Sender(out).requestShortCircuitShm(clientName);
ShortCircuitShmResponseProto resp =
ShortCircuitShmResponseProto.parseFrom(
PBHelper.vintPrefixed(peer.getInputStream()));
PBHelperClient.vintPrefixed(peer.getInputStream()));
String error = resp.hasError() ? resp.getError() : "(unknown)";
switch (resp.getStatus()) {
case SUCCESS:
@ -185,14 +185,18 @@ public class DfsClientShmManager implements Closeable {
}
try {
DfsClientShm shm =
new DfsClientShm(PBHelper.convert(resp.getId()),
new DfsClientShm(PBHelperClient.convert(resp.getId()),
fis[0], this, peer);
if (LOG.isTraceEnabled()) {
LOG.trace(this + ": createNewShm: created " + shm);
}
return shm;
} finally {
IOUtils.cleanup(LOG, fis[0]);
try {
fis[0].close();
} catch (Throwable e) {
LOG.debug("Exception in closing " + fis[0], e);
}
}
case ERROR_UNSUPPORTED:
// The DataNode just does not support short-circuit shared memory
@ -497,7 +501,11 @@ public class DfsClientShmManager implements Closeable {
}
// When closed, the domainSocketWatcher will issue callbacks that mark
// all the outstanding DfsClientShm segments as stale.
IOUtils.cleanup(LOG, domainSocketWatcher);
try {
domainSocketWatcher.close();
} catch (Throwable e) {
LOG.debug("Exception in closing " + domainSocketWatcher, e);
}
}
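In DfsClientShmManager the commit replaces IOUtils.cleanup calls with an inlined close-and-log-at-debug block. If the pattern keeps recurring, it could be factored into a tiny helper; the method below is a hypothetical sketch, not something this commit adds:

// Hypothetical helper: close quietly and log failures at debug level.
static void closeQuietly(Logger log, java.io.Closeable c) {
  if (c == null) {
    return;
  }
  try {
    c.close();
  } catch (Throwable e) {
    log.debug("Exception in closing " + c, e);
  }
}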
View File
@ -23,11 +23,9 @@ import java.util.concurrent.TimeUnit;
import com.google.common.annotations.VisibleForTesting;
import org.apache.commons.io.IOUtils;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.HadoopIllegalArgumentException;
import org.apache.hadoop.hdfs.DFSClient;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSUtilClient;
import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
import org.apache.hadoop.hdfs.client.impl.DfsClientConf.ShortCircuitConf;
import org.apache.hadoop.net.unix.DomainSocket;
import org.apache.hadoop.util.PerformanceAdvisory;
@ -36,8 +34,12 @@ import com.google.common.base.Preconditions;
import com.google.common.cache.Cache;
import com.google.common.cache.CacheBuilder;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
public class DomainSocketFactory {
private static final Log LOG = LogFactory.getLog(DomainSocketFactory.class);
private static final Logger LOG = LoggerFactory.getLogger(
DomainSocketFactory.class);
public enum PathState {
UNUSABLE(false, false),
@ -112,7 +114,7 @@ public class DomainSocketFactory {
} else {
if (conf.getDomainSocketPath().isEmpty()) {
throw new HadoopIllegalArgumentException(feature + " is enabled but "
+ DFSConfigKeys.DFS_DOMAIN_SOCKET_PATH_KEY + " is not set.");
+ HdfsClientConfigKeys.DFS_DOMAIN_SOCKET_PATH_KEY + " is not set.");
} else if (DomainSocket.getLoadingFailureReason() != null) {
LOG.warn(feature + " cannot be used because "
+ DomainSocket.getLoadingFailureReason());
@ -145,7 +147,7 @@ public class DomainSocketFactory {
return PathInfo.NOT_CONFIGURED;
}
// UNIX domain sockets can only be used to talk to local peers
if (!DFSClient.isLocalAddress(addr)) return PathInfo.NOT_CONFIGURED;
if (!DFSUtilClient.isLocalAddress(addr)) return PathInfo.NOT_CONFIGURED;
String escapedPath = DomainSocket.getEffectivePath(
conf.getDomainSocketPath(), addr.getPort());
PathState status = pathMap.getIfPresent(escapedPath);
View File
@ -34,8 +34,6 @@ import java.util.concurrent.locks.Condition;
import java.util.concurrent.locks.ReentrantLock;
import org.apache.commons.lang.mutable.MutableBoolean;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.hdfs.ExtendedBlockId;
import org.apache.hadoop.hdfs.client.impl.DfsClientConf.ShortCircuitConf;
@ -44,9 +42,9 @@ import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.datatransfer.Sender;
import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReleaseShortCircuitAccessResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status;
import org.apache.hadoop.hdfs.protocolPB.PBHelper;
import org.apache.hadoop.hdfs.protocolPB.PBHelperClient;
import org.apache.hadoop.hdfs.shortcircuit.ShortCircuitShm.Slot;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.hdfs.util.IOUtilsClient;
import org.apache.hadoop.ipc.RetriableException;
import org.apache.hadoop.net.unix.DomainSocket;
import org.apache.hadoop.net.unix.DomainSocketWatcher;
@ -59,6 +57,9 @@ import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Preconditions;
import com.google.common.util.concurrent.ThreadFactoryBuilder;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* The ShortCircuitCache tracks things which the client needs to access
* HDFS block files via short-circuit.
@ -68,7 +69,8 @@ import com.google.common.util.concurrent.ThreadFactoryBuilder;
*/
@InterfaceAudience.Private
public class ShortCircuitCache implements Closeable {
public static final Log LOG = LogFactory.getLog(ShortCircuitCache.class);
public static final Logger LOG = LoggerFactory.getLogger(
ShortCircuitCache.class);
/**
* Expiry thread which makes sure that the file descriptors get closed
@ -189,19 +191,16 @@ public class ShortCircuitCache implements Closeable {
}
final DfsClientShm shm = (DfsClientShm)slot.getShm();
final DomainSocket shmSock = shm.getPeer().getDomainSocket();
DomainSocket sock = null;
DataOutputStream out = null;
final String path = shmSock.getPath();
boolean success = false;
try {
sock = DomainSocket.connect(path);
out = new DataOutputStream(
new BufferedOutputStream(sock.getOutputStream()));
try (DomainSocket sock = DomainSocket.connect(path);
DataOutputStream out = new DataOutputStream(
new BufferedOutputStream(sock.getOutputStream()))) {
new Sender(out).releaseShortCircuitFds(slot.getSlotId());
DataInputStream in = new DataInputStream(sock.getInputStream());
ReleaseShortCircuitAccessResponseProto resp =
ReleaseShortCircuitAccessResponseProto.parseFrom(
PBHelper.vintPrefixed(in));
PBHelperClient.vintPrefixed(in));
if (resp.getStatus() != Status.SUCCESS) {
String error = resp.hasError() ? resp.getError() : "(unknown)";
throw new IOException(resp.getStatus().toString() + ": " + error);
@ -221,7 +220,6 @@ public class ShortCircuitCache implements Closeable {
} else {
shm.getEndpointShmManager().shutdown(shm);
}
IOUtils.cleanup(LOG, sock, out);
}
}
}
@ -890,7 +888,7 @@ public class ShortCircuitCache implements Closeable {
maxNonMmappedEvictableLifespanMs = 0;
maxEvictableMmapedSize = 0;
// Close and join cacheCleaner thread.
IOUtils.cleanup(LOG, cacheCleaner);
IOUtilsClient.cleanup(LOG, cacheCleaner);
// Purge all replicas.
while (true) {
Entry<Long, ShortCircuitReplica> entry = evictable.firstEntry();
@ -933,7 +931,7 @@ public class ShortCircuitCache implements Closeable {
LOG.error("Interrupted while waiting for CleanerThreadPool "
+ "to terminate", e);
}
IOUtils.cleanup(LOG, shmManager);
IOUtilsClient.cleanup(LOG, shmManager);
}
@VisibleForTesting // ONLY for testing
View File
@ -23,12 +23,11 @@ import java.nio.MappedByteBuffer;
import java.nio.channels.FileChannel;
import java.nio.channels.FileChannel.MapMode;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.hdfs.ExtendedBlockId;
import org.apache.hadoop.hdfs.server.datanode.BlockMetadataHeader;
import org.apache.hadoop.hdfs.shortcircuit.ShortCircuitShm.Slot;
import org.apache.hadoop.hdfs.util.IOUtilsClient;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.io.nativeio.NativeIO;
import org.apache.hadoop.util.Time;
@ -36,6 +35,9 @@ import org.apache.hadoop.util.Time;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Preconditions;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* A ShortCircuitReplica object contains file descriptors for a block that
* we are reading via short-circuit local reads.
@ -46,7 +48,8 @@ import com.google.common.base.Preconditions;
*/
@InterfaceAudience.Private
public class ShortCircuitReplica {
public static final Log LOG = LogFactory.getLog(ShortCircuitCache.class);
public static final Logger LOG = LoggerFactory.getLogger(
ShortCircuitCache.class);
/**
* Identifies this ShortCircuitReplica object.
@ -253,7 +256,7 @@ public class ShortCircuitReplica {
suffix += " munmapped.";
}
}
IOUtils.cleanup(LOG, dataStream, metaStream);
IOUtilsClient.cleanup(LOG, dataStream, metaStream);
if (slot != null) {
cache.scheduleSlotReleaser(slot);
if (LOG.isTraceEnabled()) {
View File
@ -27,8 +27,6 @@ import java.util.Random;
import org.apache.commons.lang.builder.EqualsBuilder;
import org.apache.commons.lang.builder.HashCodeBuilder;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.fs.InvalidRequestException;
import org.apache.hadoop.hdfs.ExtendedBlockId;
import org.apache.hadoop.io.nativeio.NativeIO;
@ -36,6 +34,9 @@ import org.apache.hadoop.io.nativeio.NativeIO.POSIX;
import org.apache.hadoop.util.Shell;
import org.apache.hadoop.util.StringUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import sun.misc.Unsafe;
import com.google.common.base.Preconditions;
@ -46,7 +47,7 @@ import com.google.common.primitives.Ints;
* A shared memory segment used to implement short-circuit reads.
*/
public class ShortCircuitShm {
private static final Log LOG = LogFactory.getLog(ShortCircuitShm.class);
private static final Logger LOG = LoggerFactory.getLogger(ShortCircuitShm.class);
protected static final int BYTES_PER_SLOT = 64;
View File
@ -22,29 +22,30 @@ import java.util.LinkedList;
import java.util.Map;
import java.util.Queue;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.HadoopIllegalArgumentException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.util.Time;
import com.google.common.base.Preconditions;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* Manage byte array creation and release.
* Manage byte array creation and release.
*/
@InterfaceAudience.Private
public abstract class ByteArrayManager {
static final Log LOG = LogFactory.getLog(ByteArrayManager.class);
private static final ThreadLocal<StringBuilder> debugMessage = new ThreadLocal<StringBuilder>() {
static final Logger LOG = LoggerFactory.getLogger(ByteArrayManager.class);
private static final ThreadLocal<StringBuilder> DEBUG_MESSAGE =
new ThreadLocal<StringBuilder>() {
protected StringBuilder initialValue() {
return new StringBuilder();
}
};
private static void logDebugMessage() {
final StringBuilder b = debugMessage.get();
LOG.debug(b);
final StringBuilder b = DEBUG_MESSAGE.get();
LOG.debug(b.toString());
b.setLength(0);
}
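With the move to slf4j, the accumulated builder is passed as b.toString() because the slf4j Logger takes String messages rather than arbitrary Objects. For simple one-off messages the parameterized form can drop the explicit isDebugEnabled() guard entirely; a minimal sketch with an illustrative message:

int arrayLength = 512; // illustrative value
// The {} placeholder is only rendered when debug logging is enabled,
// so no LOG.isDebugEnabled() check is needed for cheap arguments.
LOG.debug("allocate: requested array length = {}", arrayLength);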
@ -97,7 +98,7 @@ public abstract class ByteArrayManager {
/**
* Increment the counter, and reset it if there is no increment
* for acertain time period.
* for a certain time period.
*
* @return the new count.
*/
@ -112,10 +113,10 @@ public abstract class ByteArrayManager {
}
/** A map from integers to counters. */
static class CounterMap {
static final class CounterMap {
/** @see ByteArrayManager.Conf#countResetTimePeriodMs */
private final long countResetTimePeriodMs;
private final Map<Integer, Counter> map = new HashMap<Integer, Counter>();
private final Map<Integer, Counter> map = new HashMap<>();
private CounterMap(long countResetTimePeriodMs) {
this.countResetTimePeriodMs = countResetTimePeriodMs;
@ -125,7 +126,8 @@ public abstract class ByteArrayManager {
* @return the counter for the given key;
* and create a new counter if it does not exist.
*/
synchronized Counter get(final Integer key, final boolean createIfNotExist) {
synchronized Counter get(final Integer key, final boolean
createIfNotExist) {
Counter count = map.get(key);
if (count == null && createIfNotExist) {
count = new Counter(countResetTimePeriodMs);
@ -133,17 +135,13 @@ public abstract class ByteArrayManager {
}
return count;
}
synchronized void clear() {
map.clear();
}
}
/** Manage byte arrays with the same fixed length. */
static class FixedLengthManager {
private final int byteArrayLength;
private final int maxAllocated;
private final Queue<byte[]> freeQueue = new LinkedList<byte[]>();
private final Queue<byte[]> freeQueue = new LinkedList<>();
private int numAllocated = 0;
@ -157,31 +155,31 @@ public abstract class ByteArrayManager {
*
* If the number of allocated arrays >= maximum, the current thread is
* blocked until the number of allocated arrays drops to below the maximum.
*
*
* The byte array allocated by this method must be returned for recycling
* via the {@link FixedLengthManager#recycle(byte[])} method.
*/
synchronized byte[] allocate() throws InterruptedException {
if (LOG.isDebugEnabled()) {
debugMessage.get().append(", ").append(this);
DEBUG_MESSAGE.get().append(", ").append(this);
}
for(; numAllocated >= maxAllocated;) {
if (LOG.isDebugEnabled()) {
debugMessage.get().append(": wait ...");
DEBUG_MESSAGE.get().append(": wait ...");
logDebugMessage();
}
wait();
if (LOG.isDebugEnabled()) {
debugMessage.get().append("wake up: ").append(this);
DEBUG_MESSAGE.get().append("wake up: ").append(this);
}
}
numAllocated++;
final byte[] array = freeQueue.poll();
if (LOG.isDebugEnabled()) {
debugMessage.get().append(", recycled? ").append(array != null);
DEBUG_MESSAGE.get().append(", recycled? ").append(array != null);
}
return array != null? array : new byte[byteArrayLength];
}
@ -197,7 +195,7 @@ public abstract class ByteArrayManager {
Preconditions.checkNotNull(array);
Preconditions.checkArgument(array.length == byteArrayLength);
if (LOG.isDebugEnabled()) {
debugMessage.get().append(", ").append(this);
DEBUG_MESSAGE.get().append(", ").append(this);
}
notify();
@ -210,7 +208,7 @@ public abstract class ByteArrayManager {
if (freeQueue.size() < maxAllocated - numAllocated) {
if (LOG.isDebugEnabled()) {
debugMessage.get().append(", freeQueue.offer");
DEBUG_MESSAGE.get().append(", freeQueue.offer");
}
freeQueue.offer(array);
}
@ -227,7 +225,7 @@ public abstract class ByteArrayManager {
/** A map from array lengths to byte array managers. */
static class ManagerMap {
private final int countLimit;
private final Map<Integer, FixedLengthManager> map = new HashMap<Integer, FixedLengthManager>();
private final Map<Integer, FixedLengthManager> map = new HashMap<>();
ManagerMap(int countLimit) {
this.countLimit = countLimit;
@ -243,12 +241,11 @@ public abstract class ByteArrayManager {
}
return manager;
}
synchronized void clear() {
map.clear();
}
}
/**
* Configuration for ByteArrayManager.
*/
public static class Conf {
/**
* The count threshold for each array length so that a manager is created
@ -265,7 +262,8 @@ public abstract class ByteArrayManager {
*/
private final long countResetTimePeriodMs;
public Conf(int countThreshold, int countLimit, long countResetTimePeriodMs) {
public Conf(int countThreshold, int countLimit, long
countResetTimePeriodMs) {
this.countThreshold = countThreshold;
this.countLimit = countLimit;
this.countResetTimePeriodMs = countResetTimePeriodMs;
@ -277,20 +275,20 @@ public abstract class ByteArrayManager {
* the returned array is larger than or equal to the given length.
*
* The current thread may be blocked if some resource is unavailable.
*
*
* The byte array created by this method must be released
* via the {@link ByteArrayManager#release(byte[])} method.
*
* @return a byte array with length larger than or equal to the given length.
*/
public abstract byte[] newByteArray(int size) throws InterruptedException;
/**
* Release the given byte array.
*
*
* The byte array may or may not be created
* by the {@link ByteArrayManager#newByteArray(int)} method.
*
*
* @return the number of free arrays.
*/
public abstract int release(byte[] array);
@ -307,7 +305,7 @@ public abstract class ByteArrayManager {
public byte[] newByteArray(int size) throws InterruptedException {
return new byte[size];
}
@Override
public int release(byte[] array) {
return 0;
@ -320,38 +318,41 @@ public abstract class ByteArrayManager {
*/
static class Impl extends ByteArrayManager {
private final Conf conf;
private final CounterMap counters;
private final ManagerMap managers;
Impl(Conf conf) {
this.conf = conf;
this.counters = new CounterMap(conf.countResetTimePeriodMs);
this.managers = new ManagerMap(conf.countLimit);
}
/**
* Allocate a byte array, where the length of the allocated array
* is the least power of two of the given length
* unless the given length is less than {@link #MIN_ARRAY_LENGTH}.
* In such case, the returned array length is equal to {@link #MIN_ARRAY_LENGTH}.
* In such case, the returned array length is equal to {@link
* #MIN_ARRAY_LENGTH}.
*
* If the number of allocated arrays exceeds the capacity,
* the current thread is blocked until
* the number of allocated arrays drops to below the capacity.
*
*
* The byte array allocated by this method must be returned for recycling
* via the {@link Impl#release(byte[])} method.
*
* @return a byte array with length larger than or equal to the given length.
* @return a byte array with length larger than or equal to the given
* length.
*/
@Override
public byte[] newByteArray(final int arrayLength) throws InterruptedException {
public byte[] newByteArray(final int arrayLength)
throws InterruptedException {
Preconditions.checkArgument(arrayLength >= 0);
if (LOG.isDebugEnabled()) {
debugMessage.get().append("allocate(").append(arrayLength).append(")");
DEBUG_MESSAGE.get().append("allocate(").append(arrayLength).append(")");
}
final byte[] array;
if (arrayLength == 0) {
array = EMPTY_BYTE_ARRAY;
@ -361,37 +362,40 @@ public abstract class ByteArrayManager {
final long count = counters.get(powerOfTwo, true).increment();
final boolean aboveThreshold = count > conf.countThreshold;
// create a new manager only if the count is above threshold.
final FixedLengthManager manager = managers.get(powerOfTwo, aboveThreshold);
final FixedLengthManager manager =
managers.get(powerOfTwo, aboveThreshold);
if (LOG.isDebugEnabled()) {
debugMessage.get().append(": count=").append(count)
DEBUG_MESSAGE.get().append(": count=").append(count)
.append(aboveThreshold? ", aboveThreshold": ", belowThreshold");
}
array = manager != null? manager.allocate(): new byte[powerOfTwo];
}
if (LOG.isDebugEnabled()) {
debugMessage.get().append(", return byte[").append(array.length).append("]");
DEBUG_MESSAGE.get().append(", return byte[")
.append(array.length).append("]");
logDebugMessage();
}
return array;
}
/**
* Recycle the given byte array.
*
*
* The byte array may or may not be allocated
* by the {@link Impl#newByteArray(int)} method.
*
*
* This is a non-blocking call.
*/
@Override
public int release(final byte[] array) {
Preconditions.checkNotNull(array);
if (LOG.isDebugEnabled()) {
debugMessage.get().append("recycle: array.length=").append(array.length);
DEBUG_MESSAGE.get()
.append("recycle: array.length=").append(array.length);
}
final int freeQueueSize;
if (array.length == 0) {
freeQueueSize = -1;
@ -399,18 +403,18 @@ public abstract class ByteArrayManager {
final FixedLengthManager manager = managers.get(array.length, false);
freeQueueSize = manager == null? -1: manager.recycle(array);
}
if (LOG.isDebugEnabled()) {
debugMessage.get().append(", freeQueueSize=").append(freeQueueSize);
DEBUG_MESSAGE.get().append(", freeQueueSize=").append(freeQueueSize);
logDebugMessage();
}
return freeQueueSize;
}
CounterMap getCounters() {
return counters;
}
ManagerMap getManagers() {
return managers;
}
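The javadoc in the hunks above pins down the contract: newByteArray(int) rounds the request up to the next power of two (never below MIN_ARRAY_LENGTH), blocks once the per-length limit is reached, and every array must come back through release(byte[]). A hedged sketch of that round trip; copyStream is hypothetical, and it assumes caller code in the same package since Impl is package-private:

  static void copyStream(java.io.InputStream in, java.io.OutputStream out)
      throws java.io.IOException, InterruptedException {
    ByteArrayManager bam = new ByteArrayManager.Impl(
        new ByteArrayManager.Conf(128, 2048, 10000)); // countThreshold, countLimit, reset period (ms)
    byte[] buf = bam.newByteArray(3000);              // rounded up to 4096; may block
    try {
      int n;
      while ((n = in.read(buf)) > 0) {
        out.write(buf, 0, n);
      }
    } finally {
      bam.release(buf);                               // recycle so a blocked allocator can proceed
    }
  }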

View File

@ -0,0 +1,46 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.util;
import org.slf4j.Logger;
import java.io.IOException;
public class IOUtilsClient {
/**
* Close the Closeable objects and <b>ignore</b> any {@link IOException} or
* null pointers. Must only be used for cleanup in exception handlers.
*
* @param log the log to record problems to at debug level. Can be null.
* @param closeables the objects to close
*/
public static void cleanup(Logger log, java.io.Closeable... closeables) {
for (java.io.Closeable c : closeables) {
if (c != null) {
try {
c.close();
} catch(Throwable e) {
if (log != null && log.isDebugEnabled()) {
log.debug("Exception in closing " + c, e);
}
}
}
}
}
}
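The javadoc above is the whole contract: cleanup() closes each stream, logs failures at debug level when a logger is supplied, and never throws, so it only belongs on paths where the streams are already being abandoned (as in the ShortCircuitReplica hunk earlier in this diff). A hypothetical caller; the logger name, method, and stream parameters are stand-ins:

  private static final Logger LOG =
      LoggerFactory.getLogger("example.ReplicaCleanup");

  void abortReplica(java.io.FileInputStream dataStream,
                    java.io.FileInputStream metaStream) {
    // Best-effort teardown on an error path: close failures are swallowed
    // (logged at debug), so they cannot mask the original exception.
    IOUtilsClient.cleanup(LOG, dataStream, metaStream);
  }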

View File

@ -0,0 +1,18 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.util;

View File

@ -40,6 +40,7 @@ import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.BlockLocation;
import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.fs.ContentSummary;
import org.apache.hadoop.fs.CreateFlag;
import org.apache.hadoop.fs.DelegationTokenRenewer;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
@ -1166,6 +1167,25 @@ public class WebHdfsFileSystem extends FileSystem
).run();
}
@Override
@SuppressWarnings("deprecation")
public FSDataOutputStream createNonRecursive(final Path f,
final FsPermission permission, final EnumSet<CreateFlag> flag,
final int bufferSize, final short replication, final long blockSize,
final Progressable progress) throws IOException {
statistics.incrementWriteOps(1);
final HttpOpParam.Op op = PutOpParam.Op.CREATE;
return new FsPathOutputStreamRunner(op, f, bufferSize,
new PermissionParam(applyUMask(permission)),
new CreateFlagParam(flag),
new CreateParentParam(false),
new BufferSizeParam(bufferSize),
new ReplicationParam(replication),
new BlockSizeParam(blockSize)
).run();
}
@Override
public FSDataOutputStream append(final Path f, final int bufferSize,
final Progressable progress) throws IOException {
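The new override maps createNonRecursive onto the same CREATE op but pins createparent=false and forwards the CreateFlag set, so the write fails when the parent directory is missing instead of silently creating it. A hedged client-side sketch; the URI, path, and sizes are placeholders, and the usual org.apache.hadoop.fs imports are assumed:

  void writeWithoutCreatingParents() throws java.io.IOException {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(
        java.net.URI.create("webhdfs://nn.example.com:50070"), conf);
    Path file = new Path("/tmp/example/part-00000");   // parent must already exist
    try (FSDataOutputStream out = fs.createNonRecursive(
        file, FsPermission.getFileDefault(),
        java.util.EnumSet.of(CreateFlag.CREATE, CreateFlag.OVERWRITE),
        4096, (short) 3, 128L * 1024 * 1024, null)) {
      out.writeBytes("hello\n");
    }
  }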

View File

@ -0,0 +1,48 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.hadoop.hdfs.web.resources;
import org.apache.hadoop.fs.CreateFlag;
import java.util.EnumSet;
/**
* CreateFlag enum.
*/
public class CreateFlagParam extends EnumSetParam<CreateFlag> {
public static final String NAME = "createflag";
public static final String DEFAULT = "";
private static final Domain<CreateFlag> DOMAIN = new Domain<CreateFlag>(
NAME, CreateFlag.class);
public CreateFlagParam(final EnumSet<CreateFlag> createFlags) {
super(DOMAIN, createFlags);
}
public CreateFlagParam(final String str) {
super(DOMAIN, DOMAIN.parse(str));
}
@Override
public String getName() {
return NAME;
}
}
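CreateFlagParam is a thin EnumSetParam wrapper, so a flag set and its comma-separated query-string form should round-trip through the two constructors. A hedged sketch; getValue() comes from the Param base class and the exact string encoding is EnumSetParam's, both assumed here rather than shown in this hunk:

  CreateFlagParam fromFlags =
      new CreateFlagParam(EnumSet.of(CreateFlag.CREATE, CreateFlag.OVERWRITE));
  CreateFlagParam fromQuery = new CreateFlagParam("CREATE,OVERWRITE");
  // Both should yield an EnumSet containing CREATE and OVERWRITE.
  assert fromFlags.getValue().equals(fromQuery.getValue());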

View File

@ -22,7 +22,7 @@ public class CreateParentParam extends BooleanParam {
/** Parameter name. */
public static final String NAME = "createparent";
/** Default parameter value. */
public static final String DEFAULT = FALSE;
public static final String DEFAULT = TRUE;
private static final Domain DOMAIN = new Domain(NAME);

View File

@ -101,32 +101,6 @@ message GetBlockLocalPathInfoResponseProto {
required string localMetaPath = 3;
}
/**
* Query for the disk locations of a number of blocks on this DN.
* blockPoolId - the pool to query
* blockIds - list of block IDs to query
* tokens - list of access tokens corresponding to list of block IDs
*/
message GetHdfsBlockLocationsRequestProto {
// Removed: HDFS-3969
// repeated ExtendedBlockProto blocks = 1;
repeated hadoop.common.TokenProto tokens = 2;
required string blockPoolId = 3;
repeated sfixed64 blockIds = 4 [ packed = true ];
}
/**
* volumeIds - id of each volume, potentially multiple bytes
* volumeIndexes - for each block, an index into volumeIds specifying the volume
* on which it is located. If block is not present on any volume,
* index is set to MAX_INT.
*/
message GetHdfsBlockLocationsResponseProto {
repeated bytes volumeIds = 1;
repeated uint32 volumeIndexes = 2 [ packed = true ];
}
/**
* forUpgrade - if true, clients are advised to wait for restart and quick
* upgrade restart is instrumented. Otherwise, datanode does
@ -219,13 +193,6 @@ service ClientDatanodeProtocolService {
rpc getBlockLocalPathInfo(GetBlockLocalPathInfoRequestProto)
returns(GetBlockLocalPathInfoResponseProto);
/**
* Retrieve additional HDFS-specific metadata about a set of blocks stored
* on the local file system.
*/
rpc getHdfsBlockLocations(GetHdfsBlockLocationsRequestProto)
returns(GetHdfsBlockLocationsResponseProto);
rpc shutdownDatanode(ShutdownDatanodeRequestProto)
returns(ShutdownDatanodeResponseProto);

View File

@ -302,3 +302,7 @@ message OpBlockChecksumResponseProto {
required bytes md5 = 3;
optional ChecksumTypeProto crcType = 4;
}
message OpCustomProto {
required string customId = 1;
}

View File

@ -34,6 +34,8 @@ Trunk (Unreleased)
HDFS-8591. Remove support for deprecated configuration key
dfs.namenode.decommission.nodes.per.interval. (wang)
HDFS-8895. Remove deprecated BlockStorageLocation APIs. (wang)
NEW FEATURES
HDFS-3125. Add JournalService to enable Journal Daemon. (suresh)
@ -357,6 +359,9 @@ Release 2.8.0 - UNRELEASED
IMPROVEMENTS
HDFS-2390. dfsadmin -setBalancerBandwidth does not validate -ve value
(Gautam Gopalakrishnan via harsh)
HDFS-8821. Explain message "Operation category X is not supported
in state standby" (Gautam Gopalakrishnan via harsh)
@ -796,6 +801,60 @@ Release 2.8.0 - UNRELEASED
HDFS-6407. Add sorting and pagination in the datanode tab of the NN Web UI.
(wheat9)
HDFS-8801. Convert BlockInfoUnderConstruction as a feature.
(Jing Zhao via wheat9)
HDFS-8880. NameNode metrics logging. (Arpit Agarwal)
HDFS-8278. When computing max-size-to-move in Balancer, count only the
storage with remaining >= default block size. (szetszwo)
HDFS-8435. Support CreateFlag in WebHDFS. (Jakob Homan via cdouglas)
HDFS-8826. In Balancer, add an option to specify the source node list
so that balancer only selects blocks to move from those nodes. (szetszwo)
HDFS-8911. NameNode Metric : Add Editlog counters as a JMX metric.
(Anu Engineer via Arpit Agarwal)
HDFS-8803. Move DfsClientConf to hdfs-client. (Mingliang Liu via wheat9)
HDFS-8917. Cleanup BlockInfoUnderConstruction from comments and tests.
(Zhe Zhang via jing9)
HDFS-8884. Fail-fast check in BlockPlacementPolicyDefault#chooseTarget.
(yliu)
HDFS-8828. Utilize Snapshot diff report to build diff copy list in distcp.
(Yufei Gu via Yongjun Zhang)
HDFS-8823. Move replication factor into individual blocks. (wheat9)
HDFS-8934. Move ShortCircuitShm to hdfs-client. (Mingliang Liu via wheat9)
HDFS-8928. Improvements for BlockUnderConstructionFeature:
ReplicaUnderConstruction as a separate class and replicas as an array.
(jing9)
HDFS-8900. Compact XAttrs to optimize memory footprint. (yliu)
HDFS-8846. Add a unit test for INotify functionality across a layout
version upgrade (Zhe Zhang via Colin P. McCabe)
HDFS-8951. Move the shortcircuit package to hdfs-client.
(Mingliang Liu via wheat9)
HDFS-8896. DataNode object isn't GCed when shutdown, because it has GC
root in ShutdownHookManager. (Walter Su via jing9)
HDFS-8248. Store INodeId instead of the INodeFile object in
BlockInfoContiguous. (wheat9)
HDFS-8962. Clean up checkstyle warnings in o.a.h.hdfs.DfsClientConf.
(Mingliang Liu via wheat9)
HDFS-8865. Improve quota initialization performance. (kihwal)
OPTIMIZATIONS
HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than
@ -804,6 +863,14 @@ Release 2.8.0 - UNRELEASED
HDFS-7433. Optimize performance of DatanodeManager's node map.
(daryn via kihwal)
HDFS-8792. BlockManager#postponedMisreplicatedBlocks should use a
LightWeightHashSet to save memory (Yi Liu via Colin P. McCabe)
HDFS-8845. DiskChecker should not traverse the entire tree (Chang Li via
Colin P. McCabe)
HDFS-8862. BlockManager#excessReplicateMap should use a HashMap. (yliu)
BUG FIXES
HDFS-7501. TransactionsSinceLastCheckpoint can be negative on SBNs.
@ -1149,7 +1216,37 @@ Release 2.8.0 - UNRELEASED
HDFS-8565. Typo in dfshealth.html - Decomissioning. (nijel via xyao)
HDFS-8891. HDFS concat should keep srcs order. (Yong Zhang via jing9)
HDFS-8908. TestAppendSnapshotTruncate may fail with IOException: Failed to
replace a bad datanode. (Tsz Wo Nicholas Sze via yliu)
HDFS-8922. Link the native_mini_dfs test library with libdl, since IBM Java
requires it (Ayappan via Colin P. McCabe)
HDFS-8809. HDFS fsck reports under construction blocks as "CORRUPT". (jing9)
HDFS-8942. Update hyperlink to rack awareness page in HDFS Architecture
documentation. (Masatake Iwasaki via aajisaka)
HDFS-8930. Block report lease may leak if the 2nd full block report comes
when NN is still in safemode (Colin P. McCabe via Jing Zhao)
HDFS-8948. Use GenericTestUtils to set log levels in TestPread and
TestReplaceDatanodeOnFailure. (Mingliang Liu via wheat9)
HDFS-8932. NPE thrown in NameNode when try to get TotalSyncCount metric
before editLogStream initialization. (Surendra Singh Lilhore via xyao)
HDFS-8682. Should not remove decommissioned node,while calculating the
number of live/dead decommissioned node. (J. Andreina via vinayakumarb)
HDFS-8961. Investigate lock issue in o.a.h.hdfs.shortcircuit.
DfsClientShmManager.EndpointShmManager. (Mingliang Liu via wheat9)
HDFS-8969. Clean up findbugs warnings for HDFS-8823 and HDFS-8932.
(Anu Engineer via wheat9)
HDFS-8963. Fix incorrect sign extension of xattr length in HDFS-8900.
(Colin Patrick McCabe via yliu)
Release 2.7.2 - UNRELEASED
@ -1159,11 +1256,11 @@ Release 2.7.2 - UNRELEASED
IMPROVEMENTS
HDFS-8659. Block scanner INFO message is spamming logs. (Yongjun Zhang)
HDFS-8659. Block scanner INFO message is spamming logs. (Yongjun Zhang)
OPTIMIZATIONS
HDFS-8722. Optimize datanode writes for small writes and flushes (kihwal)
HDFS-8722. Optimize datanode writes for small writes and flushes (kihwal)
BUG FIXES
@ -1173,6 +1270,16 @@ Release 2.7.2 - UNRELEASED
HDFS-8806. Inconsistent metrics: number of missing blocks with replication
factor 1 not properly cleared. (Zhe Zhang via aajisaka)
HDFS-8852. HDFS architecture documentation of version 2.x is outdated
about append write support. (Ajith S via aajisaka)
HDFS-8867. Enable optimized block reports. (Daryn Sharp via jing9)
HDFS-8863. The remaining space check in BlockPlacementPolicyDefault is
flawed. (Kihwal Lee via yliu)
HDFS-8891. HDFS concat should keep srcs order. (Yong Zhang via jing9)
Release 2.7.1 - 2015-07-06
INCOMPATIBLE CHANGES
@ -2302,6 +2409,18 @@ Release 2.7.0 - 2015-04-20
HDFS-7700. Document quota support for storage types. (Xiaoyu Yao via
Arpit Agarwal)
Release 2.6.2 - UNRELEASED
INCOMPATIBLE CHANGES
NEW FEATURES
IMPROVEMENTS
OPTIMIZATIONS
BUG FIXES
Release 2.6.1 - UNRELEASED
INCOMPATIBLE CHANGES

View File

@ -176,16 +176,6 @@
<Method name="setDirInternal" />
<Bug pattern="DM_STRING_CTOR" />
</Match>
<Match>
<Class name="org.apache.hadoop.hdfs.shortcircuit.DfsClientShmManager$EndpointShmManager" />
<Method name="allocSlot" />
<Bug pattern="UL_UNRELEASED_LOCK_EXCEPTION_PATH" />
</Match>
<Match>
<Class name="org.apache.hadoop.hdfs.shortcircuit.DfsClientShmManager$EndpointShmManager" />
<Method name="allocSlot" />
<Bug pattern="UL_UNRELEASED_LOCK" />
</Match>
<!-- Manually verified to be okay, we want to throw away the top bit here -->
<Match>
<Class name="org.apache.hadoop.hdfs.server.namenode.CachedBlock" />

View File

@ -144,6 +144,7 @@ add_library(native_mini_dfs
)
target_link_libraries(native_mini_dfs
${JAVA_JVM_LIBRARY}
${LIB_DL}
${OS_LINK_LIBRARIES}
)

View File

@ -1,52 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs;
import java.io.IOException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
/**
* Wrapper for {@link BlockLocation} that also adds {@link VolumeId} volume
* location information for each replica.
*/
@InterfaceStability.Unstable
@InterfaceAudience.Public
@Deprecated
public class BlockStorageLocation extends BlockLocation {
private final VolumeId[] volumeIds;
public BlockStorageLocation(BlockLocation loc, VolumeId[] volumeIds)
throws IOException {
// Initialize with data from passed in BlockLocation
super(loc.getNames(), loc.getHosts(), loc.getTopologyPaths(), loc
.getOffset(), loc.getLength(), loc.isCorrupt());
this.volumeIds = volumeIds;
}
/**
* Gets the list of {@link VolumeId} corresponding to the block's replicas.
*
* @return volumeIds list of VolumeId for the block's replicas
*/
public VolumeId[] getVolumeIds() {
return volumeIds;
}
}

View File

@ -1,73 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs;
import org.apache.commons.lang.builder.EqualsBuilder;
import org.apache.commons.lang.builder.HashCodeBuilder;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.util.StringUtils;
import com.google.common.base.Preconditions;
/**
* HDFS-specific volume identifier which implements {@link VolumeId}. Can be
* used to differentiate between the data directories on a single datanode. This
* identifier is only unique on a per-datanode basis.
*/
@InterfaceStability.Unstable
@InterfaceAudience.Public
public class HdfsVolumeId implements VolumeId {
private final byte[] id;
public HdfsVolumeId(byte[] id) {
Preconditions.checkNotNull(id, "id cannot be null");
this.id = id;
}
@Override
public int compareTo(VolumeId arg0) {
if (arg0 == null) {
return 1;
}
return hashCode() - arg0.hashCode();
}
@Override
public int hashCode() {
return new HashCodeBuilder().append(id).toHashCode();
}
@Override
public boolean equals(Object obj) {
if (obj == null || obj.getClass() != getClass()) {
return false;
}
if (obj == this) {
return true;
}
HdfsVolumeId that = (HdfsVolumeId) obj;
return new EqualsBuilder().append(this.id, that.id).isEquals();
}
@Override
public String toString() {
return StringUtils.byteToHexString(id);
}
}

View File

@ -24,8 +24,12 @@ import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.FileInputStream;
import java.io.IOException;
import java.lang.reflect.Constructor;
import java.net.InetSocketAddress;
import java.util.List;
import com.google.common.io.ByteArrayDataOutput;
import com.google.common.io.ByteStreams;
import org.apache.commons.lang.mutable.MutableBoolean;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
@ -41,7 +45,7 @@ import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.protocol.datatransfer.InvalidEncryptionKeyException;
import org.apache.hadoop.hdfs.protocol.datatransfer.Sender;
import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto;
import org.apache.hadoop.hdfs.protocolPB.PBHelper;
import org.apache.hadoop.hdfs.protocolPB.PBHelperClient;
import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
import org.apache.hadoop.hdfs.security.token.block.InvalidBlockTokenException;
import org.apache.hadoop.hdfs.server.datanode.CachingStrategy;
@ -91,7 +95,7 @@ public class BlockReaderFactory implements ShortCircuitReplicaCreator {
/**
* Injects failures into specific operations during unit tests.
*/
private final FailureInjector failureInjector;
private static FailureInjector failureInjector = new FailureInjector();
/**
* The file name, for logging and debugging purposes.
@ -187,7 +191,6 @@ public class BlockReaderFactory implements ShortCircuitReplicaCreator {
public BlockReaderFactory(DfsClientConf conf) {
this.conf = conf;
this.failureInjector = conf.getShortCircuitConf().brfFailureInjector;
this.remainingCacheTries = conf.getNumCachedConnRetry();
}
@ -278,6 +281,11 @@ public class BlockReaderFactory implements ShortCircuitReplicaCreator {
return this;
}
@VisibleForTesting
public static void setFailureInjectorForTesting(FailureInjector injector) {
failureInjector = injector;
}
/**
* Build a BlockReader with the given options.
*
@ -319,9 +327,11 @@ public class BlockReaderFactory implements ShortCircuitReplicaCreator {
* If there was another problem.
*/
public BlockReader build() throws IOException {
BlockReader reader = null;
Preconditions.checkNotNull(configuration);
BlockReader reader = tryToCreateExternalBlockReader();
if (reader != null) {
return reader;
}
final ShortCircuitConf scConf = conf.getShortCircuitConf();
if (scConf.isShortCircuitLocalReads() && allowShortCircuitLocalReads) {
if (clientContext.getUseLegacyBlockReaderLocal()) {
@ -358,6 +368,45 @@ public class BlockReaderFactory implements ShortCircuitReplicaCreator {
return getRemoteBlockReaderFromTcp();
}
private BlockReader tryToCreateExternalBlockReader() {
List<Class<? extends ReplicaAccessorBuilder>> clses =
conf.getReplicaAccessorBuilderClasses();
for (Class<? extends ReplicaAccessorBuilder> cls : clses) {
try {
ByteArrayDataOutput bado = ByteStreams.newDataOutput();
token.write(bado);
byte tokenBytes[] = bado.toByteArray();
Constructor<? extends ReplicaAccessorBuilder> ctor =
cls.getConstructor();
ReplicaAccessorBuilder builder = ctor.newInstance();
ReplicaAccessor accessor = builder.
setAllowShortCircuitReads(allowShortCircuitLocalReads).
setBlock(block.getBlockId(), block.getBlockPoolId()).
setBlockAccessToken(tokenBytes).
setClientName(clientName).
setConfiguration(configuration).
setFileName(fileName).
setVerifyChecksum(verifyChecksum).
setVisibleLength(length).
build();
if (accessor == null) {
if (LOG.isTraceEnabled()) {
LOG.trace(this + ": No ReplicaAccessor created by " +
cls.getName());
}
} else {
return new ExternalBlockReader(accessor, length, startOffset);
}
} catch (Throwable t) {
LOG.warn("Failed to construct new object of type " +
cls.getName(), t);
}
}
return null;
}
/**
* Get {@link BlockReaderLocalLegacy} for short circuited local reads.
* This block reader implements the path-based style of local reads
@ -367,7 +416,7 @@ public class BlockReaderFactory implements ShortCircuitReplicaCreator {
if (LOG.isTraceEnabled()) {
LOG.trace(this + ": trying to construct BlockReaderLocalLegacy");
}
if (!DFSClient.isLocalAddress(inetSocketAddress)) {
if (!DFSUtilClient.isLocalAddress(inetSocketAddress)) {
if (LOG.isTraceEnabled()) {
LOG.trace(this + ": can't construct BlockReaderLocalLegacy because " +
"the address " + inetSocketAddress + " is not local");
@ -380,7 +429,7 @@ public class BlockReaderFactory implements ShortCircuitReplicaCreator {
"disableLegacyBlockReaderLocal is set.", this);
return null;
}
IOException ioe = null;
IOException ioe;
try {
return BlockReaderLocalLegacy.newBlockReader(conf,
userGroupInformation, configuration, fileName, block, token,
@ -540,7 +589,7 @@ public class BlockReaderFactory implements ShortCircuitReplicaCreator {
failureInjector.getSupportsReceiptVerification());
DataInputStream in = new DataInputStream(peer.getInputStream());
BlockOpResponseProto resp = BlockOpResponseProto.parseFrom(
PBHelper.vintPrefixed(in));
PBHelperClient.vintPrefixed(in));
DomainSocket sock = peer.getDomainSocket();
failureInjector.injectRequestFileDescriptorsFailure();
switch (resp.getStatus()) {
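With the failure injector now a static field behind the @VisibleForTesting setter added above, tests swap it in and out directly instead of threading it through DfsClientConf. A hedged test-side sketch; FaultyInjector is hypothetical and FailureInjector is assumed to be visible to test code:

  // FaultyInjector would override the FailureInjector hooks exercised in the
  // hunk just above (e.g. the request-file-descriptors failure).
  BlockReaderFactory.setFailureInjectorForTesting(new FaultyInjector());
  try {
    // ... run reads that should trip the injected failure ...
  } finally {
    // Restore the no-op default so later tests are unaffected.
    BlockReaderFactory.setFailureInjectorForTesting(
        new BlockReaderFactory.FailureInjector());
  }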

View File

@ -1,368 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs;
import java.io.IOException;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.concurrent.Callable;
import java.util.concurrent.CancellationException;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Future;
import java.util.concurrent.ScheduledThreadPoolExecutor;
import java.util.concurrent.TimeUnit;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.BlockLocation;
import org.apache.hadoop.fs.BlockStorageLocation;
import org.apache.hadoop.fs.HdfsVolumeId;
import org.apache.hadoop.fs.VolumeId;
import org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.HdfsBlocksMetadata;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
import org.apache.hadoop.hdfs.security.token.block.InvalidBlockTokenException;
import org.apache.hadoop.ipc.RPC;
import org.apache.hadoop.security.token.Token;
import org.apache.htrace.Span;
import org.apache.htrace.Trace;
import org.apache.htrace.TraceScope;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
@InterfaceAudience.Private
@InterfaceStability.Unstable
class BlockStorageLocationUtil {
static final Log LOG = LogFactory
.getLog(BlockStorageLocationUtil.class);
/**
* Create a list of {@link VolumeBlockLocationCallable} corresponding to a set
* of datanodes and blocks. The blocks must all correspond to the same
* block pool.
*
* @param datanodeBlocks
* Map of datanodes to block replicas at each datanode
* @return callables Used to query each datanode for location information on
* the block replicas at the datanode
*/
private static List<VolumeBlockLocationCallable> createVolumeBlockLocationCallables(
Configuration conf, Map<DatanodeInfo, List<LocatedBlock>> datanodeBlocks,
int timeout, boolean connectToDnViaHostname, Span parent) {
if (datanodeBlocks.isEmpty()) {
return Lists.newArrayList();
}
// Construct the callables, one per datanode
List<VolumeBlockLocationCallable> callables =
new ArrayList<VolumeBlockLocationCallable>();
for (Map.Entry<DatanodeInfo, List<LocatedBlock>> entry : datanodeBlocks
.entrySet()) {
// Construct RPC parameters
DatanodeInfo datanode = entry.getKey();
List<LocatedBlock> locatedBlocks = entry.getValue();
if (locatedBlocks.isEmpty()) {
continue;
}
// Ensure that the blocks all are from the same block pool.
String poolId = locatedBlocks.get(0).getBlock().getBlockPoolId();
for (LocatedBlock lb : locatedBlocks) {
if (!poolId.equals(lb.getBlock().getBlockPoolId())) {
throw new IllegalArgumentException(
"All blocks to be queried must be in the same block pool: " +
locatedBlocks.get(0).getBlock() + " and " + lb +
" are from different pools.");
}
}
long[] blockIds = new long[locatedBlocks.size()];
int i = 0;
List<Token<BlockTokenIdentifier>> dnTokens =
new ArrayList<Token<BlockTokenIdentifier>>(
locatedBlocks.size());
for (LocatedBlock b : locatedBlocks) {
blockIds[i++] = b.getBlock().getBlockId();
dnTokens.add(b.getBlockToken());
}
VolumeBlockLocationCallable callable = new VolumeBlockLocationCallable(
conf, datanode, poolId, blockIds, dnTokens, timeout,
connectToDnViaHostname, parent);
callables.add(callable);
}
return callables;
}
/**
* Queries datanodes for the blocks specified in <code>datanodeBlocks</code>,
* making one RPC to each datanode. These RPCs are made in parallel using a
* threadpool.
*
* @param datanodeBlocks
* Map of datanodes to the blocks present on the DN
* @return metadatas Map of datanodes to block metadata of the DN
* @throws InvalidBlockTokenException
* if client does not have read access on a requested block
*/
static Map<DatanodeInfo, HdfsBlocksMetadata> queryDatanodesForHdfsBlocksMetadata(
Configuration conf, Map<DatanodeInfo, List<LocatedBlock>> datanodeBlocks,
int poolsize, int timeoutMs, boolean connectToDnViaHostname)
throws InvalidBlockTokenException {
List<VolumeBlockLocationCallable> callables =
createVolumeBlockLocationCallables(conf, datanodeBlocks, timeoutMs,
connectToDnViaHostname, Trace.currentSpan());
// Use a thread pool to execute the Callables in parallel
List<Future<HdfsBlocksMetadata>> futures =
new ArrayList<Future<HdfsBlocksMetadata>>();
ExecutorService executor = new ScheduledThreadPoolExecutor(poolsize);
try {
futures = executor.invokeAll(callables, timeoutMs,
TimeUnit.MILLISECONDS);
} catch (InterruptedException e) {
// Swallow the exception here, because we can return partial results
}
executor.shutdown();
Map<DatanodeInfo, HdfsBlocksMetadata> metadatas =
Maps.newHashMapWithExpectedSize(datanodeBlocks.size());
// Fill in metadatas with results from DN RPCs, where possible
for (int i = 0; i < futures.size(); i++) {
VolumeBlockLocationCallable callable = callables.get(i);
DatanodeInfo datanode = callable.getDatanodeInfo();
Future<HdfsBlocksMetadata> future = futures.get(i);
try {
HdfsBlocksMetadata metadata = future.get();
metadatas.put(callable.getDatanodeInfo(), metadata);
} catch (CancellationException e) {
LOG.info("Cancelled while waiting for datanode "
+ datanode.getIpcAddr(false) + ": " + e.toString());
} catch (ExecutionException e) {
Throwable t = e.getCause();
if (t instanceof InvalidBlockTokenException) {
LOG.warn("Invalid access token when trying to retrieve "
+ "information from datanode " + datanode.getIpcAddr(false));
throw (InvalidBlockTokenException) t;
}
else if (t instanceof UnsupportedOperationException) {
LOG.info("Datanode " + datanode.getIpcAddr(false) + " does not support"
+ " required #getHdfsBlocksMetadata() API");
throw (UnsupportedOperationException) t;
} else {
LOG.info("Failed to query block locations on datanode " +
datanode.getIpcAddr(false) + ": " + t);
}
if (LOG.isDebugEnabled()) {
LOG.debug("Could not fetch information from datanode", t);
}
} catch (InterruptedException e) {
// Shouldn't happen, because invokeAll waits for all Futures to be ready
LOG.info("Interrupted while fetching HdfsBlocksMetadata");
}
}
return metadatas;
}
/**
* Group the per-replica {@link VolumeId} info returned from
* {@link DFSClient#queryDatanodesForHdfsBlocksMetadata(Map)} to be
* associated
* with the corresponding {@link LocatedBlock}.
*
* @param blocks
* Original LocatedBlock array
* @param metadatas
* VolumeId information for the replicas on each datanode
* @return blockVolumeIds per-replica VolumeId information associated with the
* parent LocatedBlock
*/
static Map<LocatedBlock, List<VolumeId>> associateVolumeIdsWithBlocks(
List<LocatedBlock> blocks,
Map<DatanodeInfo, HdfsBlocksMetadata> metadatas) {
// Initialize mapping of ExtendedBlock to LocatedBlock.
// Used to associate results from DN RPCs to the parent LocatedBlock
Map<Long, LocatedBlock> blockIdToLocBlock =
new HashMap<Long, LocatedBlock>();
for (LocatedBlock b : blocks) {
blockIdToLocBlock.put(b.getBlock().getBlockId(), b);
}
// Initialize the mapping of blocks -> list of VolumeIds, one per replica
// This is filled out with real values from the DN RPCs
Map<LocatedBlock, List<VolumeId>> blockVolumeIds =
new HashMap<LocatedBlock, List<VolumeId>>();
for (LocatedBlock b : blocks) {
ArrayList<VolumeId> l = new ArrayList<VolumeId>(b.getLocations().length);
for (int i = 0; i < b.getLocations().length; i++) {
l.add(null);
}
blockVolumeIds.put(b, l);
}
// Iterate through the list of metadatas (one per datanode).
// For each metadata, if it's valid, insert its volume location information
// into the Map returned to the caller
for (Map.Entry<DatanodeInfo, HdfsBlocksMetadata> entry : metadatas.entrySet()) {
DatanodeInfo datanode = entry.getKey();
HdfsBlocksMetadata metadata = entry.getValue();
// Check if metadata is valid
if (metadata == null) {
continue;
}
long[] metaBlockIds = metadata.getBlockIds();
List<byte[]> metaVolumeIds = metadata.getVolumeIds();
List<Integer> metaVolumeIndexes = metadata.getVolumeIndexes();
// Add VolumeId for each replica in the HdfsBlocksMetadata
for (int j = 0; j < metaBlockIds.length; j++) {
int volumeIndex = metaVolumeIndexes.get(j);
long blockId = metaBlockIds[j];
// Skip if block wasn't found, or not a valid index into metaVolumeIds
// Also skip if the DN responded with a block we didn't ask for
if (volumeIndex == Integer.MAX_VALUE
|| volumeIndex >= metaVolumeIds.size()
|| !blockIdToLocBlock.containsKey(blockId)) {
if (LOG.isDebugEnabled()) {
LOG.debug("No data for block " + blockId);
}
continue;
}
// Get the VolumeId by indexing into the list of VolumeIds
// provided by the datanode
byte[] volumeId = metaVolumeIds.get(volumeIndex);
HdfsVolumeId id = new HdfsVolumeId(volumeId);
// Find out which index we are in the LocatedBlock's replicas
LocatedBlock locBlock = blockIdToLocBlock.get(blockId);
DatanodeInfo[] dnInfos = locBlock.getLocations();
int index = -1;
for (int k = 0; k < dnInfos.length; k++) {
if (dnInfos[k].equals(datanode)) {
index = k;
break;
}
}
if (index < 0) {
if (LOG.isDebugEnabled()) {
LOG.debug("Datanode responded with a block volume id we did" +
" not request, omitting.");
}
continue;
}
// Place VolumeId at the same index as the DN's index in the list of
// replicas
List<VolumeId> volumeIds = blockVolumeIds.get(locBlock);
volumeIds.set(index, id);
}
}
return blockVolumeIds;
}
/**
* Helper method to combine a list of {@link LocatedBlock} with associated
* {@link VolumeId} information to form a list of {@link BlockStorageLocation}
* .
*/
static BlockStorageLocation[] convertToVolumeBlockLocations(
List<LocatedBlock> blocks,
Map<LocatedBlock, List<VolumeId>> blockVolumeIds) throws IOException {
// Construct the final return value of VolumeBlockLocation[]
BlockLocation[] locations = DFSUtilClient.locatedBlocks2Locations(blocks);
List<BlockStorageLocation> volumeBlockLocs =
new ArrayList<BlockStorageLocation>(locations.length);
for (int i = 0; i < locations.length; i++) {
LocatedBlock locBlock = blocks.get(i);
List<VolumeId> volumeIds = blockVolumeIds.get(locBlock);
BlockStorageLocation bsLoc = new BlockStorageLocation(locations[i],
volumeIds.toArray(new VolumeId[0]));
volumeBlockLocs.add(bsLoc);
}
return volumeBlockLocs.toArray(new BlockStorageLocation[] {});
}
/**
* Callable that sets up an RPC proxy to a datanode and queries it for
* volume location information for a list of ExtendedBlocks.
*/
private static class VolumeBlockLocationCallable implements
Callable<HdfsBlocksMetadata> {
private final Configuration configuration;
private final int timeout;
private final DatanodeInfo datanode;
private final String poolId;
private final long[] blockIds;
private final List<Token<BlockTokenIdentifier>> dnTokens;
private final boolean connectToDnViaHostname;
private final Span parentSpan;
VolumeBlockLocationCallable(Configuration configuration,
DatanodeInfo datanode, String poolId, long []blockIds,
List<Token<BlockTokenIdentifier>> dnTokens, int timeout,
boolean connectToDnViaHostname, Span parentSpan) {
this.configuration = configuration;
this.timeout = timeout;
this.datanode = datanode;
this.poolId = poolId;
this.blockIds = blockIds;
this.dnTokens = dnTokens;
this.connectToDnViaHostname = connectToDnViaHostname;
this.parentSpan = parentSpan;
}
public DatanodeInfo getDatanodeInfo() {
return datanode;
}
@Override
public HdfsBlocksMetadata call() throws Exception {
HdfsBlocksMetadata metadata = null;
// Create the RPC proxy and make the RPC
ClientDatanodeProtocol cdp = null;
TraceScope scope =
Trace.startSpan("getHdfsBlocksMetadata", parentSpan);
try {
cdp = DFSUtil.createClientDatanodeProtocolProxy(datanode, configuration,
timeout, connectToDnViaHostname);
metadata = cdp.getHdfsBlocksMetadata(poolId, blockIds, dnTokens);
} catch (IOException e) {
// Bubble this up to the caller, handle with the Future
throw e;
} finally {
scope.close();
if (cdp != null) {
RPC.stopProxy(cdp);
}
}
return metadata;
}
}
}

View File

@ -23,6 +23,7 @@ import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
import org.apache.hadoop.hdfs.client.impl.DfsClientConf;
import org.apache.hadoop.hdfs.client.impl.DfsClientConf.ShortCircuitConf;
import org.apache.hadoop.hdfs.shortcircuit.DomainSocketFactory;
@ -138,8 +139,8 @@ public class ClientContext {
*/
@VisibleForTesting
public static ClientContext getFromConf(Configuration conf) {
return get(conf.get(DFSConfigKeys.DFS_CLIENT_CONTEXT,
DFSConfigKeys.DFS_CLIENT_CONTEXT_DEFAULT),
return get(conf.get(HdfsClientConfigKeys.DFS_CLIENT_CONTEXT,
HdfsClientConfigKeys.DFS_CLIENT_CONTEXT_DEFAULT),
new DfsClientConf(conf));
}
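getFromConf() now pulls the context name from HdfsClientConfigKeys rather than DFSConfigKeys. Contexts are cached by that name (a property of the surrounding class, not this hunk), so clients configured with the same value share one ClientContext. A hedged sketch; the context name is made up:

  Configuration conf = new Configuration();
  conf.set(HdfsClientConfigKeys.DFS_CLIENT_CONTEXT, "shared-context");
  // Repeated calls with the same context name should return the same cached instance.
  ClientContext ctx = ClientContext.getFromConf(conf);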

View File

@ -18,11 +18,11 @@
package org.apache.hadoop.hdfs;
import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_SECURITY_CRYPTO_CODEC_CLASSES_KEY_PREFIX;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_CACHE_DROP_BEHIND_READS;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_CACHE_DROP_BEHIND_WRITES;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_CACHE_READAHEAD;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_CONTEXT;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_CONTEXT_DEFAULT;
import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_CLIENT_CACHE_DROP_BEHIND_READS;
import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_CLIENT_CACHE_DROP_BEHIND_WRITES;
import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_CLIENT_CACHE_READAHEAD;
import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_CLIENT_CONTEXT;
import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_CLIENT_CONTEXT_DEFAULT;
import java.io.BufferedOutputStream;
import java.io.DataInputStream;
@ -39,10 +39,8 @@ import java.net.URI;
import java.net.UnknownHostException;
import java.security.GeneralSecurityException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.EnumSet;
import java.util.HashMap;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
import java.util.Random;
@ -70,7 +68,6 @@ import org.apache.hadoop.crypto.key.KeyProvider.KeyVersion;
import org.apache.hadoop.crypto.key.KeyProviderCryptoExtension;
import org.apache.hadoop.crypto.key.KeyProviderCryptoExtension.EncryptedKeyVersion;
import org.apache.hadoop.fs.BlockLocation;
import org.apache.hadoop.fs.BlockStorageLocation;
import org.apache.hadoop.fs.CacheFlag;
import org.apache.hadoop.fs.ContentSummary;
import org.apache.hadoop.fs.CreateFlag;
@ -91,7 +88,6 @@ import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.RemoteIterator;
import org.apache.hadoop.fs.StorageType;
import org.apache.hadoop.fs.UnresolvedLinkException;
import org.apache.hadoop.fs.VolumeId;
import org.apache.hadoop.fs.XAttr;
import org.apache.hadoop.fs.XAttrSetFlag;
import org.apache.hadoop.fs.permission.AclEntry;
@ -122,7 +118,6 @@ import org.apache.hadoop.hdfs.protocol.ErasureCodingZone;
import org.apache.hadoop.hdfs.protocol.EncryptionZone;
import org.apache.hadoop.hdfs.protocol.EncryptionZoneIterator;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.protocol.HdfsBlocksMetadata;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.RollingUpgradeAction;
@ -149,7 +144,7 @@ import org.apache.hadoop.hdfs.protocol.datatransfer.sasl.DataTransferSaslUtil;
import org.apache.hadoop.hdfs.protocol.datatransfer.sasl.SaslDataTransferClient;
import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto;
import org.apache.hadoop.hdfs.protocolPB.PBHelper;
import org.apache.hadoop.hdfs.protocolPB.PBHelperClient;
import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
import org.apache.hadoop.hdfs.security.token.block.DataEncryptionKey;
import org.apache.hadoop.hdfs.security.token.block.InvalidBlockTokenException;
@ -448,12 +443,12 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
*/
int getDatanodeWriteTimeout(int numNodes) {
final int t = dfsClientConf.getDatanodeSocketWriteTimeout();
return t > 0? t + HdfsServerConstants.WRITE_TIMEOUT_EXTENSION*numNodes: 0;
return t > 0? t + HdfsConstants.WRITE_TIMEOUT_EXTENSION*numNodes: 0;
}
int getDatanodeReadTimeout(int numNodes) {
final int t = dfsClientConf.getSocketTimeout();
return t > 0? HdfsServerConstants.READ_TIMEOUT_EXTENSION*numNodes + t: 0;
return t > 0? HdfsConstants.READ_TIMEOUT_EXTENSION*numNodes + t: 0;
}
@VisibleForTesting
@ -714,30 +709,6 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
}
}
private static final Map<String, Boolean> localAddrMap = Collections
.synchronizedMap(new HashMap<String, Boolean>());
public static boolean isLocalAddress(InetSocketAddress targetAddr) {
InetAddress addr = targetAddr.getAddress();
Boolean cached = localAddrMap.get(addr.getHostAddress());
if (cached != null) {
if (LOG.isTraceEnabled()) {
LOG.trace("Address " + targetAddr +
(cached ? " is local" : " is not local"));
}
return cached;
}
boolean local = NetUtils.isLocalAddress(addr);
if (LOG.isTraceEnabled()) {
LOG.trace("Address " + targetAddr +
(local ? " is local" : " is not local"));
}
localAddrMap.put(addr.getHostAddress(), local);
return local;
}
/**
* Cancel a delegation token
* @param token the token to cancel
@ -930,87 +901,6 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
}
}
/**
* Get block location information about a list of {@link HdfsBlockLocation}.
* Used by {@link DistributedFileSystem#getFileBlockStorageLocations(List)} to
* get {@link BlockStorageLocation}s for blocks returned by
* {@link DistributedFileSystem#getFileBlockLocations(org.apache.hadoop.fs.FileStatus, long, long)}
* .
*
* This is done by making a round of RPCs to the associated datanodes, asking
* the volume of each block replica. The returned array of
* {@link BlockStorageLocation} expose this information as a
* {@link VolumeId}.
*
* @param blockLocations
* target blocks on which to query volume location information
* @return volumeBlockLocations original block array augmented with additional
* volume location information for each replica.
*/
public BlockStorageLocation[] getBlockStorageLocations(
List<BlockLocation> blockLocations) throws IOException,
UnsupportedOperationException, InvalidBlockTokenException {
checkOpen();
if (!getConf().isHdfsBlocksMetadataEnabled()) {
throw new UnsupportedOperationException("Datanode-side support for " +
"getVolumeBlockLocations() must also be enabled in the client " +
"configuration.");
}
// Downcast blockLocations and fetch out required LocatedBlock(s)
List<LocatedBlock> blocks = new ArrayList<LocatedBlock>();
for (BlockLocation loc : blockLocations) {
if (!(loc instanceof HdfsBlockLocation)) {
throw new ClassCastException("DFSClient#getVolumeBlockLocations " +
"expected to be passed HdfsBlockLocations");
}
HdfsBlockLocation hdfsLoc = (HdfsBlockLocation) loc;
blocks.add(hdfsLoc.getLocatedBlock());
}
// Re-group the LocatedBlocks to be grouped by datanodes, with the values
// a list of the LocatedBlocks on the datanode.
Map<DatanodeInfo, List<LocatedBlock>> datanodeBlocks =
new LinkedHashMap<DatanodeInfo, List<LocatedBlock>>();
for (LocatedBlock b : blocks) {
for (DatanodeInfo info : b.getLocations()) {
if (!datanodeBlocks.containsKey(info)) {
datanodeBlocks.put(info, new ArrayList<LocatedBlock>());
}
List<LocatedBlock> l = datanodeBlocks.get(info);
l.add(b);
}
}
// Make RPCs to the datanodes to get volume locations for its replicas
TraceScope scope =
Trace.startSpan("getBlockStorageLocations", traceSampler);
Map<DatanodeInfo, HdfsBlocksMetadata> metadatas;
try {
metadatas = BlockStorageLocationUtil.
queryDatanodesForHdfsBlocksMetadata(conf, datanodeBlocks,
getConf().getFileBlockStorageLocationsNumThreads(),
getConf().getFileBlockStorageLocationsTimeoutMs(),
getConf().isConnectToDnViaHostname());
if (LOG.isTraceEnabled()) {
LOG.trace("metadata returned: "
+ Joiner.on("\n").withKeyValueSeparator("=").join(metadatas));
}
} finally {
scope.close();
}
// Regroup the returned VolumeId metadata to again be grouped by
// LocatedBlock rather than by datanode
Map<LocatedBlock, List<VolumeId>> blockVolumeIds = BlockStorageLocationUtil
.associateVolumeIdsWithBlocks(blocks, metadatas);
// Combine original BlockLocations with new VolumeId information
BlockStorageLocation[] volumeBlockLocations = BlockStorageLocationUtil
.convertToVolumeBlockLocations(blocks, blockVolumeIds);
return volumeBlockLocations;
}
/**
* Decrypts a EDEK by consulting the KeyProvider.
*/
@ -1955,7 +1845,7 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
new Sender(out).blockChecksum(block, lb.getBlockToken());
final BlockOpResponseProto reply =
BlockOpResponseProto.parseFrom(PBHelper.vintPrefixed(in));
BlockOpResponseProto.parseFrom(PBHelperClient.vintPrefixed(in));
String logInfo = "for block " + block + " from datanode " + datanodes[j];
DataTransferProtoUtil.checkBlockOpStatus(reply, logInfo);
@ -1987,7 +1877,7 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
// read crc-type
final DataChecksum.Type ct;
if (checksumData.hasCrcType()) {
ct = PBHelper.convert(checksumData
ct = PBHelperClient.convert(checksumData
.getCrcType());
} else {
LOG.debug("Retrieving checksum from an earlier-version DataNode: " +
@ -2115,11 +2005,11 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
new Sender(out).readBlock(lb.getBlock(), lb.getBlockToken(), clientName,
0, 1, true, CachingStrategy.newDefaultStrategy());
final BlockOpResponseProto reply =
BlockOpResponseProto.parseFrom(PBHelper.vintPrefixed(in));
BlockOpResponseProto.parseFrom(PBHelperClient.vintPrefixed(in));
String logInfo = "trying to read " + lb.getBlock() + " from datanode " + dn;
DataTransferProtoUtil.checkBlockOpStatus(reply, logInfo);
return PBHelper.convert(reply.getReadOpChecksumInfo().getChecksum().getType());
return PBHelperClient.convert(reply.getReadOpChecksumInfo().getChecksum().getType());
} finally {
IOUtils.cleanup(null, pair.in, pair.out);
}

View File

@ -45,14 +45,20 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
public static final String DFS_STREAM_BUFFER_SIZE_KEY = "dfs.stream-buffer-size";
public static final int DFS_STREAM_BUFFER_SIZE_DEFAULT = 4096;
public static final String DFS_BYTES_PER_CHECKSUM_KEY = "dfs.bytes-per-checksum";
public static final int DFS_BYTES_PER_CHECKSUM_DEFAULT = 512;
public static final String DFS_BYTES_PER_CHECKSUM_KEY =
HdfsClientConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY;
public static final int DFS_BYTES_PER_CHECKSUM_DEFAULT =
HdfsClientConfigKeys.DFS_BYTES_PER_CHECKSUM_DEFAULT;
public static final String DFS_USER_HOME_DIR_PREFIX_KEY = "dfs.user.home.dir.prefix";
public static final String DFS_USER_HOME_DIR_PREFIX_DEFAULT = "/user";
public static final String DFS_CHECKSUM_TYPE_KEY = "dfs.checksum.type";
public static final String DFS_CHECKSUM_TYPE_DEFAULT = "CRC32C";
public static final String DFS_HDFS_BLOCKS_METADATA_ENABLED = "dfs.datanode.hdfs-blocks-metadata.enabled";
public static final boolean DFS_HDFS_BLOCKS_METADATA_ENABLED_DEFAULT = false;
public static final String DFS_CHECKSUM_TYPE_KEY = HdfsClientConfigKeys
.DFS_CHECKSUM_TYPE_KEY;
public static final String DFS_CHECKSUM_TYPE_DEFAULT =
HdfsClientConfigKeys.DFS_CHECKSUM_TYPE_DEFAULT;
public static final String DFS_HDFS_BLOCKS_METADATA_ENABLED =
HdfsClientConfigKeys.DFS_HDFS_BLOCKS_METADATA_ENABLED;
public static final boolean DFS_HDFS_BLOCKS_METADATA_ENABLED_DEFAULT =
HdfsClientConfigKeys.DFS_HDFS_BLOCKS_METADATA_ENABLED_DEFAULT;
public static final String DFS_WEBHDFS_ACL_PERMISSION_PATTERN_DEFAULT =
HdfsClientConfigKeys.DFS_WEBHDFS_ACL_PERMISSION_PATTERN_DEFAULT;
@ -211,6 +217,8 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
public static final String DFS_NAMENODE_EDITS_DIR_MINIMUM_KEY = "dfs.namenode.edits.dir.minimum";
public static final int DFS_NAMENODE_EDITS_DIR_MINIMUM_DEFAULT = 1;
public static final String DFS_NAMENODE_QUOTA_INIT_THREADS_KEY = "dfs.namenode.quota.init-threads";
public static final int DFS_NAMENODE_QUOTA_INIT_THREADS_DEFAULT = 4;
public static final String DFS_NAMENODE_EDIT_LOG_AUTOROLL_MULTIPLIER_THRESHOLD = "dfs.namenode.edit.log.autoroll.multiplier.threshold";
public static final float DFS_NAMENODE_EDIT_LOG_AUTOROLL_MULTIPLIER_THRESHOLD_DEFAULT = 2.0f;
@ -315,6 +323,7 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
public static final int DFS_NAMENODE_MAX_XATTRS_PER_INODE_DEFAULT = 32;
public static final String DFS_NAMENODE_MAX_XATTR_SIZE_KEY = "dfs.namenode.fs-limits.max-xattr-size";
public static final int DFS_NAMENODE_MAX_XATTR_SIZE_DEFAULT = 16384;
public static final int DFS_NAMENODE_MAX_XATTR_SIZE_HARD_LIMIT = 32768;
//Following keys have no defaults
@ -347,6 +356,10 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
public static final String DFS_NAMENODE_AUDIT_LOG_ASYNC_KEY = "dfs.namenode.audit.log.async";
public static final boolean DFS_NAMENODE_AUDIT_LOG_ASYNC_DEFAULT = false;
public static final String DFS_NAMENODE_AUDIT_LOG_DEBUG_CMDLIST = "dfs.namenode.audit.log.debug.cmdlist";
public static final String DFS_NAMENODE_METRICS_LOGGER_PERIOD_SECONDS_KEY =
"dfs.namenode.metrics.logger.period.seconds";
public static final int DFS_NAMENODE_METRICS_LOGGER_PERIOD_SECONDS_DEFAULT =
600;
public static final String DFS_BALANCER_MOVEDWINWIDTH_KEY = "dfs.balancer.movedWinWidth";
public static final long DFS_BALANCER_MOVEDWINWIDTH_DEFAULT = 5400*1000L;
@ -496,7 +509,8 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
public static final long DFS_DATANODE_AVAILABLE_SPACE_VOLUME_CHOOSING_POLICY_BALANCED_SPACE_THRESHOLD_DEFAULT = 1024L * 1024L * 1024L * 10L; // 10 GB
public static final String DFS_DATANODE_AVAILABLE_SPACE_VOLUME_CHOOSING_POLICY_BALANCED_SPACE_PREFERENCE_FRACTION_KEY = "dfs.datanode.available-space-volume-choosing-policy.balanced-space-preference-fraction";
public static final float DFS_DATANODE_AVAILABLE_SPACE_VOLUME_CHOOSING_POLICY_BALANCED_SPACE_PREFERENCE_FRACTION_DEFAULT = 0.75f;
public static final String DFS_DATANODE_SOCKET_WRITE_TIMEOUT_KEY = "dfs.datanode.socket.write.timeout";
public static final String DFS_DATANODE_SOCKET_WRITE_TIMEOUT_KEY =
HdfsClientConfigKeys.DFS_DATANODE_SOCKET_WRITE_TIMEOUT_KEY;
public static final String DFS_DATANODE_STARTUP_KEY = "dfs.datanode.startup";
public static final String DFS_NAMENODE_PLUGINS_KEY = "dfs.namenode.plugins";
public static final String DFS_WEB_UGI_KEY = "dfs.web.ugi";
@ -507,8 +521,14 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
public static final String DFS_DATANODE_USER_NAME_KEY = DFS_DATANODE_KERBEROS_PRINCIPAL_KEY;
public static final String DFS_DATANODE_SHARED_FILE_DESCRIPTOR_PATHS = "dfs.datanode.shared.file.descriptor.paths";
public static final String DFS_DATANODE_SHARED_FILE_DESCRIPTOR_PATHS_DEFAULT = "/dev/shm,/tmp";
public static final String DFS_SHORT_CIRCUIT_SHARED_MEMORY_WATCHER_INTERRUPT_CHECK_MS = "dfs.short.circuit.shared.memory.watcher.interrupt.check.ms";
public static final int DFS_SHORT_CIRCUIT_SHARED_MEMORY_WATCHER_INTERRUPT_CHECK_MS_DEFAULT = 60000;
public static final String
DFS_SHORT_CIRCUIT_SHARED_MEMORY_WATCHER_INTERRUPT_CHECK_MS =
HdfsClientConfigKeys
.DFS_SHORT_CIRCUIT_SHARED_MEMORY_WATCHER_INTERRUPT_CHECK_MS;
public static final int
DFS_SHORT_CIRCUIT_SHARED_MEMORY_WATCHER_INTERRUPT_CHECK_MS_DEFAULT =
HdfsClientConfigKeys
.DFS_SHORT_CIRCUIT_SHARED_MEMORY_WATCHER_INTERRUPT_CHECK_MS_DEFAULT;
public static final String DFS_NAMENODE_KEYTAB_FILE_KEY = "dfs.namenode.keytab.file";
public static final String DFS_NAMENODE_KERBEROS_PRINCIPAL_KEY =
HdfsClientConfigKeys.DFS_NAMENODE_KERBEROS_PRINCIPAL_KEY;
@ -549,8 +569,10 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
0.6f;
public static final String DFS_BLOCK_LOCAL_PATH_ACCESS_USER_KEY = "dfs.block.local-path-access.user";
public static final String DFS_DOMAIN_SOCKET_PATH_KEY = "dfs.domain.socket.path";
public static final String DFS_DOMAIN_SOCKET_PATH_DEFAULT = "";
public static final String DFS_DOMAIN_SOCKET_PATH_KEY =
HdfsClientConfigKeys.DFS_DOMAIN_SOCKET_PATH_KEY;
public static final String DFS_DOMAIN_SOCKET_PATH_DEFAULT =
HdfsClientConfigKeys.DFS_DOMAIN_SOCKET_PATH_DEFAULT;
public static final String DFS_STORAGE_POLICY_ENABLED_KEY = "dfs.storage.policy.enabled";
public static final boolean DFS_STORAGE_POLICY_ENABLED_DEFAULT = true;
@ -972,64 +994,136 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
= HdfsClientConfigKeys.HedgedRead.THREADPOOL_SIZE_DEFAULT;
@Deprecated
public static final String DFS_CLIENT_WRITE_PACKET_SIZE_KEY =
HdfsClientConfigKeys.DFS_CLIENT_WRITE_PACKET_SIZE_KEY;
@Deprecated
public static final int DFS_CLIENT_WRITE_PACKET_SIZE_DEFAULT =
HdfsClientConfigKeys.DFS_CLIENT_WRITE_PACKET_SIZE_DEFAULT;
public static final String DFS_CLIENT_WRITE_PACKET_SIZE_KEY = "dfs.client-write-packet-size";
public static final int DFS_CLIENT_WRITE_PACKET_SIZE_DEFAULT = 64*1024;
@Deprecated
public static final String DFS_CLIENT_SOCKET_TIMEOUT_KEY =
HdfsClientConfigKeys.DFS_CLIENT_SOCKET_TIMEOUT_KEY;
@Deprecated
public static final String DFS_CLIENT_SOCKET_CACHE_CAPACITY_KEY =
HdfsClientConfigKeys.DFS_CLIENT_SOCKET_CACHE_CAPACITY_KEY;
@Deprecated
public static final int DFS_CLIENT_SOCKET_CACHE_CAPACITY_DEFAULT =
HdfsClientConfigKeys.DFS_CLIENT_SOCKET_CACHE_CAPACITY_DEFAULT;
@Deprecated
public static final String DFS_CLIENT_SOCKET_CACHE_EXPIRY_MSEC_KEY =
HdfsClientConfigKeys.DFS_CLIENT_SOCKET_CACHE_EXPIRY_MSEC_KEY;
@Deprecated
public static final long DFS_CLIENT_SOCKET_CACHE_EXPIRY_MSEC_DEFAULT =
HdfsClientConfigKeys.DFS_CLIENT_SOCKET_CACHE_EXPIRY_MSEC_DEFAULT;
public static final String DFS_CLIENT_SOCKET_TIMEOUT_KEY = "dfs.client.socket-timeout";
public static final String DFS_CLIENT_SOCKET_CACHE_CAPACITY_KEY = "dfs.client.socketcache.capacity";
public static final int DFS_CLIENT_SOCKET_CACHE_CAPACITY_DEFAULT = 16;
public static final String DFS_CLIENT_SOCKET_CACHE_EXPIRY_MSEC_KEY = "dfs.client.socketcache.expiryMsec";
public static final long DFS_CLIENT_SOCKET_CACHE_EXPIRY_MSEC_DEFAULT = 3000;
@Deprecated
public static final String DFS_CLIENT_USE_DN_HOSTNAME =
HdfsClientConfigKeys.DFS_CLIENT_USE_DN_HOSTNAME;
@Deprecated
public static final boolean DFS_CLIENT_USE_DN_HOSTNAME_DEFAULT =
HdfsClientConfigKeys.DFS_CLIENT_USE_DN_HOSTNAME_DEFAULT;
@Deprecated
public static final String DFS_CLIENT_CACHE_DROP_BEHIND_WRITES =
HdfsClientConfigKeys.DFS_CLIENT_CACHE_DROP_BEHIND_WRITES;
@Deprecated
public static final String DFS_CLIENT_CACHE_DROP_BEHIND_READS =
HdfsClientConfigKeys.DFS_CLIENT_CACHE_DROP_BEHIND_READS;
@Deprecated
public static final String DFS_CLIENT_CACHE_READAHEAD =
HdfsClientConfigKeys.DFS_CLIENT_CACHE_READAHEAD;
@Deprecated
public static final String DFS_CLIENT_CACHED_CONN_RETRY_KEY =
HdfsClientConfigKeys.DFS_CLIENT_CACHED_CONN_RETRY_KEY;
@Deprecated
public static final int DFS_CLIENT_CACHED_CONN_RETRY_DEFAULT =
HdfsClientConfigKeys.DFS_CLIENT_CACHED_CONN_RETRY_DEFAULT;
public static final String DFS_CLIENT_USE_DN_HOSTNAME = "dfs.client.use.datanode.hostname";
public static final boolean DFS_CLIENT_USE_DN_HOSTNAME_DEFAULT = false;
public static final String DFS_CLIENT_CACHE_DROP_BEHIND_WRITES = "dfs.client.cache.drop.behind.writes";
public static final String DFS_CLIENT_CACHE_DROP_BEHIND_READS = "dfs.client.cache.drop.behind.reads";
public static final String DFS_CLIENT_CACHE_READAHEAD = "dfs.client.cache.readahead";
public static final String DFS_CLIENT_CACHED_CONN_RETRY_KEY = "dfs.client.cached.conn.retry";
public static final int DFS_CLIENT_CACHED_CONN_RETRY_DEFAULT = 3;
@Deprecated
public static final String DFS_CLIENT_CONTEXT = HdfsClientConfigKeys
.DFS_CLIENT_CONTEXT;
@Deprecated
public static final String DFS_CLIENT_CONTEXT_DEFAULT =
HdfsClientConfigKeys.DFS_CLIENT_CONTEXT_DEFAULT;
@Deprecated
public static final String
DFS_CLIENT_FILE_BLOCK_STORAGE_LOCATIONS_NUM_THREADS =
HdfsClientConfigKeys.DFS_CLIENT_FILE_BLOCK_STORAGE_LOCATIONS_NUM_THREADS;
@Deprecated
public static final int
DFS_CLIENT_FILE_BLOCK_STORAGE_LOCATIONS_NUM_THREADS_DEFAULT =
HdfsClientConfigKeys
.DFS_CLIENT_FILE_BLOCK_STORAGE_LOCATIONS_NUM_THREADS_DEFAULT;
@Deprecated
public static final String
DFS_CLIENT_FILE_BLOCK_STORAGE_LOCATIONS_TIMEOUT_MS =
HdfsClientConfigKeys.DFS_CLIENT_FILE_BLOCK_STORAGE_LOCATIONS_TIMEOUT_MS;
@Deprecated
public static final int
DFS_CLIENT_FILE_BLOCK_STORAGE_LOCATIONS_TIMEOUT_MS_DEFAULT =
HdfsClientConfigKeys
.DFS_CLIENT_FILE_BLOCK_STORAGE_LOCATIONS_TIMEOUT_MS_DEFAULT;
public static final String DFS_CLIENT_CONTEXT = "dfs.client.context";
public static final String DFS_CLIENT_CONTEXT_DEFAULT = "default";
public static final String DFS_CLIENT_FILE_BLOCK_STORAGE_LOCATIONS_NUM_THREADS = "dfs.client.file-block-storage-locations.num-threads";
public static final int DFS_CLIENT_FILE_BLOCK_STORAGE_LOCATIONS_NUM_THREADS_DEFAULT = 10;
public static final String DFS_CLIENT_FILE_BLOCK_STORAGE_LOCATIONS_TIMEOUT_MS = "dfs.client.file-block-storage-locations.timeout.millis";
public static final int DFS_CLIENT_FILE_BLOCK_STORAGE_LOCATIONS_TIMEOUT_MS_DEFAULT = 1000;
public static final String DFS_CLIENT_DATANODE_RESTART_TIMEOUT_KEY = "dfs.client.datanode-restart.timeout";
public static final long DFS_CLIENT_DATANODE_RESTART_TIMEOUT_DEFAULT = 30;
@Deprecated
public static final String DFS_CLIENT_DATANODE_RESTART_TIMEOUT_KEY =
HdfsClientConfigKeys.DFS_CLIENT_DATANODE_RESTART_TIMEOUT_KEY;
@Deprecated
public static final long DFS_CLIENT_DATANODE_RESTART_TIMEOUT_DEFAULT =
HdfsClientConfigKeys.DFS_CLIENT_DATANODE_RESTART_TIMEOUT_DEFAULT;
public static final String DFS_CLIENT_HTTPS_KEYSTORE_RESOURCE_KEY = "dfs.client.https.keystore.resource";
public static final String DFS_CLIENT_HTTPS_KEYSTORE_RESOURCE_DEFAULT = "ssl-client.xml";
public static final String DFS_CLIENT_HTTPS_NEED_AUTH_KEY = "dfs.client.https.need-auth";
public static final boolean DFS_CLIENT_HTTPS_NEED_AUTH_DEFAULT = false;
  // Much code in HDFS is not yet updated to use these keys.
  // The initial delay (in ms) for locateFollowingBlock; the delay doubles (exponential backoff) on each retry.
public static final String DFS_CLIENT_MAX_BLOCK_ACQUIRE_FAILURES_KEY = "dfs.client.max.block.acquire.failures";
public static final int DFS_CLIENT_MAX_BLOCK_ACQUIRE_FAILURES_DEFAULT = 3;
@Deprecated
public static final String DFS_CLIENT_MAX_BLOCK_ACQUIRE_FAILURES_KEY =
HdfsClientConfigKeys.DFS_CLIENT_MAX_BLOCK_ACQUIRE_FAILURES_KEY;
@Deprecated
public static final int DFS_CLIENT_MAX_BLOCK_ACQUIRE_FAILURES_DEFAULT =
HdfsClientConfigKeys.DFS_CLIENT_MAX_BLOCK_ACQUIRE_FAILURES_DEFAULT;
public static final String DFS_CLIENT_USE_LEGACY_BLOCKREADER = "dfs.client.use.legacy.blockreader";
public static final boolean DFS_CLIENT_USE_LEGACY_BLOCKREADER_DEFAULT = false;
public static final String DFS_CLIENT_USE_LEGACY_BLOCKREADERLOCAL = "dfs.client.use.legacy.blockreader.local";
public static final boolean DFS_CLIENT_USE_LEGACY_BLOCKREADERLOCAL_DEFAULT = false;
@Deprecated
public static final String DFS_CLIENT_USE_LEGACY_BLOCKREADER =
HdfsClientConfigKeys.DFS_CLIENT_USE_LEGACY_BLOCKREADER;
@Deprecated
public static final boolean DFS_CLIENT_USE_LEGACY_BLOCKREADER_DEFAULT =
HdfsClientConfigKeys.DFS_CLIENT_USE_LEGACY_BLOCKREADER_DEFAULT;
@Deprecated
public static final String DFS_CLIENT_USE_LEGACY_BLOCKREADERLOCAL =
HdfsClientConfigKeys.DFS_CLIENT_USE_LEGACY_BLOCKREADERLOCAL;
@Deprecated
public static final boolean DFS_CLIENT_USE_LEGACY_BLOCKREADERLOCAL_DEFAULT
= HdfsClientConfigKeys.DFS_CLIENT_USE_LEGACY_BLOCKREADERLOCAL_DEFAULT;
public static final String DFS_CLIENT_LOCAL_INTERFACES = "dfs.client.local.interfaces";
public static final String DFS_CLIENT_DOMAIN_SOCKET_DATA_TRAFFIC = "dfs.client.domain.socket.data.traffic";
public static final boolean DFS_CLIENT_DOMAIN_SOCKET_DATA_TRAFFIC_DEFAULT = false;
@Deprecated
public static final String DFS_CLIENT_DOMAIN_SOCKET_DATA_TRAFFIC =
HdfsClientConfigKeys.DFS_CLIENT_DOMAIN_SOCKET_DATA_TRAFFIC;
@Deprecated
public static final boolean DFS_CLIENT_DOMAIN_SOCKET_DATA_TRAFFIC_DEFAULT =
HdfsClientConfigKeys.DFS_CLIENT_DOMAIN_SOCKET_DATA_TRAFFIC_DEFAULT;
  // The number of NN responses dropped by the client proactively in each RPC call.
  // For testing the NN retry cache, set this property to a positive value.
public static final String DFS_CLIENT_TEST_DROP_NAMENODE_RESPONSE_NUM_KEY = "dfs.client.test.drop.namenode.response.number";
public static final int DFS_CLIENT_TEST_DROP_NAMENODE_RESPONSE_NUM_DEFAULT = 0;
@Deprecated
public static final String DFS_CLIENT_SLOW_IO_WARNING_THRESHOLD_KEY =
"dfs.client.slow.io.warning.threshold.ms";
public static final long DFS_CLIENT_SLOW_IO_WARNING_THRESHOLD_DEFAULT = 30000;
HdfsClientConfigKeys.DFS_CLIENT_SLOW_IO_WARNING_THRESHOLD_KEY;
@Deprecated
public static final long DFS_CLIENT_SLOW_IO_WARNING_THRESHOLD_DEFAULT =
HdfsClientConfigKeys.DFS_CLIENT_SLOW_IO_WARNING_THRESHOLD_DEFAULT;
@Deprecated
public static final String DFS_CLIENT_KEY_PROVIDER_CACHE_EXPIRY_MS =
"dfs.client.key.provider.cache.expiry";
HdfsClientConfigKeys.DFS_CLIENT_KEY_PROVIDER_CACHE_EXPIRY_MS;
@Deprecated
public static final long DFS_CLIENT_KEY_PROVIDER_CACHE_EXPIRY_DEFAULT =
TimeUnit.DAYS.toMillis(10); // 10 days
HdfsClientConfigKeys.DFS_CLIENT_KEY_PROVIDER_CACHE_EXPIRY_DEFAULT;
}
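The deprecated client constants above are now plain aliases of the HdfsClientConfigKeys values, so the underlying property names do not change and existing call sites keep resolving the same configuration entries. A minimal sketch of that aliasing, assuming hadoop-common and hadoop-hdfs-client are on the classpath (illustration only, not part of this patch):

// Illustration: the server-side and client-side constants name the same property.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;

public class KeyAliasCheck {
  public static void main(String[] args) {
    // Both constants hold the same "dfs.bytes-per-checksum" string.
    System.out.println(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY
        .equals(HdfsClientConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY));   // true

    Configuration conf = new Configuration();
    conf.setInt(HdfsClientConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, 1024);
    // Reading through the deprecated server-side constant sees the same value.
    System.out.println(
        conf.getInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, -1));  // 1024
  }
}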

View File

@ -35,6 +35,7 @@ import org.apache.hadoop.fs.FileEncryptionInfo;
import org.apache.hadoop.fs.ParentNotDirectoryException;
import org.apache.hadoop.fs.Syncable;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
import org.apache.hadoop.hdfs.client.HdfsDataOutputStream;
import org.apache.hadoop.hdfs.client.HdfsDataOutputStream.SyncFlag;
import org.apache.hadoop.hdfs.client.impl.DfsClientConf;
@ -202,7 +203,7 @@ public class DFSOutputStream extends FSOutputSummer
}
if (blockSize % bytesPerChecksum != 0) {
throw new HadoopIllegalArgumentException("Invalid values: "
+ DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY + " (=" + bytesPerChecksum
+ HdfsClientConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY + " (=" + bytesPerChecksum
+ ") must divide block size (=" + blockSize + ").");
}
this.byteArrayManager = dfsClient.getClientContext().getByteArrayManager();

View File

@ -67,7 +67,7 @@ import org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck;
import org.apache.hadoop.hdfs.protocol.datatransfer.Sender;
import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status;
import org.apache.hadoop.hdfs.protocolPB.PBHelper;
import org.apache.hadoop.hdfs.protocolPB.PBHelperClient;
import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockStoragePolicySuite;
import org.apache.hadoop.hdfs.server.datanode.CachingStrategy;
@ -1255,7 +1255,7 @@ class DataStreamer extends Daemon {
//ack
BlockOpResponseProto response =
BlockOpResponseProto.parseFrom(PBHelper.vintPrefixed(in));
BlockOpResponseProto.parseFrom(PBHelperClient.vintPrefixed(in));
if (SUCCESS != response.getStatus()) {
throw new IOException("Failed to add a datanode");
}
@ -1552,7 +1552,7 @@ class DataStreamer extends Daemon {
// receive ack for connect
BlockOpResponseProto resp = BlockOpResponseProto.parseFrom(
PBHelper.vintPrefixed(blockReplyStream));
PBHelperClient.vintPrefixed(blockReplyStream));
pipelineStatus = resp.getStatus();
firstBadLink = resp.getFirstBadLink();

View File

@ -33,7 +33,6 @@ import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.BlockLocation;
import org.apache.hadoop.fs.BlockStorageLocation;
import org.apache.hadoop.fs.BlockStoragePolicySpi;
import org.apache.hadoop.fs.CacheFlag;
import org.apache.hadoop.fs.ContentSummary;
@ -58,7 +57,6 @@ import org.apache.hadoop.fs.PathFilter;
import org.apache.hadoop.fs.RemoteIterator;
import org.apache.hadoop.fs.UnresolvedLinkException;
import org.apache.hadoop.fs.UnsupportedFileSystemException;
import org.apache.hadoop.fs.VolumeId;
import org.apache.hadoop.fs.permission.AclEntry;
import org.apache.hadoop.fs.permission.AclStatus;
import org.apache.hadoop.fs.permission.FsPermission;
@ -86,7 +84,6 @@ import org.apache.hadoop.hdfs.protocol.HdfsLocatedFileStatus;
import org.apache.hadoop.hdfs.protocol.RollingUpgradeInfo;
import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport;
import org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus;
import org.apache.hadoop.hdfs.security.token.block.InvalidBlockTokenException;
import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.apache.hadoop.io.Text;
@ -235,42 +232,6 @@ public class DistributedFileSystem extends FileSystem {
}.resolve(this, absF);
}
/**
* This API has been deprecated since the NameNode now tracks datanode
* storages separately. Storage IDs can be obtained from {@link
* BlockLocation#getStorageIds()}, which are functionally equivalent to
* the volume IDs returned here (though returned as a String rather than a byte[]).
*
* Used to query storage location information for a list of blocks. This list
* of blocks is normally constructed via a series of calls to
* {@link DistributedFileSystem#getFileBlockLocations(Path, long, long)} to
* get the blocks for ranges of a file.
*
* The returned array of {@link BlockStorageLocation} augments
* {@link BlockLocation} with a {@link VolumeId} per block replica. The
* VolumeId specifies the volume on the datanode on which the replica resides.
* The VolumeId associated with a replica may be null because volume
* information can be unavailable if the corresponding datanode is down or
* if the requested block is not found.
*
* This API is unstable, and datanode-side support is disabled by default. It
* can be enabled by setting "dfs.datanode.hdfs-blocks-metadata.enabled" to
* true.
*
* @param blocks
* List of target BlockLocations to query volume location information
* @return volumeBlockLocations Augmented array of
* {@link BlockStorageLocation}s containing additional volume location
* information for each replica of each block.
*/
@InterfaceStability.Unstable
@Deprecated
public BlockStorageLocation[] getFileBlockStorageLocations(
List<BlockLocation> blocks) throws IOException,
UnsupportedOperationException, InvalidBlockTokenException {
return dfs.getBlockStorageLocations(blocks);
}
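As the deprecation note above suggests, callers can read storage IDs directly off BlockLocation instead of calling the removed getFileBlockStorageLocations(). A hedged sketch of that replacement path, with a made-up file path (not part of this patch):

// Sketch: per-replica storage IDs via getFileBlockLocations().
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.BlockLocation;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class StorageIdLookup {
  public static void main(String[] args) throws Exception {
    FileSystem fs = FileSystem.get(new Configuration());
    Path file = new Path("/tmp/example.txt");          // hypothetical path
    BlockLocation[] locs = fs.getFileBlockLocations(
        fs.getFileStatus(file), 0, Long.MAX_VALUE);
    for (BlockLocation loc : locs) {
      // One storage ID per replica, aligned with getHosts()/getNames().
      for (String storageId : loc.getStorageIds()) {
        System.out.println(loc.getOffset() + " -> " + storageId);
      }
    }
  }
}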
@Override
public void setVerifyChecksum(boolean verifyChecksum) {
this.verifyChecksum = verifyChecksum;

View File

@ -0,0 +1,126 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.util.EnumSet;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.fs.ReadOption;
import org.apache.hadoop.hdfs.shortcircuit.ClientMmap;
import org.apache.hadoop.util.DataChecksum;
/**
* An ExternalBlockReader uses pluggable ReplicaAccessor objects to read from
* replicas.
*/
@InterfaceAudience.Private
public final class ExternalBlockReader implements BlockReader {
private final ReplicaAccessor accessor;
private final long visibleLength;
private long pos;
ExternalBlockReader(ReplicaAccessor accessor, long visibleLength,
long startOffset) {
this.accessor = accessor;
this.visibleLength = visibleLength;
this.pos = startOffset;
}
@Override
public int read(byte[] buf, int off, int len) throws IOException {
int nread = accessor.read(pos, buf, off, len);
pos += nread;
return nread;
}
@Override
public int read(ByteBuffer buf) throws IOException {
int nread = accessor.read(pos, buf);
pos += nread;
return nread;
}
@Override
public long skip(long n) throws IOException {
// You cannot skip backwards
if (n <= 0) {
return 0;
}
// You can't skip past the end of the replica.
long oldPos = pos;
pos += n;
if (pos > visibleLength) {
pos = visibleLength;
}
return pos - oldPos;
}
@Override
public int available() throws IOException {
// We return the number of bytes that we haven't read yet from the
// replica, based on our current position. Some of the other block
// readers return a shorter length than that. The only advantage to
// returning a shorter length is that the DFSInputStream will
// trash your block reader and create a new one if someone tries to
// seek() beyond the available() region.
long diff = visibleLength - pos;
if (diff > Integer.MAX_VALUE) {
return Integer.MAX_VALUE;
} else {
return (int)diff;
}
}
@Override
public void close() throws IOException {
accessor.close();
}
@Override
public void readFully(byte[] buf, int offset, int len) throws IOException {
BlockReaderUtil.readFully(this, buf, offset, len);
}
@Override
public int readAll(byte[] buf, int offset, int len) throws IOException {
return BlockReaderUtil.readAll(this, buf, offset, len);
}
@Override
public boolean isLocal() {
return accessor.isLocal();
}
@Override
public boolean isShortCircuit() {
return accessor.isShortCircuit();
}
@Override
public ClientMmap getClientMmap(EnumSet<ReadOption> opts) {
// For now, pluggable ReplicaAccessors do not support zero-copy.
return null;
}
@Override
public DataChecksum getDataChecksum() {
return null;
}
}
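The comments in skip() and available() above describe the position bookkeeping: skips are clamped to the visible length, and available() reports the unread remainder capped at Integer.MAX_VALUE. A small standalone illustration of that arithmetic, using assumed lengths (not part of this patch):

// Standalone illustration of the clamping behavior described above.
public class PositionClampDemo {
  public static void main(String[] args) {
    long visibleLength = 1024;   // hypothetical replica length
    long pos = 1000;             // hypothetical current position

    // skip(100): cannot move past the end of the replica.
    long requested = 100;
    long oldPos = pos;
    pos = Math.min(pos + requested, visibleLength);
    System.out.println("actually skipped: " + (pos - oldPos));   // 24

    // available(): bytes left to read, capped at Integer.MAX_VALUE.
    long diff = visibleLength - pos;
    int available = diff > Integer.MAX_VALUE ? Integer.MAX_VALUE : (int) diff;
    System.out.println("available: " + available);               // 0
  }
}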

View File

@ -93,7 +93,7 @@ public class HdfsConfiguration extends Configuration {
new DeprecationDelta("dfs.secondary.http.address",
DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY),
new DeprecationDelta("dfs.socket.timeout",
DFSConfigKeys.DFS_CLIENT_SOCKET_TIMEOUT_KEY),
HdfsClientConfigKeys.DFS_CLIENT_SOCKET_TIMEOUT_KEY),
new DeprecationDelta("fs.checkpoint.dir",
DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_DIR_KEY),
new DeprecationDelta("fs.checkpoint.edits.dir",
@ -127,19 +127,19 @@ public class HdfsConfiguration extends Configuration {
new DeprecationDelta("dfs.permissions.supergroup",
DFSConfigKeys.DFS_PERMISSIONS_SUPERUSERGROUP_KEY),
new DeprecationDelta("dfs.write.packet.size",
DFSConfigKeys.DFS_CLIENT_WRITE_PACKET_SIZE_KEY),
HdfsClientConfigKeys.DFS_CLIENT_WRITE_PACKET_SIZE_KEY),
new DeprecationDelta("dfs.block.size",
DFSConfigKeys.DFS_BLOCK_SIZE_KEY),
new DeprecationDelta("dfs.datanode.max.xcievers",
DFSConfigKeys.DFS_DATANODE_MAX_RECEIVER_THREADS_KEY),
new DeprecationDelta("io.bytes.per.checksum",
DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY),
HdfsClientConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY),
new DeprecationDelta("dfs.federation.nameservices",
DFSConfigKeys.DFS_NAMESERVICES),
new DeprecationDelta("dfs.federation.nameservice.id",
DFSConfigKeys.DFS_NAMESERVICE_ID),
new DeprecationDelta("dfs.client.file-block-storage-locations.timeout",
DFSConfigKeys.DFS_CLIENT_FILE_BLOCK_STORAGE_LOCATIONS_TIMEOUT_MS),
HdfsClientConfigKeys.DFS_CLIENT_FILE_BLOCK_STORAGE_LOCATIONS_TIMEOUT_MS),
});
}
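With the deprecation deltas above registered by HdfsConfiguration, a value set under a legacy name such as "dfs.socket.timeout" should be visible under the client-side key it now maps to. A minimal sketch of that expectation (illustration only, not part of this patch):

// Sketch: legacy property name resolving through a deprecation delta.
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;

public class DeprecationDeltaDemo {
  public static void main(String[] args) {
    HdfsConfiguration conf = new HdfsConfiguration();
    conf.set("dfs.socket.timeout", "120000");          // legacy property name
    // Expected to print 120000 via the new client-side key.
    System.out.println(
        conf.get(HdfsClientConfigKeys.DFS_CLIENT_SOCKET_TIMEOUT_KEY));
  }
}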

View File

@ -39,7 +39,7 @@ import org.apache.hadoop.hdfs.protocol.datatransfer.Sender;
import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto;
import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status;
import org.apache.hadoop.hdfs.protocolPB.PBHelper;
import org.apache.hadoop.hdfs.protocolPB.PBHelperClient;
import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
import org.apache.hadoop.hdfs.server.datanode.CachingStrategy;
import org.apache.hadoop.hdfs.shortcircuit.ClientMmap;
@ -351,7 +351,7 @@ public class RemoteBlockReader extends FSInputChecker implements BlockReader {
checksum.getBytesPerChecksum(),
checksum.getChecksumSize());
this.isLocal = DFSClient.isLocalAddress(NetUtils.
this.isLocal = DFSUtilClient.isLocalAddress(NetUtils.
createSocketAddr(datanodeID.getXferAddr()));
this.peer = peer;
@ -414,7 +414,7 @@ public class RemoteBlockReader extends FSInputChecker implements BlockReader {
new BufferedInputStream(peer.getInputStream(), bufferSize));
BlockOpResponseProto status = BlockOpResponseProto.parseFrom(
PBHelper.vintPrefixed(in));
PBHelperClient.vintPrefixed(in));
RemoteBlockReader2.checkSuccess(status, peer, block, file);
ReadOpChecksumInfoProto checksumInfo =
status.getReadOpChecksumInfo();

View File

@ -43,7 +43,7 @@ import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseP
import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientReadStatusProto;
import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto;
import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status;
import org.apache.hadoop.hdfs.protocolPB.PBHelper;
import org.apache.hadoop.hdfs.protocolPB.PBHelperClient;
import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
import org.apache.hadoop.hdfs.server.datanode.CachingStrategy;
import org.apache.hadoop.hdfs.shortcircuit.ClientMmap;
@ -290,7 +290,7 @@ public class RemoteBlockReader2 implements BlockReader {
DataChecksum checksum, boolean verifyChecksum,
long startOffset, long firstChunkOffset, long bytesToRead, Peer peer,
DatanodeID datanodeID, PeerCache peerCache) {
this.isLocal = DFSClient.isLocalAddress(NetUtils.
this.isLocal = DFSUtilClient.isLocalAddress(NetUtils.
createSocketAddr(datanodeID.getXferAddr()));
// Path is used only for printing block and file information in debug
this.peer = peer;
@ -417,7 +417,7 @@ public class RemoteBlockReader2 implements BlockReader {
DataInputStream in = new DataInputStream(peer.getInputStream());
BlockOpResponseProto status = BlockOpResponseProto.parseFrom(
PBHelper.vintPrefixed(in));
PBHelperClient.vintPrefixed(in));
checkSuccess(status, peer, block, file);
ReadOpChecksumInfoProto checksumInfo =
status.getReadOpChecksumInfo();

View File

@ -130,7 +130,7 @@ public class XAttrHelper {
}
Map<String, byte[]> xAttrMap = Maps.newHashMap();
for (XAttr xAttr : xAttrs) {
String name = getPrefixName(xAttr);
String name = getPrefixedName(xAttr);
byte[] value = xAttr.getValue();
if (value == null) {
value = new byte[0];
@ -144,13 +144,16 @@ public class XAttrHelper {
/**
* Get the prefixed name of an <code>XAttr</code>.
*/
public static String getPrefixName(XAttr xAttr) {
public static String getPrefixedName(XAttr xAttr) {
if (xAttr == null) {
return null;
}
String namespace = xAttr.getNameSpace().toString();
return StringUtils.toLowerCase(namespace) + "." + xAttr.getName();
return getPrefixedName(xAttr.getNameSpace(), xAttr.getName());
}
public static String getPrefixedName(XAttr.NameSpace ns, String name) {
return StringUtils.toLowerCase(ns.toString()) + "." + name;
}
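The new getPrefixedName(ns, name) overload above builds the prefixed form as the lower-cased namespace joined to the raw name with a dot. A standalone sketch of that format, using a hypothetical stand-in enum and Locale.ROOT in place of Hadoop's StringUtils (not part of this patch):

// Sketch of the "<namespace>.<name>" format produced by getPrefixedName().
import java.util.Locale;

public class PrefixedNameDemo {
  enum NameSpace { USER, TRUSTED, SYSTEM, SECURITY }   // stand-in for XAttr.NameSpace

  static String prefixedName(NameSpace ns, String name) {
    return ns.toString().toLowerCase(Locale.ROOT) + "." + name;
  }

  public static void main(String[] args) {
    System.out.println(prefixedName(NameSpace.USER, "myXAttr"));     // user.myXAttr
    System.out.println(prefixedName(NameSpace.TRUSTED, "checksum")); // trusted.checksum
  }
}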
/**

View File

@ -110,25 +110,6 @@ public interface ClientDatanodeProtocol {
BlockLocalPathInfo getBlockLocalPathInfo(ExtendedBlock block,
Token<BlockTokenIdentifier> token) throws IOException;
/**
* Retrieves volume location information about a list of blocks on a datanode.
* This is in the form of an opaque {@link org.apache.hadoop.fs.VolumeId}
* for each configured data directory, which is not guaranteed to be
* the same across DN restarts.
*
* @param blockPoolId the pool to query
* @param blockIds
* list of blocks on the local datanode
* @param tokens
* block access tokens corresponding to the requested blocks
* @return an HdfsBlocksMetadata that associates {@link ExtendedBlock}s with
* data directories
* @throws IOException
* if the datanode is unreachable, or the replica is not found on the datanode
*/
HdfsBlocksMetadata getHdfsBlocksMetadata(String blockPoolId,
long []blockIds, List<Token<BlockTokenIdentifier>> tokens) throws IOException;
/**
* Shuts down a datanode.
*

View File

@ -1,111 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.protocol;
import java.util.List;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import com.google.common.base.Joiner;
import com.google.common.base.Preconditions;
import com.google.common.primitives.Longs;
/**
* Augments an array of blocks on a datanode with additional information about
* where the block is stored.
*/
@InterfaceAudience.Private
@InterfaceStability.Unstable
public class HdfsBlocksMetadata {
/** The block pool that was queried */
private final String blockPoolId;
/**
* List of blocks
*/
private final long[] blockIds;
/**
* List of volumes
*/
private final List<byte[]> volumeIds;
/**
* List of indexes into <code>volumeIds</code>, one per block in
* <code>blockIds</code>. A value of Integer.MAX_VALUE indicates that the
* block was not found.
*/
private final List<Integer> volumeIndexes;
/**
* Constructs HdfsBlocksMetadata.
*
* @param blockIds
* List of blocks described
* @param volumeIds
* List of potential volume identifiers, specifying volumes where
* blocks may be stored
* @param volumeIndexes
* Indexes into the list of volume identifiers, one per block
*/
public HdfsBlocksMetadata(String blockPoolId,
long[] blockIds, List<byte[]> volumeIds,
List<Integer> volumeIndexes) {
Preconditions.checkArgument(blockIds.length == volumeIndexes.size(),
"Argument lengths should match");
this.blockPoolId = blockPoolId;
this.blockIds = blockIds;
this.volumeIds = volumeIds;
this.volumeIndexes = volumeIndexes;
}
/**
* Get the array of blocks.
*
* @return array of blocks
*/
public long[] getBlockIds() {
return blockIds;
}
/**
* Get the list of volume identifiers in raw byte form.
*
* @return list of ids
*/
public List<byte[]> getVolumeIds() {
return volumeIds;
}
/**
* Get a list of indexes into the array of {@link VolumeId}s, one per block.
*
* @return list of indexes
*/
public List<Integer> getVolumeIndexes() {
return volumeIndexes;
}
@Override
public String toString() {
return "Metadata for " + blockIds.length + " blocks in " +
blockPoolId + ": " + Joiner.on(",").join(Longs.asList(blockIds));
}
}
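The javadoc above defines volumeIndexes as one index into volumeIds per block id, with Integer.MAX_VALUE marking a block that was not found. A hedged sketch of how such a response would be consumed, with made-up ids (not part of this patch):

// Sketch: mapping block ids to volume ids through volumeIndexes.
import java.util.Arrays;
import java.util.List;

public class VolumeIndexDemo {
  public static void main(String[] args) {
    long[] blockIds = {101L, 102L, 103L};                 // made-up block ids
    List<byte[]> volumeIds = Arrays.asList(
        new byte[]{0x01}, new byte[]{0x02});              // made-up volume ids
    List<Integer> volumeIndexes = Arrays.asList(0, Integer.MAX_VALUE, 1);

    for (int i = 0; i < blockIds.length; i++) {
      int idx = volumeIndexes.get(i);
      if (idx == Integer.MAX_VALUE) {
        System.out.println("block " + blockIds[i] + ": not found");
      } else {
        System.out.println("block " + blockIds[i] + ": volume "
            + Arrays.toString(volumeIds.get(idx)));
      }
    }
  }
}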

View File

@ -17,7 +17,7 @@
*/
package org.apache.hadoop.hdfs.protocol.datatransfer;
import static org.apache.hadoop.hdfs.protocolPB.PBHelper.vintPrefixed;
import static org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed;
import java.io.IOException;
import java.io.InputStream;

View File

@ -19,7 +19,7 @@ package org.apache.hadoop.hdfs.protocol.datatransfer;
import static org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.fromProto;
import static org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.continueTraceSpan;
import static org.apache.hadoop.hdfs.protocolPB.PBHelper.vintPrefixed;
import static org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed;
import java.io.DataInputStream;
import java.io.IOException;
@ -30,6 +30,7 @@ import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.CachingStrategyProto;
import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumProto;
import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpCopyBlockProto;
import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpCustomProto;
import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReadBlockProto;
import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReplaceBlockProto;
import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpRequestShortCircuitAccessProto;
@ -38,6 +39,7 @@ import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProt
import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReleaseShortCircuitAccessRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmRequestProto;
import org.apache.hadoop.hdfs.protocolPB.PBHelper;
import org.apache.hadoop.hdfs.protocolPB.PBHelperClient;
import org.apache.hadoop.hdfs.server.datanode.CachingStrategy;
import org.apache.hadoop.hdfs.shortcircuit.ShortCircuitShm.SlotId;
import org.apache.htrace.TraceScope;
@ -135,7 +137,7 @@ public abstract class Receiver implements DataTransferProtocol {
proto.getClass().getSimpleName());
try {
writeBlock(PBHelper.convert(proto.getHeader().getBaseHeader().getBlock()),
PBHelper.convertStorageType(proto.getStorageType()),
PBHelperClient.convertStorageType(proto.getStorageType()),
PBHelper.convert(proto.getHeader().getBaseHeader().getToken()),
proto.getHeader().getClientName(),
targets,
@ -227,7 +229,7 @@ public abstract class Receiver implements DataTransferProtocol {
proto.getClass().getSimpleName());
try {
replaceBlock(PBHelper.convert(proto.getHeader().getBlock()),
PBHelper.convertStorageType(proto.getStorageType()),
PBHelperClient.convertStorageType(proto.getStorageType()),
PBHelper.convert(proto.getHeader().getToken()),
proto.getDelHint(),
PBHelper.convert(proto.getSource()));

View File

@ -24,7 +24,7 @@ import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATA_TRANSFER_SASL_PROPS_
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_ENCRYPT_DATA_TRANSFER_CIPHER_KEY_BITLENGTH_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_ENCRYPT_DATA_TRANSFER_CIPHER_KEY_BITLENGTH_DEFAULT;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_ENCRYPT_DATA_TRANSFER_CIPHER_SUITES_KEY;
import static org.apache.hadoop.hdfs.protocolPB.PBHelper.vintPrefixed;
import static org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed;
import java.io.IOException;
import java.io.InputStream;

View File

@ -29,7 +29,6 @@ import org.apache.hadoop.conf.ReconfigurationUtil.PropertyChange;
import org.apache.hadoop.hdfs.client.BlockReportOptions;
import org.apache.hadoop.hdfs.protocol.BlockLocalPathInfo;
import org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol;
import org.apache.hadoop.hdfs.protocol.HdfsBlocksMetadata;
import org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoRequestProto;
@ -39,9 +38,6 @@ import org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetDat
import org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReconfigurationStatusConfigChangeProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReconfigurationStatusRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReconfigurationStatusResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetHdfsBlockLocationsRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetHdfsBlockLocationsResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetHdfsBlockLocationsResponseProto.Builder;
import org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.ListReconfigurablePropertiesRequestProto;
@ -54,12 +50,7 @@ import org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.StartR
import org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.StartReconfigurationResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.TriggerBlockReportRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.TriggerBlockReportResponseProto;
import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
import org.apache.hadoop.security.proto.SecurityProtos.TokenProto;
import org.apache.hadoop.security.token.Token;
import com.google.common.primitives.Longs;
import com.google.protobuf.ByteString;
import com.google.protobuf.RpcController;
import com.google.protobuf.ServiceException;
@ -137,43 +128,11 @@ public class ClientDatanodeProtocolServerSideTranslatorPB implements
throw new ServiceException(e);
}
return GetBlockLocalPathInfoResponseProto.newBuilder()
.setBlock(PBHelper.convert(resp.getBlock()))
.setBlock(PBHelperClient.convert(resp.getBlock()))
.setLocalPath(resp.getBlockPath()).setLocalMetaPath(resp.getMetaPath())
.build();
}
@Override
public GetHdfsBlockLocationsResponseProto getHdfsBlockLocations(
RpcController controller, GetHdfsBlockLocationsRequestProto request)
throws ServiceException {
HdfsBlocksMetadata resp;
try {
String poolId = request.getBlockPoolId();
List<Token<BlockTokenIdentifier>> tokens =
new ArrayList<Token<BlockTokenIdentifier>>(request.getTokensCount());
for (TokenProto b : request.getTokensList()) {
tokens.add(PBHelper.convert(b));
}
long[] blockIds = Longs.toArray(request.getBlockIdsList());
// Call the real implementation
resp = impl.getHdfsBlocksMetadata(poolId, blockIds, tokens);
} catch (IOException e) {
throw new ServiceException(e);
}
List<ByteString> volumeIdsByteStrings =
new ArrayList<ByteString>(resp.getVolumeIds().size());
for (byte[] b : resp.getVolumeIds()) {
volumeIdsByteStrings.add(ByteString.copyFrom(b));
}
// Build and return the response
Builder builder = GetHdfsBlockLocationsResponseProto.newBuilder();
builder.addAllVolumeIds(volumeIdsByteStrings);
builder.addAllVolumeIndexes(resp.getVolumeIndexes());
return builder.build();
}
@Override
public ShutdownDatanodeResponseProto shutdownDatanode(
RpcController unused, ShutdownDatanodeRequestProto request)

View File

@ -20,7 +20,6 @@ package org.apache.hadoop.hdfs.protocolPB;
import java.io.Closeable;
import java.io.IOException;
import java.net.InetSocketAddress;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
@ -42,15 +41,12 @@ import org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol;
import org.apache.hadoop.hdfs.protocol.DatanodeID;
import org.apache.hadoop.hdfs.protocol.DatanodeLocalInfo;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.protocol.HdfsBlocksMetadata;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetDatanodeInfoRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetDatanodeInfoResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetHdfsBlockLocationsRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetHdfsBlockLocationsResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.ListReconfigurablePropertiesRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.ListReconfigurablePropertiesResponseProto;
@ -70,11 +66,8 @@ import org.apache.hadoop.ipc.RPC;
import org.apache.hadoop.ipc.RpcClientUtil;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.proto.SecurityProtos.TokenProto;
import org.apache.hadoop.security.token.Token;
import com.google.common.primitives.Longs;
import com.google.protobuf.ByteString;
import com.google.protobuf.RpcController;
import com.google.protobuf.ServiceException;
@ -185,7 +178,7 @@ public class ClientDatanodeProtocolTranslatorPB implements
@Override
public long getReplicaVisibleLength(ExtendedBlock b) throws IOException {
GetReplicaVisibleLengthRequestProto req = GetReplicaVisibleLengthRequestProto
.newBuilder().setBlock(PBHelper.convert(b)).build();
.newBuilder().setBlock(PBHelperClient.convert(b)).build();
try {
return rpcProxy.getReplicaVisibleLength(NULL_CONTROLLER, req).getLength();
} catch (ServiceException e) {
@ -218,8 +211,8 @@ public class ClientDatanodeProtocolTranslatorPB implements
Token<BlockTokenIdentifier> token) throws IOException {
GetBlockLocalPathInfoRequestProto req =
GetBlockLocalPathInfoRequestProto.newBuilder()
.setBlock(PBHelper.convert(block))
.setToken(PBHelper.convert(token)).build();
.setBlock(PBHelperClient.convert(block))
.setToken(PBHelperClient.convert(token)).build();
GetBlockLocalPathInfoResponseProto resp;
try {
resp = rpcProxy.getBlockLocalPathInfo(NULL_CONTROLLER, req);
@ -242,42 +235,6 @@ public class ClientDatanodeProtocolTranslatorPB implements
return rpcProxy;
}
@Override
public HdfsBlocksMetadata getHdfsBlocksMetadata(String blockPoolId,
long[] blockIds,
List<Token<BlockTokenIdentifier>> tokens) throws IOException {
List<TokenProto> tokensProtos =
new ArrayList<TokenProto>(tokens.size());
for (Token<BlockTokenIdentifier> t : tokens) {
tokensProtos.add(PBHelper.convert(t));
}
// Build the request
GetHdfsBlockLocationsRequestProto request =
GetHdfsBlockLocationsRequestProto.newBuilder()
.setBlockPoolId(blockPoolId)
.addAllBlockIds(Longs.asList(blockIds))
.addAllTokens(tokensProtos)
.build();
// Send the RPC
GetHdfsBlockLocationsResponseProto response;
try {
response = rpcProxy.getHdfsBlockLocations(NULL_CONTROLLER, request);
} catch (ServiceException e) {
throw ProtobufHelper.getRemoteException(e);
}
// List of volumes in the response
List<ByteString> volumeIdsByteStrings = response.getVolumeIdsList();
List<byte[]> volumeIds = new ArrayList<byte[]>(volumeIdsByteStrings.size());
for (ByteString bs : volumeIdsByteStrings) {
volumeIds.add(bs.toByteArray());
}
// Array of indexes into the list of volumes, one per block
List<Integer> volumeIndexes = response.getVolumeIndexesList();
// Parsed HdfsVolumeId values, one per block
return new HdfsBlocksMetadata(blockPoolId, blockIds,
volumeIds, volumeIndexes);
}
@Override
public void shutdownDatanode(boolean forUpgrade) throws IOException {
ShutdownDatanodeRequestProto request = ShutdownDatanodeRequestProto

View File

@ -703,7 +703,7 @@ public class ClientNamenodeProtocolServerSideTranslatorPB implements
RpcController controller, GetDatanodeReportRequestProto req)
throws ServiceException {
try {
List<? extends DatanodeInfoProto> result = PBHelper.convert(server
List<? extends DatanodeInfoProto> result = PBHelperClient.convert(server
.getDatanodeReport(PBHelper.convert(req.getType())));
return GetDatanodeReportResponseProto.newBuilder()
.addAllDi(result).build();
@ -898,7 +898,7 @@ public class ClientNamenodeProtocolServerSideTranslatorPB implements
server.setQuota(req.getPath(), req.getNamespaceQuota(),
req.getStoragespaceQuota(),
req.hasStorageType() ?
PBHelper.convertStorageType(req.getStorageType()): null);
PBHelperClient.convertStorageType(req.getStorageType()): null);
return VOID_SETQUOTA_RESPONSE;
} catch (IOException e) {
throw new ServiceException(e);
@ -998,7 +998,7 @@ public class ClientNamenodeProtocolServerSideTranslatorPB implements
GetDelegationTokenResponseProto.Builder rspBuilder =
GetDelegationTokenResponseProto.newBuilder();
if (token != null) {
rspBuilder.setToken(PBHelper.convert(token));
rspBuilder.setToken(PBHelperClient.convert(token));
}
return rspBuilder.build();
} catch (IOException e) {

View File

@ -402,7 +402,7 @@ public class ClientNamenodeProtocolTranslatorPB implements
String holder) throws AccessControlException, FileNotFoundException,
UnresolvedLinkException, IOException {
AbandonBlockRequestProto req = AbandonBlockRequestProto.newBuilder()
.setB(PBHelper.convert(b)).setSrc(src).setHolder(holder)
.setB(PBHelperClient.convert(b)).setSrc(src).setHolder(holder)
.setFileId(fileId).build();
try {
rpcProxy.abandonBlock(null, req);
@ -421,9 +421,9 @@ public class ClientNamenodeProtocolTranslatorPB implements
AddBlockRequestProto.Builder req = AddBlockRequestProto.newBuilder()
.setSrc(src).setClientName(clientName).setFileId(fileId);
if (previous != null)
req.setPrevious(PBHelper.convert(previous));
if (excludeNodes != null)
req.addAllExcludeNodes(PBHelper.convert(excludeNodes));
req.setPrevious(PBHelperClient.convert(previous));
if (excludeNodes != null)
req.addAllExcludeNodes(PBHelperClient.convert(excludeNodes));
if (favoredNodes != null) {
req.addAllFavoredNodes(Arrays.asList(favoredNodes));
}
@ -446,10 +446,10 @@ public class ClientNamenodeProtocolTranslatorPB implements
.newBuilder()
.setSrc(src)
.setFileId(fileId)
.setBlk(PBHelper.convert(blk))
.addAllExistings(PBHelper.convert(existings))
.setBlk(PBHelperClient.convert(blk))
.addAllExistings(PBHelperClient.convert(existings))
.addAllExistingStorageUuids(Arrays.asList(existingStorageIDs))
.addAllExcludes(PBHelper.convert(excludes))
.addAllExcludes(PBHelperClient.convert(excludes))
.setNumAdditionalNodes(numAdditionalNodes)
.setClientName(clientName)
.build();
@ -471,7 +471,7 @@ public class ClientNamenodeProtocolTranslatorPB implements
.setClientName(clientName)
.setFileId(fileId);
if (last != null)
req.setLast(PBHelper.convert(last));
req.setLast(PBHelperClient.convert(last));
try {
return rpcProxy.complete(null, req.build()).getResult();
} catch (ServiceException e) {
@ -832,7 +832,7 @@ public class ClientNamenodeProtocolTranslatorPB implements
.setNamespaceQuota(namespaceQuota)
.setStoragespaceQuota(storagespaceQuota);
if (type != null) {
builder.setStorageType(PBHelper.convertStorageType(type));
builder.setStorageType(PBHelperClient.convertStorageType(type));
}
final SetQuotaRequestProto req = builder.build();
try {
@ -910,7 +910,7 @@ public class ClientNamenodeProtocolTranslatorPB implements
String clientName) throws IOException {
UpdateBlockForPipelineRequestProto req = UpdateBlockForPipelineRequestProto
.newBuilder()
.setBlock(PBHelper.convert(block))
.setBlock(PBHelperClient.convert(block))
.setClientName(clientName)
.build();
try {
@ -926,8 +926,8 @@ public class ClientNamenodeProtocolTranslatorPB implements
ExtendedBlock newBlock, DatanodeID[] newNodes, String[] storageIDs) throws IOException {
UpdatePipelineRequestProto req = UpdatePipelineRequestProto.newBuilder()
.setClientName(clientName)
.setOldBlock(PBHelper.convert(oldBlock))
.setNewBlock(PBHelper.convert(newBlock))
.setOldBlock(PBHelperClient.convert(oldBlock))
.setNewBlock(PBHelperClient.convert(newBlock))
.addAllNewNodes(Arrays.asList(PBHelper.convert(newNodes)))
.addAllStorageIDs(storageIDs == null ? null : Arrays.asList(storageIDs))
.build();
@ -958,7 +958,7 @@ public class ClientNamenodeProtocolTranslatorPB implements
public long renewDelegationToken(Token<DelegationTokenIdentifier> token)
throws IOException {
RenewDelegationTokenRequestProto req = RenewDelegationTokenRequestProto.newBuilder().
setToken(PBHelper.convert(token)).
setToken(PBHelperClient.convert(token)).
build();
try {
return rpcProxy.renewDelegationToken(null, req).getNewExpiryTime();
@ -972,7 +972,7 @@ public class ClientNamenodeProtocolTranslatorPB implements
throws IOException {
CancelDelegationTokenRequestProto req = CancelDelegationTokenRequestProto
.newBuilder()
.setToken(PBHelper.convert(token))
.setToken(PBHelperClient.convert(token))
.build();
try {
rpcProxy.cancelDelegationToken(null, req);

View File

@ -298,11 +298,11 @@ public class DatanodeProtocolClientSideTranslatorPB implements
) throws IOException {
CommitBlockSynchronizationRequestProto.Builder builder =
CommitBlockSynchronizationRequestProto.newBuilder()
.setBlock(PBHelper.convert(block)).setNewGenStamp(newgenerationstamp)
.setBlock(PBHelperClient.convert(block)).setNewGenStamp(newgenerationstamp)
.setNewLength(newlength).setCloseFile(closeFile)
.setDeleteBlock(deleteblock);
for (int i = 0; i < newtargets.length; i++) {
builder.addNewTaragets(PBHelper.convert(newtargets[i]));
builder.addNewTaragets(PBHelperClient.convert(newtargets[i]));
builder.addNewTargetStorages(newtargetstorages[i]);
}
CommitBlockSynchronizationRequestProto req = builder.build();

View File

@ -105,7 +105,7 @@ public class InterDatanodeProtocolTranslatorPB implements
long recoveryId, long newBlockId, long newLength) throws IOException {
UpdateReplicaUnderRecoveryRequestProto req =
UpdateReplicaUnderRecoveryRequestProto.newBuilder()
.setBlock(PBHelper.convert(oldBlock))
.setBlock(PBHelperClient.convert(oldBlock))
.setNewLength(newLength).setNewBlockId(newBlockId)
.setRecoveryId(recoveryId).build();
try {

View File

@ -101,7 +101,7 @@ public class NamenodeProtocolTranslatorPB implements NamenodeProtocol,
public BlocksWithLocations getBlocks(DatanodeInfo datanode, long size)
throws IOException {
GetBlocksRequestProto req = GetBlocksRequestProto.newBuilder()
.setDatanode(PBHelper.convert((DatanodeID)datanode)).setSize(size)
.setDatanode(PBHelperClient.convert((DatanodeID)datanode)).setSize(size)
.build();
try {
return PBHelper.convert(rpcProxy.getBlocks(NULL_CONTROLLER, req)

View File

@ -362,7 +362,7 @@ public class PBHelper {
if (types == null || types.length == 0) {
return null;
}
List<StorageTypeProto> list = convertStorageTypes(types);
List<StorageTypeProto> list = PBHelperClient.convertStorageTypes(types);
return StorageTypesProto.newBuilder().addAllStorageTypes(list).build();
}
@ -397,20 +397,6 @@ public class PBHelper {
.getInfoSecurePort() : 0, dn.getIpcPort());
}
public static DatanodeIDProto convert(DatanodeID dn) {
// For wire compatibility with older versions, we transmit the StorageID,
// which is the same as the DatanodeUuid. Since StorageID is a required
// field, we pass the empty string if the DatanodeUuid is not yet known.
return DatanodeIDProto.newBuilder()
.setIpAddr(dn.getIpAddr())
.setHostName(dn.getHostName())
.setXferPort(dn.getXferPort())
.setDatanodeUuid(dn.getDatanodeUuid() != null ? dn.getDatanodeUuid() : "")
.setInfoPort(dn.getInfoPort())
.setInfoSecurePort(dn.getInfoSecurePort())
.setIpcPort(dn.getIpcPort()).build();
}
// Arrays of DatanodeId
public static DatanodeIDProto[] convert(DatanodeID[] did) {
if (did == null)
@ -418,7 +404,7 @@ public class PBHelper {
final int len = did.length;
DatanodeIDProto[] result = new DatanodeIDProto[len];
for (int i = 0; i < len; ++i) {
result[i] = convert(did[i]);
result[i] = PBHelperClient.convert(did[i]);
}
return result;
}
@ -449,7 +435,7 @@ public class PBHelper {
.newBuilder().setBlock(convert(blk.getBlock()))
.addAllDatanodeUuids(Arrays.asList(blk.getDatanodeUuids()))
.addAllStorageUuids(Arrays.asList(blk.getStorageIDs()))
.addAllStorageTypes(convertStorageTypes(blk.getStorageTypes()));
.addAllStorageTypes(PBHelperClient.convertStorageTypes(blk.getStorageTypes()));
if (blk instanceof StripedBlockWithLocations) {
StripedBlockWithLocations sblk = (StripedBlockWithLocations) blk;
builder.setIndices(getByteString(sblk.getIndices()));
@ -621,16 +607,6 @@ public class PBHelper {
eb.getGenerationStamp());
}
public static ExtendedBlockProto convert(final ExtendedBlock b) {
if (b == null) return null;
return ExtendedBlockProto.newBuilder().
setPoolId(b.getBlockPoolId()).
setBlockId(b.getBlockId()).
setNumBytes(b.getNumBytes()).
setGenerationStamp(b.getGenerationStamp()).
build();
}
public static RecoveringBlockProto convert(RecoveringBlock b) {
if (b == null) {
return null;
@ -651,17 +627,6 @@ public class PBHelper {
new RecoveringBlock(block, locs, b.getNewGenStamp());
}
public static DatanodeInfoProto.AdminState convert(
final DatanodeInfo.AdminStates inAs) {
switch (inAs) {
case NORMAL: return DatanodeInfoProto.AdminState.NORMAL;
case DECOMMISSION_INPROGRESS:
return DatanodeInfoProto.AdminState.DECOMMISSION_INPROGRESS;
case DECOMMISSIONED: return DatanodeInfoProto.AdminState.DECOMMISSIONED;
default: return DatanodeInfoProto.AdminState.NORMAL;
}
}
static public DatanodeInfo convert(DatanodeInfoProto di) {
if (di == null) return null;
return new DatanodeInfo(
@ -673,12 +638,6 @@ public class PBHelper {
di.getXceiverCount(), PBHelper.convert(di.getAdminState()));
}
static public DatanodeInfoProto convertDatanodeInfo(DatanodeInfo di) {
if (di == null) return null;
return convert(di);
}
static public DatanodeInfo[] convert(DatanodeInfoProto di[]) {
if (di == null) return null;
DatanodeInfo[] result = new DatanodeInfo[di.length];
@ -688,27 +647,6 @@ public class PBHelper {
return result;
}
public static List<? extends HdfsProtos.DatanodeInfoProto> convert(
DatanodeInfo[] dnInfos) {
return convert(dnInfos, 0);
}
/**
* Copy from {@code dnInfos} to a target list of the same size, starting at
* {@code startIdx}.
*/
public static List<? extends HdfsProtos.DatanodeInfoProto> convert(
DatanodeInfo[] dnInfos, int startIdx) {
if (dnInfos == null)
return null;
ArrayList<HdfsProtos.DatanodeInfoProto> protos = Lists
.newArrayListWithCapacity(dnInfos.length);
for (int i = startIdx; i < dnInfos.length; i++) {
protos.add(convert(dnInfos[i]));
}
return protos;
}
public static DatanodeInfo[] convert(List<DatanodeInfoProto> list) {
DatanodeInfo[] info = new DatanodeInfo[list.size()];
for (int i = 0; i < info.length; i++) {
@ -716,32 +654,11 @@ public class PBHelper {
}
return info;
}
public static DatanodeInfoProto convert(DatanodeInfo info) {
DatanodeInfoProto.Builder builder = DatanodeInfoProto.newBuilder();
if (info.getNetworkLocation() != null) {
builder.setLocation(info.getNetworkLocation());
}
builder
.setId(PBHelper.convert((DatanodeID)info))
.setCapacity(info.getCapacity())
.setDfsUsed(info.getDfsUsed())
.setRemaining(info.getRemaining())
.setBlockPoolUsed(info.getBlockPoolUsed())
.setCacheCapacity(info.getCacheCapacity())
.setCacheUsed(info.getCacheUsed())
.setLastUpdate(info.getLastUpdate())
.setLastUpdateMonotonic(info.getLastUpdateMonotonic())
.setXceiverCount(info.getXceiverCount())
.setAdminState(PBHelper.convert(info.getAdminState()))
.build();
return builder.build();
}
public static DatanodeStorageReportProto convertDatanodeStorageReport(
DatanodeStorageReport report) {
return DatanodeStorageReportProto.newBuilder()
.setDatanodeInfo(convert(report.getDatanodeInfo()))
.setDatanodeInfo(PBHelperClient.convert(report.getDatanodeInfo()))
.addAllStorageReports(convertStorageReports(report.getStorageReports()))
.build();
}
@ -793,7 +710,7 @@ public class PBHelper {
Lists.newLinkedList(Arrays.asList(b.getCachedLocations()));
for (int i = 0; i < locs.length; i++) {
DatanodeInfo loc = locs[i];
builder.addLocs(i, PBHelper.convert(loc));
builder.addLocs(i, PBHelperClient.convert(loc));
boolean locIsCached = cachedLocs.contains(loc);
builder.addIsCached(locIsCached);
if (locIsCached) {
@ -807,7 +724,7 @@ public class PBHelper {
StorageType[] storageTypes = b.getStorageTypes();
if (storageTypes != null) {
for (StorageType storageType : storageTypes) {
builder.addStorageTypes(PBHelper.convertStorageType(storageType));
builder.addStorageTypes(PBHelperClient.convertStorageType(storageType));
}
}
final String[] storageIDs = b.getStorageIDs();
@ -820,12 +737,12 @@ public class PBHelper {
Token<BlockTokenIdentifier>[] blockTokens = sb.getBlockTokens();
for (int i = 0; i < indices.length; i++) {
builder.addBlockIndex(indices[i]);
builder.addBlockTokens(PBHelper.convert(blockTokens[i]));
builder.addBlockTokens(PBHelperClient.convert(blockTokens[i]));
}
}
return builder.setB(PBHelper.convert(b.getBlock()))
.setBlockToken(PBHelper.convert(b.getBlockToken()))
return builder.setB(PBHelperClient.convert(b.getBlock()))
.setBlockToken(PBHelperClient.convert(b.getBlockToken()))
.setCorrupt(b.isCorrupt()).setOffset(b.getStartOffset()).build();
}
@ -889,14 +806,6 @@ public class PBHelper {
return lb;
}
public static TokenProto convert(Token<?> tok) {
return TokenProto.newBuilder().
setIdentifier(ByteString.copyFrom(tok.getIdentifier())).
setPassword(ByteString.copyFrom(tok.getPassword())).
setKind(tok.getKind().toString()).
setService(tok.getService().toString()).build();
}
public static Token<BlockTokenIdentifier> convert(
TokenProto blockToken) {
return new Token<BlockTokenIdentifier>(blockToken.getIdentifier()
@ -948,7 +857,7 @@ public class PBHelper {
DatanodeRegistration registration) {
DatanodeRegistrationProto.Builder builder = DatanodeRegistrationProto
.newBuilder();
return builder.setDatanodeID(PBHelper.convert((DatanodeID) registration))
return builder.setDatanodeID(PBHelperClient.convert((DatanodeID) registration))
.setStorageInfo(PBHelper.convert(registration.getStorageInfo()))
.setKeys(PBHelper.convert(registration.getExportedKeys()))
.setSoftwareVersion(registration.getSoftwareVersion()).build();
@ -1042,7 +951,7 @@ public class PBHelper {
if (types != null) {
for (StorageType[] ts : types) {
StorageTypesProto.Builder builder = StorageTypesProto.newBuilder();
builder.addAllStorageTypes(convertStorageTypes(ts));
builder.addAllStorageTypes(PBHelperClient.convertStorageTypes(ts));
list.add(builder.build());
}
}
@ -1073,7 +982,7 @@ public class PBHelper {
DatanodeInfosProto[] ret = new DatanodeInfosProto[targets.length];
for (int i = 0; i < targets.length; i++) {
ret[i] = DatanodeInfosProto.newBuilder()
.addAllDatanodes(PBHelper.convert(targets[i])).build();
.addAllDatanodes(PBHelperClient.convert(targets[i])).build();
}
return Arrays.asList(ret);
}
@ -1407,7 +1316,7 @@ public class PBHelper {
fs.getFileBufferSize(),
fs.getEncryptDataTransfer(),
fs.getTrashInterval(),
PBHelper.convert(fs.getChecksumType()));
PBHelperClient.convert(fs.getChecksumType()));
}
public static FsServerDefaultsProto convert(FsServerDefaults fs) {
@ -1420,7 +1329,7 @@ public class PBHelper {
.setFileBufferSize(fs.getFileBufferSize())
.setEncryptDataTransfer(fs.getEncryptDataTransfer())
.setTrashInterval(fs.getTrashInterval())
.setChecksumType(PBHelper.convert(fs.getChecksumType()))
.setChecksumType(PBHelperClient.convert(fs.getChecksumType()))
.build();
}
@ -1812,7 +1721,7 @@ public class PBHelper {
if (cs.hasTypeQuotaInfos()) {
for (HdfsProtos.StorageTypeQuotaInfoProto info :
cs.getTypeQuotaInfos().getTypeQuotaInfoList()) {
StorageType type = PBHelper.convertStorageType(info.getType());
StorageType type = PBHelperClient.convertStorageType(info.getType());
builder.typeConsumed(type, info.getConsumed());
builder.typeQuota(type, info.getQuota());
}
@ -1836,7 +1745,7 @@ public class PBHelper {
for (StorageType t: StorageType.getTypesSupportingQuota()) {
HdfsProtos.StorageTypeQuotaInfoProto info =
HdfsProtos.StorageTypeQuotaInfoProto.newBuilder().
setType(convertStorageType(t)).
setType(PBHelperClient.convertStorageType(t)).
setConsumed(cs.getTypeConsumed(t)).
setQuota(cs.getTypeQuota(t)).
build();
@ -1881,7 +1790,7 @@ public class PBHelper {
public static DatanodeStorageProto convert(DatanodeStorage s) {
return DatanodeStorageProto.newBuilder()
.setState(PBHelper.convertState(s.getState()))
.setStorageType(PBHelper.convertStorageType(s.getStorageType()))
.setStorageType(PBHelperClient.convertStorageType(s.getStorageType()))
.setStorageUuid(s.getStorageID()).build();
}
@ -1895,44 +1804,10 @@ public class PBHelper {
}
}
public static List<StorageTypeProto> convertStorageTypes(
StorageType[] types) {
return convertStorageTypes(types, 0);
}
public static List<StorageTypeProto> convertStorageTypes(
StorageType[] types, int startIdx) {
if (types == null) {
return null;
}
final List<StorageTypeProto> protos = new ArrayList<StorageTypeProto>(
types.length);
for (int i = startIdx; i < types.length; ++i) {
protos.add(convertStorageType(types[i]));
}
return protos;
}
public static StorageTypeProto convertStorageType(StorageType type) {
switch(type) {
case DISK:
return StorageTypeProto.DISK;
case SSD:
return StorageTypeProto.SSD;
case ARCHIVE:
return StorageTypeProto.ARCHIVE;
case RAM_DISK:
return StorageTypeProto.RAM_DISK;
default:
throw new IllegalStateException(
"BUG: StorageType not found, type=" + type);
}
}
public static DatanodeStorage convert(DatanodeStorageProto s) {
return new DatanodeStorage(s.getStorageUuid(),
PBHelper.convertState(s.getState()),
PBHelper.convertStorageType(s.getStorageType()));
PBHelperClient.convertStorageType(s.getStorageType()));
}
private static State convertState(StorageState state) {
@ -1945,22 +1820,6 @@ public class PBHelper {
}
}
public static StorageType convertStorageType(StorageTypeProto type) {
switch(type) {
case DISK:
return StorageType.DISK;
case SSD:
return StorageType.SSD;
case ARCHIVE:
return StorageType.ARCHIVE;
case RAM_DISK:
return StorageType.RAM_DISK;
default:
throw new IllegalStateException(
"BUG: StorageTypeProto not found, type=" + type);
}
}
public static StorageType[] convertStorageTypes(
List<StorageTypeProto> storageTypesList, int expectedSize) {
final StorageType[] storageTypes = new StorageType[expectedSize];
@ -1969,7 +1828,7 @@ public class PBHelper {
Arrays.fill(storageTypes, StorageType.DEFAULT);
} else {
for (int i = 0; i < storageTypes.length; ++i) {
storageTypes[i] = convertStorageType(storageTypesList.get(i));
storageTypes[i] = PBHelperClient.convertStorageType(storageTypesList.get(i));
}
}
return storageTypes;
@ -2153,10 +2012,6 @@ public class PBHelper {
return reportProto;
}
public static DataChecksum.Type convert(HdfsProtos.ChecksumTypeProto type) {
return DataChecksum.Type.valueOf(type.getNumber());
}
public static CacheDirectiveInfoProto convert
(CacheDirectiveInfo info) {
CacheDirectiveInfoProto.Builder builder =
@ -2329,9 +2184,6 @@ public class PBHelper {
return new CachePoolEntry(info, stats);
}
public static HdfsProtos.ChecksumTypeProto convert(DataChecksum.Type type) {
return HdfsProtos.ChecksumTypeProto.valueOf(type.id);
}
public static DatanodeLocalInfoProto convert(DatanodeLocalInfo info) {
DatanodeLocalInfoProto.Builder builder = DatanodeLocalInfoProto.newBuilder();
@ -2346,17 +2198,6 @@ public class PBHelper {
proto.getConfigVersion(), proto.getUptime());
}
public static InputStream vintPrefixed(final InputStream input)
throws IOException {
final int firstByte = input.read();
if (firstByte == -1) {
throw new EOFException("Premature EOF: no length prefix available");
}
int size = CodedInputStream.readRawVarint32(firstByte, input);
assert size >= 0;
return new ExactSizeInputStream(input, size);
}
private static AclEntryScopeProto convert(AclEntryScope v) {
return AclEntryScopeProto.valueOf(v.ordinal());
@ -2580,30 +2421,11 @@ public class PBHelper {
proto.getKeyName());
}
public static ShortCircuitShmSlotProto convert(SlotId slotId) {
return ShortCircuitShmSlotProto.newBuilder().
setShmId(convert(slotId.getShmId())).
setSlotIdx(slotId.getSlotIdx()).
build();
}
public static ShortCircuitShmIdProto convert(ShmId shmId) {
return ShortCircuitShmIdProto.newBuilder().
setHi(shmId.getHi()).
setLo(shmId.getLo()).
build();
}
public static SlotId convert(ShortCircuitShmSlotProto slotId) {
return new SlotId(PBHelper.convert(slotId.getShmId()),
return new SlotId(PBHelperClient.convert(slotId.getShmId()),
slotId.getSlotIdx());
}
public static ShmId convert(ShortCircuitShmIdProto shmId) {
return new ShmId(shmId.getHi(), shmId.getLo());
}
private static Event.CreateEvent.INodeType createTypeConvert(InotifyProtos.INodeType
type) {
switch (type) {
@ -3110,18 +2932,6 @@ public class PBHelper {
ezKeyVersionName);
}
public static List<Boolean> convert(boolean[] targetPinnings, int idx) {
List<Boolean> pinnings = new ArrayList<Boolean>();
if (targetPinnings == null) {
pinnings.add(Boolean.FALSE);
} else {
for (; idx < targetPinnings.length; ++idx) {
pinnings.add(Boolean.valueOf(targetPinnings[idx]));
}
}
return pinnings;
}
public static boolean[] convertBooleanList(
List<Boolean> targetPinningsList) {
final boolean[] targetPinnings = new boolean[targetPinningsList.size()];
@ -3239,7 +3049,8 @@ public class PBHelper {
BlockECRecoveryInfo blockEcRecoveryInfo) {
BlockECRecoveryInfoProto.Builder builder = BlockECRecoveryInfoProto
.newBuilder();
builder.setBlock(convert(blockEcRecoveryInfo.getExtendedBlock()));
builder.setBlock(PBHelperClient.convert(
blockEcRecoveryInfo.getExtendedBlock()));
DatanodeInfo[] sourceDnInfos = blockEcRecoveryInfo.getSourceDnInfos();
builder.setSourceDnInfos(convertToDnInfosProto(sourceDnInfos));
@ -3275,7 +3086,7 @@ public class PBHelper {
StorageType[] targetStorageTypes) {
StorageTypesProto.Builder builder = StorageTypesProto.newBuilder();
for (StorageType storageType : targetStorageTypes) {
builder.addStorageTypes(convertStorageType(storageType));
builder.addStorageTypes(PBHelperClient.convertStorageType(storageType));
}
return builder.build();
}
@ -3291,7 +3102,7 @@ public class PBHelper {
private static DatanodeInfosProto convertToDnInfosProto(DatanodeInfo[] dnInfos) {
DatanodeInfosProto.Builder builder = DatanodeInfosProto.newBuilder();
for (DatanodeInfo datanodeInfo : dnInfos) {
builder.addDatanodes(convert(datanodeInfo));
builder.addDatanodes(PBHelperClient.convert(datanodeInfo));
}
return builder.build();
}
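
The hunks above are a mechanical refactor: PBHelper's copies of the protobuf converters are deleted and the remaining call sites point at PBHelperClient, so each conversion has a single shared owner. A minimal, self-contained sketch of that pattern follows; Record, SharedConverter and ServerSideHelper are made-up stand-ins for illustration, not Hadoop classes.

public class ConverterRefactorSketch {

  // Made-up payload type standing in for the HDFS protocol objects.
  static final class Record {
    final long id;
    Record(long id) { this.id = id; }
  }

  // Plays the role of PBHelperClient: the single owner of the conversion logic.
  static final class SharedConverter {
    static String convert(Record r) {
      return "record:" + r.id;
    }
  }

  // Plays the role of PBHelper after this diff: its duplicate convert(Record)
  // wrapper is gone, so it calls the shared converter directly.
  static final class ServerSideHelper {
    static String describe(Record r) {
      return "server view of " + SharedConverter.convert(r);
    }
  }

  public static void main(String[] args) {
    System.out.println(ServerSideHelper.describe(new Record(42L)));
  }
}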


@ -27,6 +27,7 @@ import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.Date;
import java.util.HashSet;
import java.util.Iterator;
import java.util.LinkedList;
import java.util.List;
@ -54,6 +55,7 @@ import org.apache.hadoop.hdfs.server.namenode.UnsupportedActionException;
import org.apache.hadoop.hdfs.server.protocol.DatanodeStorageReport;
import org.apache.hadoop.hdfs.server.protocol.StorageReport;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.util.HostsFileReader;
import org.apache.hadoop.util.StringUtils;
import org.apache.hadoop.util.Time;
import org.apache.hadoop.util.Tool;
@ -188,9 +190,11 @@ public class Balancer {
private final Dispatcher dispatcher;
private final NameNodeConnector nnc;
private final BalancingPolicy policy;
private final Set<String> sourceNodes;
private final boolean runDuringUpgrade;
private final double threshold;
private final long maxSizeToMove;
private final long defaultBlockSize;
// all data node lists
private final Collection<Source> overUtilized = new LinkedList<Source>();
@ -261,16 +265,20 @@ public class Balancer {
DFSConfigKeys.DFS_BALANCER_GETBLOCKS_MIN_BLOCK_SIZE_DEFAULT);
this.nnc = theblockpool;
this.dispatcher = new Dispatcher(theblockpool, p.nodesToBeIncluded,
p.nodesToBeExcluded, movedWinWidth, moverThreads, dispatcherThreads,
this.dispatcher = new Dispatcher(theblockpool, p.includedNodes,
p.excludedNodes, movedWinWidth, moverThreads, dispatcherThreads,
maxConcurrentMovesPerNode, getBlocksSize, getBlocksMinBlockSize, conf);
this.threshold = p.threshold;
this.policy = p.policy;
this.sourceNodes = p.sourceNodes;
this.runDuringUpgrade = p.runDuringUpgrade;
this.maxSizeToMove = getLong(conf,
DFSConfigKeys.DFS_BALANCER_MAX_SIZE_TO_MOVE_KEY,
DFSConfigKeys.DFS_BALANCER_MAX_SIZE_TO_MOVE_DEFAULT);
this.defaultBlockSize = getLong(conf,
DFSConfigKeys.DFS_BLOCK_SIZE_KEY,
DFSConfigKeys.DFS_BLOCK_SIZE_DEFAULT);
}
private static long getCapacity(DatanodeStorageReport report, StorageType t) {
@ -283,11 +291,13 @@ public class Balancer {
return capacity;
}
private static long getRemaining(DatanodeStorageReport report, StorageType t) {
private long getRemaining(DatanodeStorageReport report, StorageType t) {
long remaining = 0L;
for(StorageReport r : report.getStorageReports()) {
if (r.getStorage().getStorageType() == t) {
remaining += r.getRemaining();
if (r.getRemaining() >= defaultBlockSize) {
remaining += r.getRemaining();
}
}
}
return remaining;
@ -314,17 +324,26 @@ public class Balancer {
long overLoadedBytes = 0L, underLoadedBytes = 0L;
for(DatanodeStorageReport r : reports) {
final DDatanode dn = dispatcher.newDatanode(r.getDatanodeInfo());
final boolean isSource = Util.isIncluded(sourceNodes, dn.getDatanodeInfo());
for(StorageType t : StorageType.getMovableTypes()) {
final Double utilization = policy.getUtilization(r, t);
if (utilization == null) { // datanode does not have such storage type
continue;
}
final double average = policy.getAvgUtilization(t);
if (utilization >= average && !isSource) {
LOG.info(dn + "[" + t + "] has utilization=" + utilization
+ " >= average=" + average
+ " but it is not specified as a source; skipping it.");
continue;
}
final double utilizationDiff = utilization - average;
final long capacity = getCapacity(r, t);
final double utilizationDiff = utilization - policy.getAvgUtilization(t);
final double thresholdDiff = Math.abs(utilizationDiff) - threshold;
final long maxSize2Move = computeMaxSize2Move(capacity,
getRemaining(r, t), utilizationDiff, threshold, maxSizeToMove);
getRemaining(r, t), utilizationDiff, maxSizeToMove);
final StorageGroup g;
if (utilizationDiff > 0) {
@ -361,8 +380,8 @@ public class Balancer {
}
private static long computeMaxSize2Move(final long capacity, final long remaining,
final double utilizationDiff, final double threshold, final long max) {
final double diff = Math.min(threshold, Math.abs(utilizationDiff));
final double utilizationDiff, final long max) {
final double diff = Math.abs(utilizationDiff);
long maxSizeToMove = percentage2bytes(diff, capacity);
if (utilizationDiff < 0) {
maxSizeToMove = Math.min(remaining, maxSizeToMove);
@ -619,6 +638,9 @@ public class Balancer {
DFSConfigKeys.DFS_NAMENODE_REPLICATION_INTERVAL_DEFAULT) * 1000;
LOG.info("namenodes = " + namenodes);
LOG.info("parameters = " + p);
LOG.info("included nodes = " + p.includedNodes);
LOG.info("excluded nodes = " + p.excludedNodes);
LOG.info("source nodes = " + p.sourceNodes);
System.out.println("Time Stamp Iteration# Bytes Already Moved Bytes Left To Move Bytes Being Moved");
@ -682,29 +704,35 @@ public class Balancer {
static final Parameters DEFAULT = new Parameters(
BalancingPolicy.Node.INSTANCE, 10.0,
NameNodeConnector.DEFAULT_MAX_IDLE_ITERATIONS,
Collections.<String> emptySet(), Collections.<String> emptySet(),
Collections.<String>emptySet(), Collections.<String>emptySet(),
Collections.<String>emptySet(),
false);
final BalancingPolicy policy;
final double threshold;
final int maxIdleIteration;
// exclude the nodes in this set from balancing operations
Set<String> nodesToBeExcluded;
//include only these nodes in balancing operations
Set<String> nodesToBeIncluded;
/** Exclude the nodes in this set. */
final Set<String> excludedNodes;
/** If empty, include any node; otherwise, include only these nodes. */
final Set<String> includedNodes;
/** If empty, any node can be a source;
* otherwise, use only these nodes as source nodes.
*/
final Set<String> sourceNodes;
/**
* Whether to run the balancer during upgrade.
*/
final boolean runDuringUpgrade;
Parameters(BalancingPolicy policy, double threshold, int maxIdleIteration,
Set<String> nodesToBeExcluded, Set<String> nodesToBeIncluded,
boolean runDuringUpgrade) {
Set<String> excludedNodes, Set<String> includedNodes,
Set<String> sourceNodes, boolean runDuringUpgrade) {
this.policy = policy;
this.threshold = threshold;
this.maxIdleIteration = maxIdleIteration;
this.nodesToBeExcluded = nodesToBeExcluded;
this.nodesToBeIncluded = nodesToBeIncluded;
this.excludedNodes = excludedNodes;
this.includedNodes = includedNodes;
this.sourceNodes = sourceNodes;
this.runDuringUpgrade = runDuringUpgrade;
}
@ -712,13 +740,14 @@ public class Balancer {
public String toString() {
return String.format("%s.%s [%s,"
+ " threshold = %s,"
+ " max idle iteration = %s, "
+ "number of nodes to be excluded = %s,"
+ " number of nodes to be included = %s,"
+ " max idle iteration = %s,"
+ " #excluded nodes = %s,"
+ " #included nodes = %s,"
+ " #source nodes = %s,"
+ " run during upgrade = %s]",
Balancer.class.getSimpleName(), getClass().getSimpleName(),
policy, threshold, maxIdleIteration,
nodesToBeExcluded.size(), nodesToBeIncluded.size(),
excludedNodes.size(), includedNodes.size(), sourceNodes.size(),
runDuringUpgrade);
}
}
@ -759,8 +788,9 @@ public class Balancer {
BalancingPolicy policy = Parameters.DEFAULT.policy;
double threshold = Parameters.DEFAULT.threshold;
int maxIdleIteration = Parameters.DEFAULT.maxIdleIteration;
Set<String> nodesTobeExcluded = Parameters.DEFAULT.nodesToBeExcluded;
Set<String> nodesTobeIncluded = Parameters.DEFAULT.nodesToBeIncluded;
Set<String> excludedNodes = Parameters.DEFAULT.excludedNodes;
Set<String> includedNodes = Parameters.DEFAULT.includedNodes;
Set<String> sourceNodes = Parameters.DEFAULT.sourceNodes;
boolean runDuringUpgrade = Parameters.DEFAULT.runDuringUpgrade;
if (args != null) {
@ -792,29 +822,14 @@ public class Balancer {
throw e;
}
} else if ("-exclude".equalsIgnoreCase(args[i])) {
checkArgument(++i < args.length,
"List of nodes to exclude | -f <filename> is missing: args = "
+ Arrays.toString(args));
if ("-f".equalsIgnoreCase(args[i])) {
checkArgument(++i < args.length,
"File containing nodes to exclude is not specified: args = "
+ Arrays.toString(args));
nodesTobeExcluded = Util.getHostListFromFile(args[i], "exclude");
} else {
nodesTobeExcluded = Util.parseHostList(args[i]);
}
excludedNodes = new HashSet<>();
i = processHostList(args, i, "exclude", excludedNodes);
} else if ("-include".equalsIgnoreCase(args[i])) {
checkArgument(++i < args.length,
"List of nodes to include | -f <filename> is missing: args = "
+ Arrays.toString(args));
if ("-f".equalsIgnoreCase(args[i])) {
checkArgument(++i < args.length,
"File containing nodes to include is not specified: args = "
+ Arrays.toString(args));
nodesTobeIncluded = Util.getHostListFromFile(args[i], "include");
} else {
nodesTobeIncluded = Util.parseHostList(args[i]);
}
includedNodes = new HashSet<>();
i = processHostList(args, i, "include", includedNodes);
} else if ("-source".equalsIgnoreCase(args[i])) {
sourceNodes = new HashSet<>();
i = processHostList(args, i, "source", sourceNodes);
} else if ("-idleiterations".equalsIgnoreCase(args[i])) {
checkArgument(++i < args.length,
"idleiterations value is missing: args = " + Arrays
@ -832,7 +847,7 @@ public class Balancer {
+ Arrays.toString(args));
}
}
checkArgument(nodesTobeExcluded.isEmpty() || nodesTobeIncluded.isEmpty(),
checkArgument(excludedNodes.isEmpty() || includedNodes.isEmpty(),
"-exclude and -include options cannot be specified together.");
} catch(RuntimeException e) {
printUsage(System.err);
@ -841,7 +856,31 @@ public class Balancer {
}
return new Parameters(policy, threshold, maxIdleIteration,
nodesTobeExcluded, nodesTobeIncluded, runDuringUpgrade);
excludedNodes, includedNodes, sourceNodes, runDuringUpgrade);
}
private static int processHostList(String[] args, int i, String type,
Set<String> nodes) {
Preconditions.checkArgument(++i < args.length,
"List of %s nodes | -f <filename> is missing: args=%s",
type, Arrays.toString(args));
if ("-f".equalsIgnoreCase(args[i])) {
Preconditions.checkArgument(++i < args.length,
"File containing %s nodes is not specified: args=%s",
type, Arrays.toString(args));
final String filename = args[i];
try {
HostsFileReader.readFileToSet(type, filename, nodes);
} catch (IOException e) {
throw new IllegalArgumentException(
"Failed to read " + type + " node list from file: " + filename);
}
} else {
final String[] addresses = StringUtils.getTrimmedStrings(args[i]);
nodes.addAll(Arrays.asList(addresses));
}
return i;
}
private static void printUsage(PrintStream out) {
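
The new processHostList above keeps the old -exclude/-include behavior (an inline comma-separated list, or -f <file> read through HostsFileReader.readFileToSet) and reuses it for the new -source option; with -source set, a datanode at or above the average utilization that is not listed is skipped as a balancing source, per the isSource check added earlier in this file. A rough standalone sketch of the inline-list branch is below; it uses plain JDK string handling in place of StringUtils.getTrimmedStrings, so read it as an approximation, not the Balancer's code.

import java.util.HashSet;
import java.util.Set;

// Rough sketch of the inline-list branch of processHostList above. Plain JDK
// calls stand in for StringUtils.getTrimmedStrings; the "-f <file>" branch is
// omitted.
public class HostListSketch {

  static Set<String> parseInlineHostList(String commaSeparated) {
    Set<String> nodes = new HashSet<>();
    for (String host : commaSeparated.split(",")) {
      String trimmed = host.trim();
      if (!trimmed.isEmpty()) {   // skip empty entries such as the gap in "a,,b"
        nodes.add(trimmed);
      }
    }
    return nodes;
  }

  public static void main(String[] args) {
    // Roughly what "-source dn1:50010, dn2:50010" would be parsed into.
    System.out.println(parseInlineHostList("dn1:50010, dn2:50010"));
  }
}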


@ -17,8 +17,8 @@
*/
package org.apache.hadoop.hdfs.server.balancer;
import static org.apache.hadoop.hdfs.protocolPB.PBHelper.vintPrefixed;
import static org.apache.hadoop.hdfs.util.StripedBlockUtil.getInternalBlockLength;
import static org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed;
import java.io.BufferedInputStream;
import java.io.BufferedOutputStream;
@ -29,7 +29,6 @@ import java.io.InputStream;
import java.io.OutputStream;
import java.net.Socket;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.EnumMap;
import java.util.HashMap;
@ -64,7 +63,6 @@ import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseP
import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status;
import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
import org.apache.hadoop.hdfs.server.balancer.Dispatcher.DDatanode.StorageGroup;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
import org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations;
import org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations.BlockWithLocations;
import org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations.StripedBlockWithLocations;
@ -73,7 +71,6 @@ import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.net.NetworkTopology;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.util.HostsFileReader;
import org.apache.hadoop.util.StringUtils;
import org.apache.hadoop.util.Time;
@ -325,7 +322,7 @@ public class Dispatcher {
try {
sock.connect(
NetUtils.createSocketAddr(target.getDatanodeInfo().getXferAddr()),
HdfsServerConstants.READ_TIMEOUT);
HdfsConstants.READ_TIMEOUT);
sock.setKeepAlive(true);
@ -857,7 +854,11 @@ public class Dispatcher {
if (shouldFetchMoreBlocks()) {
// fetch new blocks
try {
blocksToReceive -= getBlockList();
final long received = getBlockList();
if (received == 0) {
return;
}
blocksToReceive -= received;
continue;
} catch (IOException e) {
LOG.warn("Exception while getting reportedBlock list", e);
@ -986,8 +987,11 @@ public class Dispatcher {
if (decommissioned || decommissioning || excluded || notIncluded) {
if (LOG.isTraceEnabled()) {
LOG.trace("Excluding datanode " + dn + ": " + decommissioned + ", "
+ decommissioning + ", " + excluded + ", " + notIncluded);
LOG.trace("Excluding datanode " + dn
+ ": decommissioned=" + decommissioned
+ ", decommissioning=" + decommissioning
+ ", excluded=" + excluded
+ ", notIncluded=" + notIncluded);
}
return true;
}
@ -1273,31 +1277,5 @@ public class Dispatcher {
}
return (nodes.contains(host) || nodes.contains(host + ":" + port));
}
/**
* Parse a comma separated string to obtain set of host names
*
* @return set of host names
*/
static Set<String> parseHostList(String string) {
String[] addrs = StringUtils.getTrimmedStrings(string);
return new HashSet<String>(Arrays.asList(addrs));
}
/**
* Read set of host names from a file
*
* @return set of host names
*/
static Set<String> getHostListFromFile(String fileName, String type) {
Set<String> nodes = new HashSet<String>();
try {
HostsFileReader.readFileToSet(type, fileName, nodes);
return StringUtils.getTrimmedStrings(nodes);
} catch (IOException e) {
throw new IllegalArgumentException(
"Failed to read host list from file: " + fileName);
}
}
}
}
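
The dispatch-loop change above returns as soon as getBlockList() reports that nothing new was received, instead of looping again with an unchanged blocksToReceive budget. A simplified standalone sketch of that guard follows; fetchMore() and the byte counts are invented for illustration and are not Dispatcher code.

// Simplified sketch of the guard added to the dispatch loop above: stop asking
// for more work once a fetch returns nothing, rather than retrying with an
// unchanged budget. fetchMore() is a stand-in, not Dispatcher code.
public class FetchLoopSketch {

  private int fetchesLeft = 3;

  // Pretend work source: returns some bytes a few times, then nothing.
  long fetchMore() {
    return fetchesLeft-- > 0 ? 1024L : 0L;
  }

  void run(long bytesToReceive) {
    while (bytesToReceive > 0) {
      final long received = fetchMore();
      if (received == 0) {
        return;                    // nothing new arrived; give up this pass
      }
      bytesToReceive -= received;  // shrink the budget only by what arrived
    }
  }

  public static void main(String[] args) {
    new FetchLoopSketch().run(10_000L);
    System.out.println("loop exited instead of spinning on empty fetches");
  }
}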


@ -60,7 +60,7 @@ public interface BlockCollection {
*/
public short getPreferredBlockReplication();
/**
/**
* @return the storage policy ID.
*/
public byte getStoragePolicyID();
@ -90,5 +90,10 @@ public interface BlockCollection {
/**
* @return whether the block collection is in striping format
*/
public boolean isStriped();
boolean isStriped();
/**
* @return the id for the block collection
*/
long getId();
}


@ -19,12 +19,16 @@ package org.apache.hadoop.hdfs.server.blockmanagement;
import java.io.IOException;
import java.util.LinkedList;
import java.util.List;
import com.google.common.base.Preconditions;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.BlockUCState;
import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.apache.hadoop.util.LightWeightGSet;
import static org.apache.hadoop.hdfs.server.namenode.INodeId.INVALID_INODE_ID;
/**
* For a given block (or an erasure coding block group), BlockInfo class
* maintains 1) the {@link BlockCollection} it is part of, and 2) datanodes
@ -33,11 +37,20 @@ import org.apache.hadoop.util.LightWeightGSet;
*/
public abstract class BlockInfo extends Block
implements LightWeightGSet.LinkedElement {
public static final BlockInfo[] EMPTY_ARRAY = {};
private BlockCollection bc;
/**
* Replication factor.
*/
private short replication;
/** For implementing {@link LightWeightGSet.LinkedElement} interface */
/**
* Block collection ID.
*/
private long bcId;
/** For implementing {@link LightWeightGSet.LinkedElement} interface. */
private LightWeightGSet.LinkedElement nextLinkedElement;
/**
@ -63,21 +76,35 @@ public abstract class BlockInfo extends Block
*/
public BlockInfo(short size) {
this.triplets = new Object[3 * size];
this.bc = null;
this.bcId = INVALID_INODE_ID;
this.replication = isStriped() ? 0 : size;
}
public BlockInfo(Block blk, short size) {
super(blk);
this.triplets = new Object[3 * size];
this.bc = null;
this.triplets = new Object[3*size];
this.bcId = INVALID_INODE_ID;
this.replication = isStriped() ? 0 : size;
}
public BlockCollection getBlockCollection() {
return bc;
public short getReplication() {
return replication;
}
public void setBlockCollection(BlockCollection bc) {
this.bc = bc;
public void setReplication(short repl) {
this.replication = repl;
}
public long getBlockCollectionId() {
return bcId;
}
public void setBlockCollectionId(long id) {
this.bcId = id;
}
public boolean isDeleted() {
return bcId == INVALID_INODE_ID;
}
public DatanodeDescriptor getDatanode(int index) {
@ -291,10 +318,6 @@ public abstract class BlockInfo extends Block
return this;
}
public boolean isDeleted() {
return (bc == null);
}
@Override
public int hashCode() {
// Super implementation is sufficient
@ -342,7 +365,8 @@ public abstract class BlockInfo extends Block
public void convertToBlockUnderConstruction(BlockUCState s,
DatanodeStorageInfo[] targets) {
if (isComplete()) {
uc = new BlockUnderConstructionFeature(this, s, targets, this.isStriped());
uc = new BlockUnderConstructionFeature(this, s, targets,
this.isStriped());
} else {
// the block is already under construction
uc.setBlockUCState(s);
@ -377,7 +401,12 @@ public abstract class BlockInfo extends Block
setGenerationStamp(genStamp);
// Remove the replicas with wrong gen stamp
uc.removeStaleReplicas(this);
List<ReplicaUnderConstruction> staleReplicas = uc.getStaleReplicas(genStamp);
for (ReplicaUnderConstruction r : staleReplicas) {
r.getExpectedStorageLocation().removeBlock(this);
NameNode.blockStateChangeLog.debug("BLOCK* Removing stale replica "
+ "from location: {}", r.getExpectedStorageLocation());
}
}
/**
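
The BlockInfo hunks above drop the direct BlockCollection back-reference in favor of a primitive block-collection id (bcId), with INVALID_INODE_ID doubling as the deleted marker. A small standalone sketch of that id-based back-reference pattern follows; Item and Registry are invented names, and the lookup step only hints at how an owner would be resolved by id.

import java.util.HashMap;
import java.util.Map;

// Standalone sketch of the pattern used in the BlockInfo change above: keep a
// primitive owner id instead of an object reference, and treat one sentinel id
// as "no owner / deleted". Item and Registry are illustrative names only.
public class IdBackReferenceSketch {

  static final long INVALID_ID = -1L;   // plays the role of INVALID_INODE_ID

  static final class Item {
    private long ownerId = INVALID_ID;  // plays the role of BlockInfo.bcId

    void setOwnerId(long id) { this.ownerId = id; }
    long getOwnerId()        { return ownerId; }
    boolean isDeleted()      { return ownerId == INVALID_ID; }
  }

  // Owners are resolved by id on demand, the way a namespace resolves an inode
  // id, instead of being pinned by a reference from every Item.
  static final class Registry {
    private final Map<Long, String> owners = new HashMap<>();
    void register(long id, String owner) { owners.put(id, owner); }
    String resolve(long id)              { return owners.get(id); }
  }

  public static void main(String[] args) {
    Registry registry = new Registry();
    registry.register(1001L, "file-A");

    Item block = new Item();
    block.setOwnerId(1001L);
    System.out.println("owner = " + registry.resolve(block.getOwnerId()));

    block.setOwnerId(INVALID_ID);       // mirrors clearing bcId on deletion
    System.out.println("deleted? " + block.isDeleted());
  }
}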


@ -34,17 +34,6 @@ public class BlockInfoContiguous extends BlockInfo {
super(blk, size);
}
/**
* Copy construction. This is used to convert
* BlockReplicationInfoUnderConstruction
*
* @param from BlockReplicationInfo to copy from.
*/
protected BlockInfoContiguous(BlockInfoContiguous from) {
this(from, (short) (from.triplets.length / 3));
this.setBlockCollection(from.getBlockCollection());
}
/**
* Ensure that there is enough space to include num more triplets.
* @return first free triplet index.

Some files were not shown because too many files have changed in this diff.