Merging r1544666 through r1547120 from trunk to branch HDFS-2832
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/HDFS-2832@1547122 13f79535-47bb-0310-9956-ffa450edef68
commit 18159be495
@@ -388,6 +388,8 @@ Release 2.3.0 - UNRELEASED
HADOOP-10111. Allow DU to be initialized with an initial value (Kihwal Lee
via jeagles)

HADOOP-10126. LightWeightGSet log message is confusing. (Vinay via suresh)

OPTIMIZATIONS

HADOOP-9748. Reduce blocking on UGI.ensureInitialized (daryn)

@@ -450,6 +452,9 @@ Release 2.3.0 - UNRELEASED
HADOOP-10107. Server.getNumOpenConnections may throw NPE. (Kihwal Lee via
jing9)

HADOOP-10135 writes to swift fs over partition size leave temp files and
empty output file (David Dobbins via stevel)

Release 2.2.1 - UNRELEASED

INCOMPATIBLE CHANGES

@@ -467,6 +472,9 @@ Release 2.2.1 - UNRELEASED
HADOOP-9623 Update jets3t dependency to 0.9.0. (Amandeep Khurana via Colin
Patrick McCabe)

HADOOP-10132. RPC#stopProxy() should log the class of proxy when IllegalArgumentException
is encountered (Ted yu via umamahesh)

OPTIMIZATIONS

BUG FIXES

@@ -508,6 +516,9 @@ Release 2.2.1 - UNRELEASED
HADOOP-9114. After defined the dfs.checksum.type as the NULL, write file and hflush will
through java.lang.ArrayIndexOutOfBoundsException (Sathish via umamahesh)

HADOOP-10130. RawLocalFS::LocalFSFileInputStream.pread does not track
FS::Statistics (Binglin Chang via Colin Patrick McCabe)

Release 2.2.0 - 2013-10-13

INCOMPATIBLE CHANGES
@@ -83,39 +83,6 @@ public class RawLocalFileSystem extends FileSystem {
setConf(conf);
}

class TrackingFileInputStream extends FileInputStream {
public TrackingFileInputStream(File f) throws IOException {
super(f);
}

@Override
public int read() throws IOException {
int result = super.read();
if (result != -1) {
statistics.incrementBytesRead(1);
}
return result;
}

@Override
public int read(byte[] data) throws IOException {
int result = super.read(data);
if (result != -1) {
statistics.incrementBytesRead(result);
}
return result;
}

@Override
public int read(byte[] data, int offset, int length) throws IOException {
int result = super.read(data, offset, length);
if (result != -1) {
statistics.incrementBytesRead(result);
}
return result;
}
}

/*******************************************************
* For open()'s FSInputStream.
*******************************************************/

@@ -124,7 +91,7 @@ public class RawLocalFileSystem extends FileSystem {
private long position;

public LocalFSFileInputStream(Path f) throws IOException {
this.fis = new TrackingFileInputStream(pathToFile(f));
fis = new FileInputStream(pathToFile(f));
}

@Override

@@ -159,6 +126,7 @@ public class RawLocalFileSystem extends FileSystem {
int value = fis.read();
if (value >= 0) {
this.position++;
statistics.incrementBytesRead(1);
}
return value;
} catch (IOException e) { // unexpected exception

@@ -172,6 +140,7 @@ public class RawLocalFileSystem extends FileSystem {
int value = fis.read(b, off, len);
if (value > 0) {
this.position += value;
statistics.incrementBytesRead(value);
}
return value;
} catch (IOException e) { // unexpected exception

@@ -184,7 +153,11 @@ public class RawLocalFileSystem extends FileSystem {
throws IOException {
ByteBuffer bb = ByteBuffer.wrap(b, off, len);
try {
return fis.getChannel().read(bb, position);
int value = fis.getChannel().read(bb, position);
if (value > 0) {
statistics.incrementBytesRead(value);
}
return value;
} catch (IOException e) {
throw new FSError(e);
}
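The hunks above (HADOOP-10130) drop TrackingFileInputStream and count bytes directly in LocalFSFileInputStream, including the positional-read path that previously bypassed FileSystem statistics. A minimal sketch of that pattern, using a hypothetical stand-alone counter in place of Hadoop's FileSystem.Statistics:

import java.io.FileInputStream;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.util.concurrent.atomic.AtomicLong;

class CountingPositionalReader {
  // Stand-in for the per-scheme FileSystem.Statistics object used in the diff.
  static final AtomicLong bytesRead = new AtomicLong();

  static int pread(FileInputStream fis, byte[] b, int off, int len, long position)
      throws IOException {
    ByteBuffer bb = ByteBuffer.wrap(b, off, len);
    // Positional read through the channel: does not move the stream position.
    int value = fis.getChannel().read(bb, position);
    if (value > 0) {
      bytesRead.addAndGet(value); // mirrors statistics.incrementBytesRead(value)
    }
    return value;
  }
}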
@@ -19,12 +19,13 @@ package org.apache.hadoop.http;

import java.io.FileNotFoundException;
import java.io.IOException;
import java.io.PrintWriter;
import java.io.InterruptedIOException;
import java.io.PrintWriter;
import java.net.BindException;
import java.net.InetSocketAddress;
import java.net.URI;
import java.net.URISyntaxException;
import java.net.URL;
import java.security.GeneralSecurityException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.Enumeration;

@@ -32,7 +33,6 @@ import java.util.HashMap;
import java.util.List;
import java.util.Map;

import javax.net.ssl.SSLServerSocketFactory;
import javax.servlet.Filter;
import javax.servlet.FilterChain;
import javax.servlet.FilterConfig;

@@ -60,7 +60,6 @@ import org.apache.hadoop.security.SecurityUtil;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.authentication.server.AuthenticationFilter;
import org.apache.hadoop.security.authorize.AccessControlList;
import org.apache.hadoop.security.ssl.SSLFactory;
import org.apache.hadoop.util.ReflectionUtils;
import org.apache.hadoop.util.Shell;
import org.mortbay.io.Buffer;

@@ -71,8 +70,8 @@ import org.mortbay.jetty.RequestLog;
import org.mortbay.jetty.Server;
import org.mortbay.jetty.handler.ContextHandler;
import org.mortbay.jetty.handler.ContextHandlerCollection;
import org.mortbay.jetty.handler.RequestLogHandler;
import org.mortbay.jetty.handler.HandlerCollection;
import org.mortbay.jetty.handler.RequestLogHandler;
import org.mortbay.jetty.nio.SelectChannelConnector;
import org.mortbay.jetty.security.SslSocketConnector;
import org.mortbay.jetty.servlet.Context;

@@ -86,6 +85,7 @@ import org.mortbay.thread.QueuedThreadPool;
import org.mortbay.util.MultiException;

import com.google.common.base.Preconditions;
import com.google.common.collect.Lists;
import com.sun.jersey.spi.container.servlet.ServletContainer;

/**

@@ -114,11 +114,25 @@ public class HttpServer implements FilterContainer {

public static final String BIND_ADDRESS = "bind.address";

private AccessControlList adminsAcl;
private final AccessControlList adminsAcl;

private SSLFactory sslFactory;
protected final Server webServer;
protected final Connector listener;

private static class ListenerInfo {
/**
* Boolean flag to determine whether the HTTP server should clean up the
* listener in stop().
*/
private final boolean isManaged;
private final Connector listener;
private ListenerInfo(boolean isManaged, Connector listener) {
this.isManaged = isManaged;
this.listener = listener;
}
}

private final List<ListenerInfo> listeners = Lists.newArrayList();

protected final WebAppContext webAppContext;
protected final boolean findPort;
protected final Map<Context, Boolean> defaultContexts =
@@ -127,34 +141,111 @@ public class HttpServer implements FilterContainer {
static final String STATE_DESCRIPTION_ALIVE = " - alive";
static final String STATE_DESCRIPTION_NOT_LIVE = " - not live";

private final boolean listenerStartedExternally;

/**
* Class to construct instances of HTTP server with specific options.
*/
public static class Builder {
String name;
String bindAddress;
Integer port;
Boolean findPort;
Configuration conf;
Connector connector;
String[] pathSpecs;
AccessControlList adminsAcl;
boolean securityEnabled = false;
String usernameConfKey = null;
String keytabConfKey = null;
private ArrayList<URI> endpoints = Lists.newArrayList();
private Connector connector;
private String name;
private Configuration conf;
private String[] pathSpecs;
private AccessControlList adminsAcl;
private boolean securityEnabled = false;
private String usernameConfKey;
private String keytabConfKey;
private boolean needsClientAuth;
private String trustStore;
private String trustStorePassword;
private String trustStoreType;

private String keyStore;
private String keyStorePassword;
private String keyStoreType;

// The -keypass option in keytool
private String keyPassword;

@Deprecated
private String bindAddress;
@Deprecated
private int port = -1;

private boolean findPort;

private String hostName;

public Builder setName(String name){
this.name = name;
return this;
}

/**
* Add an endpoint that the HTTP server should listen to.
*
* @param endpoint
* the endpoint of that the HTTP server should listen to. The
* scheme specifies the protocol (i.e. HTTP / HTTPS), the host
* specifies the binding address, and the port specifies the
* listening port. Unspecified or zero port means that the server
* can listen to any port.
*/
public Builder addEndpoint(URI endpoint) {
endpoints.add(endpoint);
return this;
}

/**
* Set the hostname of the http server. The host name is used to resolve the
* _HOST field in Kerberos principals. The hostname of the first listener
* will be used if the name is unspecified.
*/
public Builder hostName(String hostName) {
this.hostName = hostName;
return this;
}

public Builder trustStore(String location, String password, String type) {
this.trustStore = location;
this.trustStorePassword = password;
this.trustStoreType = type;
return this;
}

public Builder keyStore(String location, String password, String type) {
this.keyStore = location;
this.keyStorePassword = password;
this.keyStoreType = type;
return this;
}

public Builder keyPassword(String password) {
this.keyPassword = password;
return this;
}

/**
* Specify whether the server should authorize the client in SSL
* connections.
*/
public Builder needsClientAuth(boolean value) {
this.needsClientAuth = value;
return this;
}

/**
* Use addEndpoint() instead.
*/
@Deprecated
public Builder setBindAddress(String bindAddress){
this.bindAddress = bindAddress;
return this;
}

/**
* Use addEndpoint() instead.
*/
@Deprecated
public Builder setPort(int port) {
this.port = port;
return this;
@@ -204,25 +295,70 @@ public class HttpServer implements FilterContainer {
if (this.name == null) {
throw new HadoopIllegalArgumentException("name is not set");
}
if (this.bindAddress == null) {
throw new HadoopIllegalArgumentException("bindAddress is not set");

// Make the behavior compatible with deprecated interfaces
if (bindAddress != null && port != -1) {
try {
endpoints.add(0, new URI("http", "", bindAddress, port, "", "", ""));
} catch (URISyntaxException e) {
throw new HadoopIllegalArgumentException("Invalid endpoint: "+ e);
}
if (this.port == null) {
throw new HadoopIllegalArgumentException("port is not set");
}
if (this.findPort == null) {
throw new HadoopIllegalArgumentException("findPort is not set");

if (endpoints.size() == 0) {
throw new HadoopIllegalArgumentException("No endpoints specified");
}

if (hostName == null) {
hostName = endpoints.get(0).getHost();
}

if (this.conf == null) {
conf = new Configuration();
}

HttpServer server = new HttpServer(this.name, this.bindAddress, this.port,
this.findPort, this.conf, this.adminsAcl, this.connector, this.pathSpecs);
HttpServer server = new HttpServer(this);

if (this.securityEnabled) {
server.initSpnego(this.conf, this.usernameConfKey, this.keytabConfKey);
server.initSpnego(conf, hostName, usernameConfKey, keytabConfKey);
}

if (connector != null) {
server.addUnmanagedListener(connector);
}

for (URI ep : endpoints) {
Connector listener = null;
String scheme = ep.getScheme();
if ("http".equals(scheme)) {
listener = HttpServer.createDefaultChannelConnector();
} else if ("https".equals(scheme)) {
SslSocketConnector c = new SslSocketConnector();
c.setNeedClientAuth(needsClientAuth);
c.setKeyPassword(keyPassword);

if (keyStore != null) {
c.setKeystore(keyStore);
c.setKeystoreType(keyStoreType);
c.setPassword(keyStorePassword);
}

if (trustStore != null) {
c.setTruststore(trustStore);
c.setTruststoreType(trustStoreType);
c.setTrustPassword(trustStorePassword);
}
listener = c;

} else {
throw new HadoopIllegalArgumentException(
"unknown scheme for endpoint:" + ep);
}
listener.setHost(ep.getHost());
listener.setPort(ep.getPort() == -1 ? 0 : ep.getPort());
server.addManagedListener(listener);
}
server.loadListeners();
return server;
}
}
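A hedged usage sketch of the Builder API added above; the server name and endpoint are illustrative, and only the calls visible in this diff (setName, addEndpoint, setFindPort, setConf, build, start, stop) are assumed:

import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.http.HttpServer;

public class BuilderUsageSketch {
  public static void main(String[] args) throws Exception {
    // Port 0 asks for any free port; setFindPort(true) lets the server probe upward on BindException.
    HttpServer server = new HttpServer.Builder()
        .setName("test")
        .addEndpoint(URI.create("http://localhost:0"))
        .setFindPort(true)
        .setConf(new Configuration())
        .build();
    server.start();             // binds the managed listeners via openListeners()
    System.out.println(server); // toString() now lists every listener
    server.stop();
  }
}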
@@ -314,51 +450,39 @@ public class HttpServer implements FilterContainer {
* @param pathSpecs Path specifications that this httpserver will be serving.
* These will be added to any filters.
*/
@Deprecated
public HttpServer(String name, String bindAddress, int port,
boolean findPort, Configuration conf, AccessControlList adminsAcl,
Connector connector, String[] pathSpecs) throws IOException {
webServer = new Server();
this.findPort = findPort;
this.adminsAcl = adminsAcl;

if(connector == null) {
listenerStartedExternally = false;
if (HttpConfig.isSecure()) {
sslFactory = new SSLFactory(SSLFactory.Mode.SERVER, conf);
try {
sslFactory.init();
} catch (GeneralSecurityException ex) {
throw new IOException(ex);
}
SslSocketConnector sslListener = new SslSocketConnector() {
@Override
protected SSLServerSocketFactory createFactory() throws Exception {
return sslFactory.createSSLServerSocketFactory();
}
};
listener = sslListener;
} else {
listener = createBaseListener(conf);
}
listener.setHost(bindAddress);
listener.setPort(port);
LOG.info("SSL is enabled on " + toString());
} else {
listenerStartedExternally = true;
listener = connector;
this(new Builder().setName(name)
.addEndpoint(URI.create("http://" + bindAddress + ":" + port))
.setFindPort(findPort).setConf(conf).setACL(adminsAcl)
.setConnector(connector).setPathSpec(pathSpecs));
}

webServer.addConnector(listener);
private HttpServer(final Builder b) throws IOException {
final String appDir = getWebAppsPath(b.name);
this.webServer = new Server();
this.adminsAcl = b.adminsAcl;
this.webAppContext = createWebAppContext(b.name, b.conf, adminsAcl, appDir);
this.findPort = b.findPort;
initializeWebServer(b.name, b.hostName, b.conf, b.pathSpecs);
}

private void initializeWebServer(String name, String hostName,
Configuration conf, String[] pathSpecs)
throws FileNotFoundException, IOException {

Preconditions.checkNotNull(webAppContext);

int maxThreads = conf.getInt(HTTP_MAX_THREADS, -1);
// If HTTP_MAX_THREADS is not configured, QueueThreadPool() will use the
// default value (currently 250).
QueuedThreadPool threadPool = maxThreads == -1 ?
new QueuedThreadPool() : new QueuedThreadPool(maxThreads);
QueuedThreadPool threadPool = maxThreads == -1 ? new QueuedThreadPool()
: new QueuedThreadPool(maxThreads);
threadPool.setDaemon(true);
webServer.setThreadPool(threadPool);

final String appDir = getWebAppsPath(name);
ContextHandlerCollection contexts = new ContextHandlerCollection();
RequestLog requestLog = HttpRequestLog.getRequestLog(name);

@@ -368,18 +492,12 @@ public class HttpServer implements FilterContainer {
HandlerCollection handlers = new HandlerCollection();
handlers.setHandlers(new Handler[] { requestLogHandler, contexts });
webServer.setHandler(handlers);
}
else {
} else {
webServer.setHandler(contexts);
}

webAppContext = new WebAppContext();
webAppContext.setDisplayName(name);
webAppContext.setContextPath("/");
webAppContext.setWar(appDir + "/" + name);
webAppContext.getServletContext().setAttribute(CONF_CONTEXT_ATTRIBUTE, conf);
webAppContext.getServletContext().setAttribute(ADMINS_ACL, adminsAcl);
addNoCacheFilter(webAppContext);
final String appDir = getWebAppsPath(name);

webServer.addHandler(webAppContext);

addDefaultApps(contexts, appDir, conf);

@@ -388,7 +506,7 @@ public class HttpServer implements FilterContainer {
final FilterInitializer[] initializers = getFilterInitializers(conf);
if (initializers != null) {
conf = new Configuration(conf);
conf.set(BIND_ADDRESS, bindAddress);
conf.set(BIND_ADDRESS, hostName);
for (FilterInitializer c : initializers) {
c.initFilter(this, conf);
}

@@ -404,10 +522,29 @@ public class HttpServer implements FilterContainer {
}
}

@SuppressWarnings("unchecked")
private void addNoCacheFilter(WebAppContext ctxt) {
defineFilter(ctxt, NO_CACHE_FILTER,
NoCacheFilter.class.getName(), Collections.EMPTY_MAP, new String[] { "/*"});
private void addUnmanagedListener(Connector connector) {
listeners.add(new ListenerInfo(false, connector));
}

private void addManagedListener(Connector connector) {
listeners.add(new ListenerInfo(true, connector));
}

private static WebAppContext createWebAppContext(String name,
Configuration conf, AccessControlList adminsAcl, final String appDir) {
WebAppContext ctx = new WebAppContext();
ctx.setDisplayName(name);
ctx.setContextPath("/");
ctx.setWar(appDir + "/" + name);
ctx.getServletContext().setAttribute(CONF_CONTEXT_ATTRIBUTE, conf);
ctx.getServletContext().setAttribute(ADMINS_ACL, adminsAcl);
addNoCacheFilter(ctx);
return ctx;
}

private static void addNoCacheFilter(WebAppContext ctxt) {
defineFilter(ctxt, NO_CACHE_FILTER, NoCacheFilter.class.getName(),
Collections.<String, String> emptyMap(), new String[] { "/*" });
}

/**
@@ -651,7 +788,7 @@ public class HttpServer implements FilterContainer {
/**
* Define a filter for a context and set up default url mappings.
*/
public void defineFilter(Context ctx, String name,
public static void defineFilter(Context ctx, String name,
String classname, Map<String,String> parameters, String[] urls) {

FilterHolder holder = new FilterHolder();

@@ -715,21 +852,29 @@ public class HttpServer implements FilterContainer {
* Get the port that the server is on
* @return the port
*/
@Deprecated
public int getPort() {
return webServer.getConnectors()[0].getLocalPort();
}

/**
* Get the port that corresponds to a particular connector. In the case of
* HDFS, the second connector corresponds to the HTTPS connector.
* Get the address that corresponds to a particular connector.
*
* @return the corresponding port for the connector, or -1 if there's no such
* connector.
* @return the corresponding address for the connector, or null if there's no
* such connector or the connector is not bounded.
*/
public int getConnectorPort(int index) {
public InetSocketAddress getConnectorAddress(int index) {
Preconditions.checkArgument(index >= 0);
return index < webServer.getConnectors().length ?
webServer.getConnectors()[index].getLocalPort() : -1;
if (index > webServer.getConnectors().length)
return null;

Connector c = webServer.getConnectors()[index];
if (c.getLocalPort() == -1) {
// The connector is not bounded
return null;
}

return new InetSocketAddress(c.getHost(), c.getLocalPort());
}

/**
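Call sites that used getPort() or getListenerAddress() move to getConnectorAddress(int), which returns null while a connector is unbound. A small sketch of that lookup (the helper name is hypothetical; NetUtils.getHostPortString is the same utility the tests below use):

import java.net.InetSocketAddress;
import java.net.MalformedURLException;
import java.net.URL;
import org.apache.hadoop.http.HttpServer;
import org.apache.hadoop.net.NetUtils;

class ConnectorAddressSketch {
  /** Returns the base URL of connector 0, or null when it is not bound yet. */
  static URL baseUrl(HttpServer server) throws MalformedURLException {
    InetSocketAddress addr = server.getConnectorAddress(0); // replaces getPort()/getListenerAddress()
    return addr == null ? null
        : new URL("http://" + NetUtils.getHostPortString(addr) + "/");
  }
}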
@@ -741,67 +886,13 @@ public class HttpServer implements FilterContainer {
pool.setMaxThreads(max);
}

/**
* Configure an ssl listener on the server.
* @param addr address to listen on
* @param keystore location of the keystore
* @param storPass password for the keystore
* @param keyPass password for the key
* @deprecated Use {@link #addSslListener(InetSocketAddress, Configuration, boolean)}
*/
@Deprecated
public void addSslListener(InetSocketAddress addr, String keystore,
String storPass, String keyPass) throws IOException {
if (webServer.isStarted()) {
throw new IOException("Failed to add ssl listener");
}
SslSocketConnector sslListener = new SslSocketConnector();
sslListener.setHost(addr.getHostName());
sslListener.setPort(addr.getPort());
sslListener.setKeystore(keystore);
sslListener.setPassword(storPass);
sslListener.setKeyPassword(keyPass);
webServer.addConnector(sslListener);
}

/**
* Configure an ssl listener on the server.
* @param addr address to listen on
* @param sslConf conf to retrieve ssl options
* @param needCertsAuth whether x509 certificate authentication is required
*/
public void addSslListener(InetSocketAddress addr, Configuration sslConf,
boolean needCertsAuth) throws IOException {
if (webServer.isStarted()) {
throw new IOException("Failed to add ssl listener");
}
if (needCertsAuth) {
// setting up SSL truststore for authenticating clients
System.setProperty("javax.net.ssl.trustStore", sslConf.get(
"ssl.server.truststore.location", ""));
System.setProperty("javax.net.ssl.trustStorePassword", sslConf.get(
"ssl.server.truststore.password", ""));
System.setProperty("javax.net.ssl.trustStoreType", sslConf.get(
"ssl.server.truststore.type", "jks"));
}
SslSocketConnector sslListener = new SslSocketConnector();
sslListener.setHost(addr.getHostName());
sslListener.setPort(addr.getPort());
sslListener.setKeystore(sslConf.get("ssl.server.keystore.location"));
sslListener.setPassword(sslConf.get("ssl.server.keystore.password", ""));
sslListener.setKeyPassword(sslConf.get("ssl.server.keystore.keypassword", ""));
sslListener.setKeystoreType(sslConf.get("ssl.server.keystore.type", "jks"));
sslListener.setNeedClientAuth(needCertsAuth);
webServer.addConnector(sslListener);
}

protected void initSpnego(Configuration conf,
private void initSpnego(Configuration conf, String hostName,
String usernameConfKey, String keytabConfKey) throws IOException {
Map<String, String> params = new HashMap<String, String>();
String principalInConf = conf.get(usernameConfKey);
if (principalInConf != null && !principalInConf.isEmpty()) {
params.put("kerberos.principal",
SecurityUtil.getServerPrincipal(principalInConf, listener.getHost()));
params.put("kerberos.principal", SecurityUtil.getServerPrincipal(
principalInConf, hostName));
}
String httpKeytab = conf.get(keytabConfKey);
if (httpKeytab != null && !httpKeytab.isEmpty()) {
@@ -819,8 +910,7 @@ public class HttpServer implements FilterContainer {
public void start() throws IOException {
try {
try {
openListener();
LOG.info("Jetty bound to port " + listener.getLocalPort());
openListeners();
webServer.start();
} catch (IOException ex) {
LOG.info("HttpServer.start() threw a non Bind IOException", ex);

@@ -856,17 +946,22 @@ public class HttpServer implements FilterContainer {
}
}

private void loadListeners() {
for (ListenerInfo li : listeners) {
webServer.addConnector(li.listener);
}
}

/**
* Open the main listener for the server
* @throws Exception
*/
void openListener() throws Exception {
if (listener.getLocalPort() != -1) { // it's already bound
return;
}
if (listenerStartedExternally) { // Expect that listener was started securely
throw new Exception("Expected webserver's listener to be started " +
"previously but wasn't");
void openListeners() throws Exception {
for (ListenerInfo li : listeners) {
Connector listener = li.listener;
if (!li.isManaged || li.listener.getLocalPort() != -1) {
// This listener is either started externally or has been bound
continue;
}
int port = listener.getPort();
while (true) {

@@ -875,11 +970,12 @@ public class HttpServer implements FilterContainer {
try {
listener.close();
listener.open();
LOG.info("Jetty bound to port " + listener.getLocalPort());
break;
} catch (BindException ex) {
if (port == 0 || !findPort) {
BindException be = new BindException(
"Port in use: " + listener.getHost() + ":" + listener.getPort());
BindException be = new BindException("Port in use: "
+ listener.getHost() + ":" + listener.getPort());
be.initCause(ex);
throw be;
}

@@ -889,17 +985,6 @@ public class HttpServer implements FilterContainer {
Thread.sleep(100);
}
}

/**
* Return the bind address of the listener.
* @return InetSocketAddress of the listener
*/
public InetSocketAddress getListenerAddress() {
int port = listener.getLocalPort();
if (port == -1) { // not bound, return requested port
port = listener.getPort();
}
return new InetSocketAddress(listener.getHost(), port);
}

/**

@@ -907,23 +992,20 @@ public class HttpServer implements FilterContainer {
*/
public void stop() throws Exception {
MultiException exception = null;
try {
listener.close();
} catch (Exception e) {
LOG.error("Error while stopping listener for webapp"
+ webAppContext.getDisplayName(), e);
exception = addMultiException(exception, e);
for (ListenerInfo li : listeners) {
if (!li.isManaged) {
continue;
}

try {
if (sslFactory != null) {
sslFactory.destroy();
}
li.listener.close();
} catch (Exception e) {
LOG.error("Error while destroying the SSLFactory"
LOG.error(
"Error while stopping listener for webapp"
+ webAppContext.getDisplayName(), e);
exception = addMultiException(exception, e);
}
}

try {
// clear & stop webAppContext attributes to avoid memory leaks.

@@ -934,6 +1016,7 @@ public class HttpServer implements FilterContainer {
+ webAppContext.getDisplayName(), e);
exception = addMultiException(exception, e);
}

try {
webServer.stop();
} catch (Exception e) {

@@ -974,10 +1057,17 @@ public class HttpServer implements FilterContainer {
*/
@Override
public String toString() {
return listener != null ?
("HttpServer at http://" + listener.getHost() + ":" + listener.getLocalPort() + "/"
+ (isAlive() ? STATE_DESCRIPTION_ALIVE : STATE_DESCRIPTION_NOT_LIVE))
: "Inactive HttpServer";
if (listeners.size() == 0) {
return "Inactive HttpServer";
} else {
StringBuilder sb = new StringBuilder("HttpServer (")
.append(isAlive() ? STATE_DESCRIPTION_ALIVE : STATE_DESCRIPTION_NOT_LIVE).append("), listening at:");
for (ListenerInfo li : listeners) {
Connector l = li.listener;
sb.append(l.getHost()).append(":").append(l.getPort()).append("/,");
}
return sb.toString();
}
}

/**
@@ -142,6 +142,10 @@ public class NativeIO {
NativeIO.POSIX.posixFadviseIfPossible(identifier, fd, offset,
len, flags);
}

public boolean verifyCanMlock() {
return NativeIO.isAvailable();
}
}

/**

@@ -163,6 +167,10 @@ public class NativeIO {
public long getOperatingSystemPageSize() {
return 4096;
}

public boolean verifyCanMlock() {
return true;
}
}

static {
@@ -634,7 +634,7 @@ public class RPC {
} catch (IOException e) {
LOG.error("Closing proxy or invocation handler caused exception", e);
} catch (IllegalArgumentException e) {
LOG.error("RPC.stopProxy called on non proxy.", e);
LOG.error("RPC.stopProxy called on non proxy: class=" + proxy.getClass().getName(), e);
}

// If you see this error on a mock object in a unit test you're
@@ -22,19 +22,14 @@ import java.io.IOException;
import java.net.InetAddress;
import java.net.InetSocketAddress;
import java.net.URI;
import java.net.URL;
import java.net.URLConnection;
import java.net.UnknownHostException;
import java.security.AccessController;
import java.security.PrivilegedAction;
import java.security.PrivilegedExceptionAction;
import java.util.Arrays;
import java.util.List;
import java.util.Locale;
import java.util.ServiceLoader;
import java.util.Set;

import javax.security.auth.Subject;
import javax.security.auth.kerberos.KerberosPrincipal;
import javax.security.auth.kerberos.KerberosTicket;

@@ -44,22 +39,19 @@ import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.http.HttpConfig;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.security.UserGroupInformation.AuthenticationMethod;
import org.apache.hadoop.security.authentication.client.AuthenticatedURL;
import org.apache.hadoop.security.authentication.client.AuthenticationException;
import org.apache.hadoop.security.ssl.SSLFactory;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.security.token.TokenInfo;

import com.google.common.annotations.VisibleForTesting;

//this will need to be replaced someday when there is a suitable replacement
import sun.net.dns.ResolverConfiguration;
import sun.net.util.IPAddressUtil;

import com.google.common.annotations.VisibleForTesting;

@InterfaceAudience.LimitedPrivate({"HDFS", "MapReduce"})
@InterfaceStability.Evolving
public class SecurityUtil {

@@ -73,22 +65,12 @@ public class SecurityUtil {
@VisibleForTesting
static HostResolver hostResolver;

private static SSLFactory sslFactory;

static {
Configuration conf = new Configuration();
boolean useIp = conf.getBoolean(
CommonConfigurationKeys.HADOOP_SECURITY_TOKEN_SERVICE_USE_IP,
CommonConfigurationKeys.HADOOP_SECURITY_TOKEN_SERVICE_USE_IP_DEFAULT);
setTokenServiceUseIp(useIp);
if (HttpConfig.isSecure()) {
sslFactory = new SSLFactory(SSLFactory.Mode.CLIENT, conf);
try {
sslFactory.init();
} catch (Exception ex) {
throw new RuntimeException(ex);
}
}
}

/**

@@ -102,29 +84,6 @@ public class SecurityUtil {
: new StandardHostResolver();
}

/**
* Find the original TGT within the current subject's credentials. Cross-realm
* TGT's of the form "krbtgt/TWO.COM@ONE.COM" may be present.
*
* @return The TGT from the current subject
* @throws IOException
* if TGT can't be found
*/
private static KerberosTicket getTgtFromSubject() throws IOException {
Subject current = Subject.getSubject(AccessController.getContext());
if (current == null) {
throw new IOException(
"Can't get TGT from current Subject, because it is null");
}
Set<KerberosTicket> tickets = current
.getPrivateCredentials(KerberosTicket.class);
for (KerberosTicket t : tickets) {
if (isOriginalTGT(t))
return t;
}
throw new IOException("Failed to find TGT from current Subject:"+current);
}

/**
* TGS must have the server principal of the form "krbtgt/FOO@FOO".
* @param principal

@@ -492,30 +451,6 @@ public class SecurityUtil {
}
}

/**
* Open a (if need be) secure connection to a URL in a secure environment
* that is using SPNEGO to authenticate its URLs. All Namenode and Secondary
* Namenode URLs that are protected via SPNEGO should be accessed via this
* method.
*
* @param url to authenticate via SPNEGO.
* @return A connection that has been authenticated via SPNEGO
* @throws IOException If unable to authenticate via SPNEGO
*/
public static URLConnection openSecureHttpConnection(URL url) throws IOException {
if (!HttpConfig.isSecure() && !UserGroupInformation.isSecurityEnabled()) {
return url.openConnection();
}

AuthenticatedURL.Token token = new AuthenticatedURL.Token();
try {
return new AuthenticatedURL(null, sslFactory).openConnection(url, token);
} catch (AuthenticationException e) {
throw new IOException("Exception trying to open authenticated connection to "
+ url, e);
}
}

/**
* Resolves a host subject to the security requirements determined by
* hadoop.security.token.service.use_ip.
@@ -348,8 +348,11 @@ public class LightWeightGSet<K, E extends K> implements GSet<K, E> {

LOG.info("Computing capacity for map " + mapName);
LOG.info("VM type = " + vmBit + "-bit");
LOG.info(percentage + "% max memory = "
+ StringUtils.TraditionalBinaryPrefix.long2String(maxMemory, "B", 1));
LOG.info(percentage + "% max memory "
+ StringUtils.TraditionalBinaryPrefix.long2String(maxMemory, "B", 1)
+ " = "
+ StringUtils.TraditionalBinaryPrefix.long2String((long) percentMemory,
"B", 1));
LOG.info("capacity = 2^" + exponent + " = " + c + " entries");
return c;
}
@@ -91,6 +91,7 @@ public abstract class FCStatisticsBaseTest {
FSDataInputStream fstr = fc.open(filePath);
byte[] buf = new byte[blockSize];
int bytesRead = fstr.read(buf, 0, blockSize);
fstr.read(0, buf, 0, blockSize);
Assert.assertEquals(blockSize, bytesRead);
verifyReadBytes(stats);
verifyWrittenBytes(stats);
@@ -47,7 +47,8 @@ public class TestLocalFsFCStatistics extends FCStatisticsBaseTest {

@Override
protected void verifyReadBytes(Statistics stats) {
Assert.assertEquals(blockSize, stats.getBytesRead());
// one blockSize for read, one for pread
Assert.assertEquals(2*blockSize, stats.getBytesRead());
}

@Override
@@ -19,13 +19,16 @@

package org.apache.hadoop.http;

import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.security.authorize.AccessControlList;
import org.junit.Assert;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.http.HttpServer.Builder;

import java.io.File;
import java.io.IOException;
import java.io.InputStream;
import java.net.URI;
import java.net.URL;
import java.net.MalformedURLException;

@@ -120,8 +123,9 @@ public class HttpServerFunctionalTest extends Assert {
public static HttpServer createServer(String host, int port)
throws IOException {
prepareTestWebapp();
return new HttpServer.Builder().setName(TEST).setBindAddress(host)
.setPort(port).setFindPort(true).build();
return new HttpServer.Builder().setName(TEST)
.addEndpoint(URI.create("http://" + host + ":" + port))
.setFindPort(true).build();
}

/**

@@ -131,8 +135,7 @@ public class HttpServerFunctionalTest extends Assert {
* @throws IOException if it could not be created
*/
public static HttpServer createServer(String webapp) throws IOException {
return new HttpServer.Builder().setName(webapp).setBindAddress("0.0.0.0")
.setPort(0).setFindPort(true).build();
return localServerBuilder(webapp).setFindPort(true).build();
}
/**
* Create an HttpServer instance for the given webapp

@@ -143,14 +146,17 @@ public class HttpServerFunctionalTest extends Assert {
*/
public static HttpServer createServer(String webapp, Configuration conf)
throws IOException {
return new HttpServer.Builder().setName(webapp).setBindAddress("0.0.0.0")
.setPort(0).setFindPort(true).setConf(conf).build();
return localServerBuilder(webapp).setFindPort(true).setConf(conf).build();
}

public static HttpServer createServer(String webapp, Configuration conf, AccessControlList adminsAcl)
throws IOException {
return new HttpServer.Builder().setName(webapp).setBindAddress("0.0.0.0")
.setPort(0).setFindPort(true).setConf(conf).setACL(adminsAcl).build();
return localServerBuilder(webapp).setFindPort(true).setConf(conf).setACL(adminsAcl).build();
}

private static Builder localServerBuilder(String webapp) {
return new HttpServer.Builder().setName(webapp).addEndpoint(
URI.create("http://localhost:0"));
}

/**

@@ -163,8 +169,7 @@ public class HttpServerFunctionalTest extends Assert {
*/
public static HttpServer createServer(String webapp, Configuration conf,
String[] pathSpecs) throws IOException {
return new HttpServer.Builder().setName(webapp).setBindAddress("0.0.0.0")
.setPort(0).setFindPort(true).setConf(conf).setPathSpec(pathSpecs).build();
return localServerBuilder(webapp).setFindPort(true).setConf(conf).setPathSpec(pathSpecs).build();
}

/**

@@ -201,8 +206,8 @@ public class HttpServerFunctionalTest extends Assert {
public static URL getServerURL(HttpServer server)
throws MalformedURLException {
assertNotNull("No server", server);
int port = server.getPort();
return new URL("http://localhost:" + port + "/");
return new URL("http://"
+ NetUtils.getHostPortString(server.getConnectorAddress(0)));
}

/**
@@ -36,6 +36,7 @@ import javax.servlet.http.HttpServletRequest;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.net.NetUtils;
import org.junit.Test;

public class TestGlobalFilter extends HttpServerFunctionalTest {

@@ -125,7 +126,8 @@ public class TestGlobalFilter extends HttpServerFunctionalTest {
dataURL, streamFile, rootURL, allURL, outURL, logURL};

//access the urls
final String prefix = "http://localhost:" + http.getPort();
final String prefix = "http://"
+ NetUtils.getHostPortString(http.getConnectorAddress(0));
try {
for(int i = 0; i < urls.length; i++) {
access(prefix + urls[i]);
@@ -20,7 +20,7 @@ package org.apache.hadoop.http;
import java.io.IOException;
import java.io.PrintWriter;
import java.net.HttpURLConnection;
import java.net.InetSocketAddress;
import java.net.URI;
import java.net.URL;
import java.util.Arrays;
import java.util.Enumeration;

@@ -53,6 +53,7 @@ import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.http.HttpServer.QuotingInputFilter.RequestQuoter;
import org.apache.hadoop.http.resource.JerseyResource;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.security.Groups;
import org.apache.hadoop.security.ShellBasedUnixGroupsMapping;
import org.apache.hadoop.security.UserGroupInformation;

@@ -61,6 +62,8 @@ import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.Test;
import org.mockito.Mockito;
import org.mockito.internal.util.reflection.Whitebox;
import org.mortbay.jetty.Connector;
import org.mortbay.util.ajax.JSON;

public class TestHttpServer extends HttpServerFunctionalTest {

@@ -362,11 +365,10 @@ public class TestHttpServer extends HttpServerFunctionalTest {
MyGroupsProvider.mapping.put("userB", Arrays.asList("groupB"));

HttpServer myServer = new HttpServer.Builder().setName("test")
.setBindAddress("0.0.0.0").setPort(0).setFindPort(true).build();
.addEndpoint(new URI("http://localhost:0")).setFindPort(true).build();
myServer.setAttribute(HttpServer.CONF_CONTEXT_ATTRIBUTE, conf);
myServer.start();
int port = myServer.getPort();
String serverURL = "http://localhost:" + port + "/";
String serverURL = "http://" + NetUtils.getHostPortString(myServer.getConnectorAddress(0)) + "/";
for (String servlet : new String[] { "conf", "logs", "stacks",
"logLevel", "metrics" }) {
for (String user : new String[] { "userA", "userB" }) {

@@ -404,12 +406,13 @@ public class TestHttpServer extends HttpServerFunctionalTest {
MyGroupsProvider.mapping.put("userE", Arrays.asList("groupE"));

HttpServer myServer = new HttpServer.Builder().setName("test")
.setBindAddress("0.0.0.0").setPort(0).setFindPort(true).setConf(conf)
.addEndpoint(new URI("http://localhost:0")).setFindPort(true).setConf(conf)
.setACL(new AccessControlList("userA,userB groupC,groupD")).build();
myServer.setAttribute(HttpServer.CONF_CONTEXT_ATTRIBUTE, conf);
myServer.start();
int port = myServer.getPort();
String serverURL = "http://localhost:" + port + "/";

String serverURL = "http://"
+ NetUtils.getHostPortString(myServer.getConnectorAddress(0)) + "/";
for (String servlet : new String[] { "conf", "logs", "stacks",
"logLevel", "metrics" }) {
for (String user : new String[] { "userA", "userB", "userC", "userD" }) {

@@ -520,20 +523,20 @@ public class TestHttpServer extends HttpServerFunctionalTest {
}

@Test public void testBindAddress() throws Exception {
checkBindAddress("0.0.0.0", 0, false).stop();
checkBindAddress("localhost", 0, false).stop();
// hang onto this one for a bit more testing
HttpServer myServer = checkBindAddress("localhost", 0, false);
HttpServer myServer2 = null;
try {
int port = myServer.getListenerAddress().getPort();
int port = myServer.getConnectorAddress(0).getPort();
// it's already in use, true = expect a higher port
myServer2 = checkBindAddress("localhost", port, true);
// try to reuse the port
port = myServer2.getListenerAddress().getPort();
port = myServer2.getConnectorAddress(0).getPort();
myServer2.stop();
assertEquals(-1, myServer2.getPort()); // not bound
myServer2.openListener();
assertEquals(port, myServer2.getPort()); // expect same port
assertNull(myServer2.getConnectorAddress(0)); // not bound
myServer2.openListeners();
assertEquals(port, myServer2.getConnectorAddress(0).getPort()); // expect same port
} finally {
myServer.stop();
if (myServer2 != null) {

@@ -547,21 +550,24 @@ public class TestHttpServer extends HttpServerFunctionalTest {
HttpServer server = createServer(host, port);
try {
// not bound, ephemeral should return requested port (0 for ephemeral)
InetSocketAddress addr = server.getListenerAddress();
assertEquals(port, addr.getPort());
// verify hostname is what was given
server.openListener();
addr = server.getListenerAddress();
assertEquals(host, addr.getHostName());
List<?> listeners = (List<?>) Whitebox.getInternalState(server,
"listeners");
Connector listener = (Connector) Whitebox.getInternalState(
listeners.get(0), "listener");

int boundPort = addr.getPort();
assertEquals(port, listener.getPort());
// verify hostname is what was given
server.openListeners();
assertEquals(host, server.getConnectorAddress(0).getHostName());

int boundPort = server.getConnectorAddress(0).getPort();
if (port == 0) {
assertTrue(boundPort != 0); // ephemeral should now return bound port
} else if (findPort) {
assertTrue(boundPort > port);
// allow a little wiggle room to prevent random test failures if
// some consecutive ports are already in use
assertTrue(addr.getPort() - port < 8);
assertTrue(boundPort - port < 8);
}
} catch (Exception e) {
server.stop();
@@ -36,6 +36,7 @@ import javax.servlet.http.HttpServletRequest;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.net.NetUtils;
import org.junit.Test;

public class TestPathFilter extends HttpServerFunctionalTest {

@@ -126,7 +127,8 @@ public class TestPathFilter extends HttpServerFunctionalTest {

// access the urls and verify our paths specs got added to the
// filters
final String prefix = "http://localhost:" + http.getPort();
final String prefix = "http://"
+ NetUtils.getHostPortString(http.getConnectorAddress(0));
try {
for(int i = 0; i < filteredUrls.length; i++) {
access(prefix + filteredUrls[i]);
@@ -17,105 +17,101 @@
*/
package org.apache.hadoop.http;

import java.io.ByteArrayOutputStream;
import java.io.File;
import java.io.InputStream;
import java.net.URI;
import java.net.URL;

import javax.net.ssl.HttpsURLConnection;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.security.ssl.KeyStoreTestUtil;
import org.apache.hadoop.security.ssl.SSLFactory;
import org.junit.After;
import org.junit.Before;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.Test;

import javax.net.ssl.HttpsURLConnection;
import java.io.File;
import java.io.FileWriter;
import java.io.InputStream;
import java.io.Writer;
import java.net.URL;

/**
* This testcase issues SSL certificates configures the HttpServer to serve
* HTTPS using the created certficates and calls an echo servlet using the
* corresponding HTTPS URL.
*/
public class TestSSLHttpServer extends HttpServerFunctionalTest {
private static final String CONFIG_SITE_XML = "sslhttpserver-site.xml";
private static final String BASEDIR = System.getProperty("test.build.dir",
"target/test-dir") + "/" + TestSSLHttpServer.class.getSimpleName();

private static final String BASEDIR =
System.getProperty("test.build.dir", "target/test-dir") + "/" +
TestSSLHttpServer.class.getSimpleName();

static final Log LOG = LogFactory.getLog(TestSSLHttpServer.class);
private static final Log LOG = LogFactory.getLog(TestSSLHttpServer.class);
private static Configuration conf;
private static HttpServer server;
private static URL baseUrl;
private static String keystoresDir;
private static String sslConfDir;
private static SSLFactory clientSslFactory;

@BeforeClass
public static void setup() throws Exception {
conf = new Configuration();
conf.setInt(HttpServer.HTTP_MAX_THREADS, 10);

@Before
public void setup() throws Exception {
HttpConfig.setPolicy(HttpConfig.Policy.HTTPS_ONLY);
File base = new File(BASEDIR);
FileUtil.fullyDelete(base);
base.mkdirs();
String classpathDir =
KeyStoreTestUtil.getClasspathDir(TestSSLHttpServer.class);
Configuration conf = new Configuration();
String keystoresDir = new File(BASEDIR).getAbsolutePath();
String sslConfsDir =
KeyStoreTestUtil.getClasspathDir(TestSSLHttpServer.class);
KeyStoreTestUtil.setupSSLConfig(keystoresDir, sslConfsDir, conf, false);
conf.setBoolean(CommonConfigurationKeysPublic.HADOOP_SSL_ENABLED_KEY, true);
keystoresDir = new File(BASEDIR).getAbsolutePath();
sslConfDir = KeyStoreTestUtil.getClasspathDir(TestSSLHttpServer.class);

//we do this trick because the MR AppMaster is started in another VM and
//the HttpServer configuration is not loaded from the job.xml but from the
//site.xml files in the classpath
Writer writer = new FileWriter(new File(classpathDir, CONFIG_SITE_XML));
conf.writeXml(writer);
writer.close();
KeyStoreTestUtil.setupSSLConfig(keystoresDir, sslConfDir, conf, false);
Configuration sslConf = new Configuration(false);
sslConf.addResource("ssl-server.xml");
sslConf.addResource("ssl-client.xml");

conf.setInt(HttpServer.HTTP_MAX_THREADS, 10);
conf.addResource(CONFIG_SITE_XML);
server = createServer("test", conf);
clientSslFactory = new SSLFactory(SSLFactory.Mode.CLIENT, sslConf);
clientSslFactory.init();

server = new HttpServer.Builder()
.setName("test")
.addEndpoint(new URI("https://localhost"))
.setConf(conf)
.keyPassword(sslConf.get("ssl.server.keystore.keypassword"))
.keyStore(sslConf.get("ssl.server.keystore.location"),
sslConf.get("ssl.server.keystore.password"),
sslConf.get("ssl.server.keystore.type", "jks"))
.trustStore(sslConf.get("ssl.server.truststore.location"),
sslConf.get("ssl.server.truststore.password"),
sslConf.get("ssl.server.truststore.type", "jks")).build();
server.addServlet("echo", "/echo", TestHttpServer.EchoServlet.class);
server.start();
baseUrl = new URL("https://localhost:" + server.getPort() + "/");
baseUrl = new URL("https://"
+ NetUtils.getHostPortString(server.getConnectorAddress(0)));
LOG.info("HTTP server started: " + baseUrl);
}

@After
public void cleanup() throws Exception {
@AfterClass
public static void cleanup() throws Exception {
server.stop();
String classpathDir =
KeyStoreTestUtil.getClasspathDir(TestSSLHttpServer.class);
new File(classpathDir, CONFIG_SITE_XML).delete();
HttpConfig.setPolicy(HttpConfig.Policy.HTTP_ONLY);
FileUtil.fullyDelete(new File(BASEDIR));
KeyStoreTestUtil.cleanupSSLConfig(keystoresDir, sslConfDir);
clientSslFactory.destroy();
}

@Test
public void testEcho() throws Exception {
assertEquals("a:b\nc:d\n",
readOut(new URL(baseUrl, "/echo?a=b&c=d")));
assertEquals("a:b\nc<:d\ne:>\n",
readOut(new URL(baseUrl, "/echo?a=b&c<=d&e=>")));
assertEquals("a:b\nc:d\n", readOut(new URL(baseUrl, "/echo?a=b&c=d")));
assertEquals("a:b\nc<:d\ne:>\n", readOut(new URL(baseUrl,
"/echo?a=b&c<=d&e=>")));
}

private static String readOut(URL url) throws Exception {
StringBuilder out = new StringBuilder();
HttpsURLConnection conn = (HttpsURLConnection) url.openConnection();
Configuration conf = new Configuration();
conf.addResource(CONFIG_SITE_XML);
SSLFactory sslf = new SSLFactory(SSLFactory.Mode.CLIENT, conf);
sslf.init();
conn.setSSLSocketFactory(sslf.createSSLSocketFactory());
conn.setSSLSocketFactory(clientSslFactory.createSSLSocketFactory());
InputStream in = conn.getInputStream();
byte[] buffer = new byte[64 * 1024];
int len = in.read(buffer);
while (len > 0) {
out.append(new String(buffer, 0, len));
len = in.read(buffer);
}
ByteArrayOutputStream out = new ByteArrayOutputStream();
IOUtils.copyBytes(in, out, 1024);
return out.toString();
}
@@ -35,6 +35,7 @@ import javax.servlet.http.HttpServletRequest;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.test.GenericTestUtils;
import org.junit.Test;

@@ -125,7 +126,8 @@ public class TestServletFilter extends HttpServerFunctionalTest {
}

//access the urls as the sequence
final String prefix = "http://localhost:" + http.getPort();
final String prefix = "http://"
+ NetUtils.getHostPortString(http.getConnectorAddress(0));
try {
for(int i = 0; i < sequence.length; i++) {
access(prefix + urls[sequence[i]]);

@@ -185,7 +187,7 @@ public class TestServletFilter extends HttpServerFunctionalTest {
throws Exception {
Configuration conf = new Configuration();
HttpServer http = createTestServer(conf);
http.defineFilter(http.webAppContext,
HttpServer.defineFilter(http.webAppContext,
"ErrorFilter", ErrorFilter.class.getName(),
null, null);
try {
@ -21,8 +21,10 @@ import java.io.*;
|
|||
import java.net.*;
|
||||
|
||||
import org.apache.hadoop.http.HttpServer;
|
||||
import org.apache.hadoop.net.NetUtils;
|
||||
|
||||
import junit.framework.TestCase;
|
||||
|
||||
import org.apache.commons.logging.*;
|
||||
import org.apache.commons.logging.impl.*;
|
||||
import org.apache.log4j.*;
|
||||
|
@ -43,15 +45,16 @@ public class TestLogLevel extends TestCase {
|
|||
assertTrue(!Level.ERROR.equals(log.getEffectiveLevel()));
|
||||
|
||||
HttpServer server = new HttpServer.Builder().setName("..")
|
||||
.setBindAddress("localhost").setPort(22222).setFindPort(true)
|
||||
.addEndpoint(new URI("http://localhost:0")).setFindPort(true)
|
||||
.build();
|
||||
|
||||
server.start();
|
||||
int port = server.getPort();
|
||||
String authority = NetUtils.getHostPortString(server
|
||||
.getConnectorAddress(0));
|
||||
|
||||
//servlet
|
||||
URL url = new URL("http://localhost:" + port
|
||||
+ "/logLevel?log=" + logName + "&level=" + Level.ERROR);
|
||||
URL url = new URL("http://" + authority + "/logLevel?log=" + logName
|
||||
+ "&level=" + Level.ERROR);
|
||||
out.println("*** Connecting to " + url);
|
||||
URLConnection connection = url.openConnection();
|
||||
connection.connect();
|
||||
|
@ -67,7 +70,7 @@ public class TestLogLevel extends TestCase {
|
|||
assertTrue(Level.ERROR.equals(log.getEffectiveLevel()));
|
||||
|
||||
//command line
|
||||
String[] args = {"-setlevel", "localhost:"+port, logName,""+Level.DEBUG};
|
||||
String[] args = {"-setlevel", authority, logName, Level.DEBUG.toString()};
|
||||
LogLevel.main(args);
|
||||
log.debug("log.debug3");
|
||||
log.info("log.info3");
|
||||
|
|
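The builder now takes explicit endpoints rather than a bind address/port pair, and the client side addresses the server by its actual connector authority. A condensed sketch of the pattern; the server name and log name are placeholders:

    HttpServer server = new HttpServer.Builder().setName("loglevel-test")
        .addEndpoint(new URI("http://localhost:0")).setFindPort(true).build();
    server.start();
    String authority = NetUtils.getHostPortString(server.getConnectorAddress(0));
    URL url = new URL("http://" + authority + "/logLevel?log=foo.Bar&level=DEBUG");
    LogLevel.main(new String[] {"-setlevel", authority, "foo.Bar", Level.DEBUG.toString()});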
|
@ -1,95 +0,0 @@
|
|||
/**
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
* regarding copyright ownership. The ASF licenses this file
|
||||
* to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
package org.apache.hadoop.portmap;
|
||||
|
||||
import org.apache.hadoop.oncrpc.XDR;
|
||||
|
||||
/**
|
||||
* Methods that need to be implemented to provide Portmap RPC program.
|
||||
* See RFC 1833 for details.
|
||||
*/
|
||||
public interface PortmapInterface {
|
||||
public enum Procedure {
|
||||
// the order of the values below is significant.
|
||||
PMAPPROC_NULL,
|
||||
PMAPPROC_SET,
|
||||
PMAPPROC_UNSET,
|
||||
PMAPPROC_GETPORT,
|
||||
PMAPPROC_DUMP,
|
||||
PMAPPROC_CALLIT,
|
||||
PMAPPROC_GETTIME,
|
||||
PMAPPROC_UADDR2TADDR,
|
||||
PMAPPROC_TADDR2UADDR,
|
||||
PMAPPROC_GETVERSADDR,
|
||||
PMAPPROC_INDIRECT,
|
||||
PMAPPROC_GETADDRLIST,
|
||||
PMAPPROC_GETSTAT;
|
||||
|
||||
public int getValue() {
|
||||
return ordinal();
|
||||
}
|
||||
|
||||
public static Procedure fromValue(int value) {
|
||||
if (value < 0 || value >= values().length) {
|
||||
return null;
|
||||
}
|
||||
return values()[value];
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* This procedure does no work. By convention, procedure zero of any protocol
|
||||
* takes no parameters and returns no results.
|
||||
*/
|
||||
public XDR nullOp(int xidd, XDR in, XDR out);
|
||||
|
||||
/**
|
||||
* When a program first becomes available on a machine, it registers itself
|
||||
* with the port mapper program on the same machine. The program passes its
|
||||
* program number "prog", version number "vers", transport protocol number
|
||||
* "prot", and the port "port" on which it awaits service request. The
|
||||
* procedure returns a boolean reply whose value is "TRUE" if the procedure
|
||||
* successfully established the mapping and "FALSE" otherwise. The procedure
|
||||
* refuses to establish a mapping if one already exists for the tuple
|
||||
* "(prog, vers, prot)".
|
||||
*/
|
||||
public XDR set(int xid, XDR in, XDR out);
|
||||
|
||||
/**
|
||||
* When a program becomes unavailable, it should unregister itself with the
|
||||
* port mapper program on the same machine. The parameters and results have
|
||||
* meanings identical to those of "PMAPPROC_SET". The protocol and port number
|
||||
* fields of the argument are ignored.
|
||||
*/
|
||||
public XDR unset(int xid, XDR in, XDR out);
|
||||
|
||||
/**
|
||||
* Given a program number "prog", version number "vers", and transport
|
||||
* protocol number "prot", this procedure returns the port number on which the
|
||||
* program is awaiting call requests. A port value of zeros means the program
|
||||
* has not been registered. The "port" field of the argument is ignored.
|
||||
*/
|
||||
public XDR getport(int xid, XDR in, XDR out);
|
||||
|
||||
/**
|
||||
* This procedure enumerates all entries in the port mapper's database. The
|
||||
* procedure takes no parameters and returns a list of program, version,
|
||||
* protocol, and port values.
|
||||
*/
|
||||
public XDR dump(int xid, XDR in, XDR out);
|
||||
}
|
|
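For reference, the removed Procedure enum mapped RFC 1833 procedure numbers through ordinals, which the replacement class (further down) flattens into plain int constants. A minimal sketch of the old round-trip, assuming the enum exactly as deleted above:

    int wire = PortmapInterface.Procedure.PMAPPROC_GETPORT.getValue();   // 3, the ordinal
    PortmapInterface.Procedure proc = PortmapInterface.Procedure.fromValue(wire);
    assert proc == PortmapInterface.Procedure.PMAPPROC_GETPORT;
    assert PortmapInterface.Procedure.fromValue(99) == null;             // out of range -> null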
@ -22,7 +22,6 @@ import org.apache.hadoop.oncrpc.RpcUtil;
|
|||
import org.apache.hadoop.oncrpc.XDR;
|
||||
import org.apache.hadoop.oncrpc.security.CredentialsNone;
|
||||
import org.apache.hadoop.oncrpc.security.VerifierNone;
|
||||
import org.apache.hadoop.portmap.PortmapInterface.Procedure;
|
||||
|
||||
/**
|
||||
* Helper utility for building portmap request
|
||||
|
@ -37,7 +36,7 @@ public class PortmapRequest {
|
|||
RpcCall call = RpcCall.getInstance(
|
||||
RpcUtil.getNewXid(String.valueOf(RpcProgramPortmap.PROGRAM)),
|
||||
RpcProgramPortmap.PROGRAM, RpcProgramPortmap.VERSION,
|
||||
Procedure.PMAPPROC_SET.getValue(), new CredentialsNone(),
|
||||
RpcProgramPortmap.PMAPPROC_SET, new CredentialsNone(),
|
||||
new VerifierNone());
|
||||
call.write(request);
|
||||
return mapping.serialize(request);
|
||||
|
|
|
@ -17,7 +17,7 @@
|
|||
*/
|
||||
package org.apache.hadoop.portmap;
|
||||
|
||||
import java.util.HashMap;
|
||||
import java.util.concurrent.ConcurrentHashMap;
|
||||
|
||||
import org.apache.commons.logging.Log;
|
||||
import org.apache.commons.logging.LogFactory;
|
||||
|
@ -40,20 +40,26 @@ import org.jboss.netty.handler.timeout.IdleState;
|
|||
import org.jboss.netty.handler.timeout.IdleStateAwareChannelUpstreamHandler;
|
||||
import org.jboss.netty.handler.timeout.IdleStateEvent;
|
||||
|
||||
final class RpcProgramPortmap extends IdleStateAwareChannelUpstreamHandler implements PortmapInterface {
|
||||
final class RpcProgramPortmap extends IdleStateAwareChannelUpstreamHandler {
|
||||
static final int PROGRAM = 100000;
|
||||
static final int VERSION = 2;
|
||||
|
||||
static final int PMAPPROC_NULL = 0;
|
||||
static final int PMAPPROC_SET = 1;
|
||||
static final int PMAPPROC_UNSET = 2;
|
||||
static final int PMAPPROC_GETPORT = 3;
|
||||
static final int PMAPPROC_DUMP = 4;
|
||||
static final int PMAPPROC_GETVERSADDR = 9;
|
||||
|
||||
private static final Log LOG = LogFactory.getLog(RpcProgramPortmap.class);
|
||||
|
||||
/** Map synchronized using the monitor lock of this instance */
|
||||
private final HashMap<String, PortmapMapping> map;
|
||||
private final ConcurrentHashMap<String, PortmapMapping> map = new ConcurrentHashMap<String, PortmapMapping>();
|
||||
|
||||
/** ChannelGroup that remembers all active channels for graceful shutdown. */
|
||||
private final ChannelGroup allChannels;
|
||||
|
||||
RpcProgramPortmap(ChannelGroup allChannels) {
|
||||
this.allChannels = allChannels;
|
||||
map = new HashMap<String, PortmapMapping>(256);
|
||||
PortmapMapping m = new PortmapMapping(PROGRAM, VERSION,
|
||||
PortmapMapping.TRANSPORT_TCP, RpcProgram.RPCB_PORT);
|
||||
PortmapMapping m1 = new PortmapMapping(PROGRAM, VERSION,
|
||||
|
@ -62,47 +68,65 @@ final class RpcProgramPortmap extends IdleStateAwareChannelUpstreamHandler imple
|
|||
map.put(PortmapMapping.key(m1), m1);
|
||||
}
|
||||
|
||||
@Override
|
||||
public XDR nullOp(int xid, XDR in, XDR out) {
|
||||
/**
|
||||
* This procedure does no work. By convention, procedure zero of any protocol
|
||||
* takes no parameters and returns no results.
|
||||
*/
|
||||
private XDR nullOp(int xid, XDR in, XDR out) {
|
||||
return PortmapResponse.voidReply(out, xid);
|
||||
}
|
||||
|
||||
@Override
|
||||
public XDR set(int xid, XDR in, XDR out) {
|
||||
/**
|
||||
* When a program first becomes available on a machine, it registers itself
|
||||
* with the port mapper program on the same machine. The program passes its
|
||||
* program number "prog", version number "vers", transport protocol number
|
||||
* "prot", and the port "port" on which it awaits service request. The
|
||||
* procedure returns a boolean reply whose value is "TRUE" if the procedure
|
||||
* successfully established the mapping and "FALSE" otherwise. The procedure
|
||||
* refuses to establish a mapping if one already exists for the tuple
|
||||
* "(prog, vers, prot)".
|
||||
*/
|
||||
private XDR set(int xid, XDR in, XDR out) {
|
||||
PortmapMapping mapping = PortmapRequest.mapping(in);
|
||||
String key = PortmapMapping.key(mapping);
|
||||
if (LOG.isDebugEnabled()) {
|
||||
LOG.debug("Portmap set key=" + key);
|
||||
}
|
||||
|
||||
PortmapMapping value = null;
|
||||
synchronized(this) {
|
||||
map.put(key, mapping);
|
||||
value = map.get(key);
|
||||
}
|
||||
return PortmapResponse.intReply(out, xid, value.getPort());
|
||||
return PortmapResponse.intReply(out, xid, mapping.getPort());
|
||||
}
|
||||
|
||||
@Override
|
||||
public synchronized XDR unset(int xid, XDR in, XDR out) {
|
||||
/**
|
||||
* When a program becomes unavailable, it should unregister itself with the
|
||||
* port mapper program on the same machine. The parameters and results have
|
||||
* meanings identical to those of "PMAPPROC_SET". The protocol and port number
|
||||
* fields of the argument are ignored.
|
||||
*/
|
||||
private XDR unset(int xid, XDR in, XDR out) {
|
||||
PortmapMapping mapping = PortmapRequest.mapping(in);
|
||||
synchronized(this) {
|
||||
map.remove(PortmapMapping.key(mapping));
|
||||
}
|
||||
String key = PortmapMapping.key(mapping);
|
||||
|
||||
if (LOG.isDebugEnabled())
|
||||
LOG.debug("Portmap remove key=" + key);
|
||||
|
||||
map.remove(key);
|
||||
return PortmapResponse.booleanReply(out, xid, true);
|
||||
}
|
||||
|
||||
@Override
|
||||
public synchronized XDR getport(int xid, XDR in, XDR out) {
|
||||
/**
|
||||
* Given a program number "prog", version number "vers", and transport
|
||||
* protocol number "prot", this procedure returns the port number on which the
|
||||
* program is awaiting call requests. A port value of zeros means the program
|
||||
* has not been registered. The "port" field of the argument is ignored.
|
||||
*/
|
||||
private XDR getport(int xid, XDR in, XDR out) {
|
||||
PortmapMapping mapping = PortmapRequest.mapping(in);
|
||||
String key = PortmapMapping.key(mapping);
|
||||
if (LOG.isDebugEnabled()) {
|
||||
LOG.debug("Portmap GETPORT key=" + key + " " + mapping);
|
||||
}
|
||||
PortmapMapping value = null;
|
||||
synchronized(this) {
|
||||
value = map.get(key);
|
||||
}
|
||||
PortmapMapping value = map.get(key);
|
||||
int res = 0;
|
||||
if (value != null) {
|
||||
res = value.getPort();
|
||||
|
@ -115,13 +139,13 @@ final class RpcProgramPortmap extends IdleStateAwareChannelUpstreamHandler imple
|
|||
return PortmapResponse.intReply(out, xid, res);
|
||||
}
|
||||
|
||||
@Override
|
||||
public synchronized XDR dump(int xid, XDR in, XDR out) {
|
||||
PortmapMapping[] pmapList = null;
|
||||
synchronized(this) {
|
||||
pmapList = new PortmapMapping[map.values().size()];
|
||||
map.values().toArray(pmapList);
|
||||
}
|
||||
/**
|
||||
* This procedure enumerates all entries in the port mapper's database. The
|
||||
* procedure takes no parameters and returns a list of program, version,
|
||||
* protocol, and port values.
|
||||
*/
|
||||
private XDR dump(int xid, XDR in, XDR out) {
|
||||
PortmapMapping[] pmapList = map.values().toArray(new PortmapMapping[0]);
|
||||
return PortmapResponse.pmapList(out, xid, pmapList);
|
||||
}
|
||||
|
||||
|
@ -131,23 +155,23 @@ final class RpcProgramPortmap extends IdleStateAwareChannelUpstreamHandler imple
|
|||
|
||||
RpcInfo info = (RpcInfo) e.getMessage();
|
||||
RpcCall rpcCall = (RpcCall) info.header();
|
||||
final Procedure portmapProc = Procedure.fromValue(rpcCall.getProcedure());
|
||||
final int portmapProc = rpcCall.getProcedure();
|
||||
int xid = rpcCall.getXid();
|
||||
XDR in = new XDR(info.data().toByteBuffer().asReadOnlyBuffer(),
|
||||
XDR.State.READING);
|
||||
XDR out = new XDR();
|
||||
|
||||
if (portmapProc == Procedure.PMAPPROC_NULL) {
|
||||
if (portmapProc == PMAPPROC_NULL) {
|
||||
out = nullOp(xid, in, out);
|
||||
} else if (portmapProc == Procedure.PMAPPROC_SET) {
|
||||
} else if (portmapProc == PMAPPROC_SET) {
|
||||
out = set(xid, in, out);
|
||||
} else if (portmapProc == Procedure.PMAPPROC_UNSET) {
|
||||
} else if (portmapProc == PMAPPROC_UNSET) {
|
||||
out = unset(xid, in, out);
|
||||
} else if (portmapProc == Procedure.PMAPPROC_DUMP) {
|
||||
} else if (portmapProc == PMAPPROC_DUMP) {
|
||||
out = dump(xid, in, out);
|
||||
} else if (portmapProc == Procedure.PMAPPROC_GETPORT) {
|
||||
} else if (portmapProc == PMAPPROC_GETPORT) {
|
||||
out = getport(xid, in, out);
|
||||
} else if (portmapProc == Procedure.PMAPPROC_GETVERSADDR) {
|
||||
} else if (portmapProc == PMAPPROC_GETVERSADDR) {
|
||||
out = getport(xid, in, out);
|
||||
} else {
|
||||
LOG.info("PortmapHandler unknown rpc procedure=" + portmapProc);
|
||||
|
|
|
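Because the registration table is now a ConcurrentHashMap, the synchronized blocks around individual put/get/remove calls can be dropped; the map's per-operation atomicity is enough for the portmap procedures. A hedged sketch of the resulting access pattern (the mapping values below, NFS program 100003 version 3 on port 2049, are just an illustration):

    ConcurrentHashMap<String, PortmapMapping> registrations =
        new ConcurrentHashMap<String, PortmapMapping>();

    PortmapMapping mapping = new PortmapMapping(100003, 3,
        PortmapMapping.TRANSPORT_TCP, 2049);
    String key = PortmapMapping.key(mapping);

    registrations.put(key, mapping);                // PMAPPROC_SET
    PortmapMapping found = registrations.get(key);  // PMAPPROC_GETPORT
    registrations.remove(key);                      // PMAPPROC_UNSET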
@ -23,7 +23,7 @@ import java.net.DatagramPacket;
|
|||
import java.net.DatagramSocket;
|
||||
import java.net.InetSocketAddress;
|
||||
import java.net.Socket;
|
||||
import java.util.HashMap;
|
||||
import java.util.Map;
|
||||
|
||||
import junit.framework.Assert;
|
||||
|
||||
|
@ -80,7 +80,7 @@ public class TestPortmap {
|
|||
XDR req = new XDR();
|
||||
RpcCall.getInstance(++xid, RpcProgramPortmap.PROGRAM,
|
||||
RpcProgramPortmap.VERSION,
|
||||
PortmapInterface.Procedure.PMAPPROC_SET.getValue(),
|
||||
RpcProgramPortmap.PMAPPROC_SET,
|
||||
new CredentialsNone(), new VerifierNone()).write(req);
|
||||
|
||||
PortmapMapping sent = new PortmapMapping(90000, 1,
|
||||
|
@ -101,7 +101,7 @@ public class TestPortmap {
|
|||
Thread.sleep(100);
|
||||
boolean found = false;
|
||||
@SuppressWarnings("unchecked")
|
||||
HashMap<String, PortmapMapping> map = (HashMap<String, PortmapMapping>) Whitebox
|
||||
Map<String, PortmapMapping> map = (Map<String, PortmapMapping>) Whitebox
|
||||
.getInternalState(pm.getHandler(), "map");
|
||||
|
||||
for (PortmapMapping m : map.values()) {
|
||||
|
|
|
@ -709,14 +709,27 @@ class OpenFileCtx {
|
|||
return response;
|
||||
}
|
||||
|
||||
/**
|
||||
* Check the commit status with the given offset
|
||||
* @param commitOffset the offset to commit
|
||||
* @param channel the channel to return response
|
||||
* @param xid the xid of the commit request
|
||||
* @param preOpAttr the preOp attribute
|
||||
* @param fromRead whether the commit is triggered from read request
|
||||
* @return one commit status: COMMIT_FINISHED, COMMIT_WAIT,
|
||||
* COMMIT_INACTIVE_CTX, COMMIT_INACTIVE_WITH_PENDING_WRITE, COMMIT_ERROR
|
||||
*/
|
||||
public COMMIT_STATUS checkCommit(DFSClient dfsClient, long commitOffset,
|
||||
Channel channel, int xid, Nfs3FileAttributes preOpAttr) {
|
||||
Channel channel, int xid, Nfs3FileAttributes preOpAttr, boolean fromRead) {
|
||||
if (!fromRead) {
|
||||
Preconditions.checkState(channel != null && preOpAttr != null);
|
||||
// Keep stream active
|
||||
updateLastAccessTime();
|
||||
}
|
||||
Preconditions.checkState(commitOffset >= 0);
|
||||
|
||||
COMMIT_STATUS ret = checkCommitInternal(commitOffset, channel, xid,
|
||||
preOpAttr);
|
||||
preOpAttr, fromRead);
|
||||
if (LOG.isDebugEnabled()) {
|
||||
LOG.debug("Got commit status: " + ret.name());
|
||||
}
|
||||
|
@ -744,13 +757,9 @@ class OpenFileCtx {
|
|||
return ret;
|
||||
}
|
||||
|
||||
/**
|
||||
* return one commit status: COMMIT_FINISHED, COMMIT_WAIT,
|
||||
* COMMIT_INACTIVE_CTX, COMMIT_INACTIVE_WITH_PENDING_WRITE, COMMIT_ERROR
|
||||
*/
|
||||
@VisibleForTesting
|
||||
synchronized COMMIT_STATUS checkCommitInternal(long commitOffset,
|
||||
Channel channel, int xid, Nfs3FileAttributes preOpAttr) {
|
||||
Channel channel, int xid, Nfs3FileAttributes preOpAttr, boolean fromRead) {
|
||||
if (!activeState) {
|
||||
if (pendingWrites.isEmpty()) {
|
||||
return COMMIT_STATUS.COMMIT_INACTIVE_CTX;
|
||||
|
@ -767,9 +776,11 @@ class OpenFileCtx {
|
|||
|
||||
if (commitOffset > 0) {
|
||||
if (commitOffset > flushed) {
|
||||
if (!fromRead) {
|
||||
CommitCtx commitCtx = new CommitCtx(commitOffset, channel, xid,
|
||||
preOpAttr);
|
||||
pendingCommits.put(commitOffset, commitCtx);
|
||||
}
|
||||
return COMMIT_STATUS.COMMIT_WAIT;
|
||||
} else {
|
||||
return COMMIT_STATUS.COMMIT_DO_SYNC;
|
||||
|
@ -784,11 +795,13 @@ class OpenFileCtx {
|
|||
// do a sync here though the output stream might be closed.
|
||||
return COMMIT_STATUS.COMMIT_FINISHED;
|
||||
} else {
|
||||
if (!fromRead) {
|
||||
// Insert commit
|
||||
long maxOffset = key.getKey().getMax() - 1;
|
||||
Preconditions.checkState(maxOffset > 0);
|
||||
CommitCtx commitCtx = new CommitCtx(maxOffset, channel, xid, preOpAttr);
|
||||
pendingCommits.put(maxOffset, commitCtx);
|
||||
}
|
||||
return COMMIT_STATUS.COMMIT_WAIT;
|
||||
}
|
||||
}
|
||||
|
|
|
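checkCommit() now takes a fromRead flag: commits triggered by a READ neither register a pending CommitCtx nor require a channel or preOp attributes. A sketch of the two call shapes as they appear in WriteManager, with ctx, dfsClient, commitOffset, channel, xid and attr assumed to be in scope:

    // Commit driven by an NFS COMMIT request: may park a CommitCtx until data is synced.
    COMMIT_STATUS fromCommit =
        ctx.checkCommit(dfsClient, commitOffset, channel, xid, attr, false);

    // Commit driven by a READ: never queues a CommitCtx; COMMIT_WAIT just means the data
    // is not flushed yet, which the caller maps to NFS3ERR_JUKEBOX.
    COMMIT_STATUS fromRead =
        ctx.checkCommit(dfsClient, commitOffset, null, 0, null, true);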
@ -628,6 +628,14 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
|
|||
}
|
||||
}
|
||||
|
||||
// In case there is buffered data for the same file, flush it. This can be
|
||||
// optimized later by reading from the cache.
|
||||
int ret = writeManager.commitBeforeRead(dfsClient, handle, offset + count);
|
||||
if (ret != Nfs3Status.NFS3_OK) {
|
||||
LOG.warn("commitBeforeRead didn't succeed with ret=" + ret
|
||||
+ ". Read may not get most recent data.");
|
||||
}
|
||||
|
||||
try {
|
||||
int buffSize = Math.min(MAX_READ_TRANSFER_SIZE, count);
|
||||
byte[] readbuffer = new byte[buffSize];
|
||||
|
|
|
@ -18,7 +18,6 @@
|
|||
package org.apache.hadoop.hdfs.nfs.nfs3;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.util.concurrent.ConcurrentMap;
|
||||
|
||||
import org.apache.commons.logging.Log;
|
||||
import org.apache.commons.logging.LogFactory;
|
||||
|
@ -41,11 +40,9 @@ import org.apache.hadoop.nfs.nfs3.response.WRITE3Response;
|
|||
import org.apache.hadoop.nfs.nfs3.response.WccData;
|
||||
import org.apache.hadoop.oncrpc.XDR;
|
||||
import org.apache.hadoop.oncrpc.security.VerifierNone;
|
||||
import org.apache.hadoop.util.Daemon;
|
||||
import org.jboss.netty.channel.Channel;
|
||||
|
||||
import com.google.common.annotations.VisibleForTesting;
|
||||
import com.google.common.collect.Maps;
|
||||
|
||||
/**
|
||||
* Manage the writes and responds asynchronously.
|
||||
|
@ -207,6 +204,51 @@ public class WriteManager {
|
|||
return;
|
||||
}
|
||||
|
||||
// Do a possible commit before read request in case there is buffered data
|
||||
// inside DFSClient which has been flushed but not synced.
|
||||
int commitBeforeRead(DFSClient dfsClient, FileHandle fileHandle,
|
||||
long commitOffset) {
|
||||
int status;
|
||||
OpenFileCtx openFileCtx = fileContextCache.get(fileHandle);
|
||||
|
||||
if (openFileCtx == null) {
|
||||
if (LOG.isDebugEnabled()) {
|
||||
LOG.debug("No opened stream for fileId:" + fileHandle.getFileId()
|
||||
+ " commitOffset=" + commitOffset
|
||||
+ ". Return success in this case.");
|
||||
}
|
||||
status = Nfs3Status.NFS3_OK;
|
||||
|
||||
} else {
|
||||
COMMIT_STATUS ret = openFileCtx.checkCommit(dfsClient, commitOffset,
|
||||
null, 0, null, true);
|
||||
switch (ret) {
|
||||
case COMMIT_FINISHED:
|
||||
case COMMIT_INACTIVE_CTX:
|
||||
status = Nfs3Status.NFS3_OK;
|
||||
break;
|
||||
case COMMIT_INACTIVE_WITH_PENDING_WRITE:
|
||||
case COMMIT_ERROR:
|
||||
status = Nfs3Status.NFS3ERR_IO;
|
||||
break;
|
||||
case COMMIT_WAIT:
|
||||
/**
|
||||
* This should rarely happen, for example when a read
|
||||
* request arrives before DFSClient is able to quickly flush data to DN,
|
||||
* or prerequisite writes are not available. Won't wait since we don't
|
||||
* want to block read.
|
||||
*/
|
||||
status = Nfs3Status.NFS3ERR_JUKEBOX;
|
||||
break;
|
||||
default:
|
||||
LOG.error("Should not get commit return code:" + ret.name());
|
||||
throw new RuntimeException("Should not get commit return code:"
|
||||
+ ret.name());
|
||||
}
|
||||
}
|
||||
return status;
|
||||
}
|
||||
|
||||
void handleCommit(DFSClient dfsClient, FileHandle fileHandle,
|
||||
long commitOffset, Channel channel, int xid, Nfs3FileAttributes preOpAttr) {
|
||||
int status;
|
||||
|
@ -219,9 +261,8 @@ public class WriteManager {
|
|||
|
||||
} else {
|
||||
COMMIT_STATUS ret = openFileCtx.checkCommit(dfsClient, commitOffset,
|
||||
channel, xid, preOpAttr);
|
||||
channel, xid, preOpAttr, false);
|
||||
switch (ret) {
|
||||
case COMMIT_DO_SYNC:
|
||||
case COMMIT_FINISHED:
|
||||
case COMMIT_INACTIVE_CTX:
|
||||
status = Nfs3Status.NFS3_OK;
|
||||
|
@ -234,6 +275,7 @@ public class WriteManager {
|
|||
// Do nothing. Commit is async now.
|
||||
return;
|
||||
default:
|
||||
LOG.error("Should not get commit return code:" + ret.name());
|
||||
throw new RuntimeException("Should not get commit return code:"
|
||||
+ ret.name());
|
||||
}
|
||||
|
|
|
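commitBeforeRead() translates the commit status into an NFS3 status code without blocking the read path. A condensed sketch of how the READ handler consumes it, assuming dfsClient, handle, offset and count are in scope as in RpcProgramNfs3#read above:

    int ret = writeManager.commitBeforeRead(dfsClient, handle, offset + count);
    if (ret == Nfs3Status.NFS3ERR_JUKEBOX) {
      // Data is still being flushed; NFS3ERR_JUKEBOX conventionally tells the client to retry.
    } else if (ret != Nfs3Status.NFS3_OK) {
      LOG.warn("commitBeforeRead didn't succeed with ret=" + ret
          + ". Read may not get most recent data.");
    }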
@ -17,6 +17,7 @@
|
|||
*/
|
||||
package org.apache.hadoop.hdfs.nfs.nfs3;
|
||||
|
||||
import static org.junit.Assert.assertEquals;
|
||||
import static org.junit.Assert.assertTrue;
|
||||
import static org.junit.Assert.fail;
|
||||
|
||||
|
@ -26,6 +27,7 @@ import java.nio.ByteBuffer;
|
|||
import java.util.Arrays;
|
||||
import java.util.concurrent.ConcurrentNavigableMap;
|
||||
|
||||
import org.apache.hadoop.conf.Configuration;
|
||||
import org.apache.hadoop.hdfs.DFSClient;
|
||||
import org.apache.hadoop.hdfs.HdfsConfiguration;
|
||||
import org.apache.hadoop.hdfs.MiniDFSCluster;
|
||||
|
@ -39,6 +41,7 @@ import org.apache.hadoop.nfs.nfs3.IdUserGroup;
|
|||
import org.apache.hadoop.nfs.nfs3.Nfs3Constant;
|
||||
import org.apache.hadoop.nfs.nfs3.Nfs3Constant.WriteStableHow;
|
||||
import org.apache.hadoop.nfs.nfs3.Nfs3FileAttributes;
|
||||
import org.apache.hadoop.nfs.nfs3.Nfs3Status;
|
||||
import org.apache.hadoop.nfs.nfs3.request.CREATE3Request;
|
||||
import org.apache.hadoop.nfs.nfs3.request.READ3Request;
|
||||
import org.apache.hadoop.nfs.nfs3.request.SetAttr3;
|
||||
|
@ -47,6 +50,7 @@ import org.apache.hadoop.nfs.nfs3.response.CREATE3Response;
|
|||
import org.apache.hadoop.nfs.nfs3.response.READ3Response;
|
||||
import org.apache.hadoop.oncrpc.XDR;
|
||||
import org.apache.hadoop.oncrpc.security.SecurityHandler;
|
||||
import org.jboss.netty.channel.Channel;
|
||||
import org.junit.Assert;
|
||||
import org.junit.Test;
|
||||
import org.mockito.Mockito;
|
||||
|
@ -139,32 +143,33 @@ public class TestWrites {
|
|||
|
||||
// Test inactive open file context
|
||||
ctx.setActiveStatusForTest(false);
|
||||
ret = ctx.checkCommit(dfsClient, 0, null, 1, attr);
|
||||
Channel ch = Mockito.mock(Channel.class);
|
||||
ret = ctx.checkCommit(dfsClient, 0, ch, 1, attr, false);
|
||||
Assert.assertTrue(ret == COMMIT_STATUS.COMMIT_INACTIVE_CTX);
|
||||
|
||||
ctx.getPendingWritesForTest().put(new OffsetRange(5, 10),
|
||||
new WriteCtx(null, 0, 0, 0, null, null, null, 0, false, null));
|
||||
ret = ctx.checkCommit(dfsClient, 0, null, 1, attr);
|
||||
ret = ctx.checkCommit(dfsClient, 0, ch, 1, attr, false);
|
||||
Assert.assertTrue(ret == COMMIT_STATUS.COMMIT_INACTIVE_WITH_PENDING_WRITE);
|
||||
|
||||
// Test request with non zero commit offset
|
||||
ctx.setActiveStatusForTest(true);
|
||||
Mockito.when(fos.getPos()).thenReturn((long) 10);
|
||||
COMMIT_STATUS status = ctx.checkCommitInternal(5, null, 1, attr);
|
||||
COMMIT_STATUS status = ctx.checkCommitInternal(5, null, 1, attr, false);
|
||||
Assert.assertTrue(status == COMMIT_STATUS.COMMIT_DO_SYNC);
|
||||
// Do_SYNC state will be updated to FINISHED after data sync
|
||||
ret = ctx.checkCommit(dfsClient, 5, null, 1, attr);
|
||||
ret = ctx.checkCommit(dfsClient, 5, ch, 1, attr, false);
|
||||
Assert.assertTrue(ret == COMMIT_STATUS.COMMIT_FINISHED);
|
||||
|
||||
status = ctx.checkCommitInternal(10, null, 1, attr);
|
||||
status = ctx.checkCommitInternal(10, ch, 1, attr, false);
|
||||
Assert.assertTrue(status == COMMIT_STATUS.COMMIT_DO_SYNC);
|
||||
ret = ctx.checkCommit(dfsClient, 10, null, 1, attr);
|
||||
ret = ctx.checkCommit(dfsClient, 10, ch, 1, attr, false);
|
||||
Assert.assertTrue(ret == COMMIT_STATUS.COMMIT_FINISHED);
|
||||
|
||||
ConcurrentNavigableMap<Long, CommitCtx> commits = ctx
|
||||
.getPendingCommitsForTest();
|
||||
Assert.assertTrue(commits.size() == 0);
|
||||
ret = ctx.checkCommit(dfsClient, 11, null, 1, attr);
|
||||
ret = ctx.checkCommit(dfsClient, 11, ch, 1, attr, false);
|
||||
Assert.assertTrue(ret == COMMIT_STATUS.COMMIT_WAIT);
|
||||
Assert.assertTrue(commits.size() == 1);
|
||||
long key = commits.firstKey();
|
||||
|
@ -173,7 +178,7 @@ public class TestWrites {
|
|||
// Test request with zero commit offset
|
||||
commits.remove(new Long(11));
|
||||
// There is one pending write [5,10]
|
||||
ret = ctx.checkCommit(dfsClient, 0, null, 1, attr);
|
||||
ret = ctx.checkCommit(dfsClient, 0, ch, 1, attr, false);
|
||||
Assert.assertTrue(ret == COMMIT_STATUS.COMMIT_WAIT);
|
||||
Assert.assertTrue(commits.size() == 1);
|
||||
key = commits.firstKey();
|
||||
|
@ -181,10 +186,79 @@ public class TestWrites {
|
|||
|
||||
// Empty pending writes
|
||||
ctx.getPendingWritesForTest().remove(new OffsetRange(5, 10));
|
||||
ret = ctx.checkCommit(dfsClient, 0, null, 1, attr);
|
||||
ret = ctx.checkCommit(dfsClient, 0, ch, 1, attr, false);
|
||||
Assert.assertTrue(ret == COMMIT_STATUS.COMMIT_FINISHED);
|
||||
}
|
||||
|
||||
@Test
|
||||
// Validate all the commit check return codes in OpenFileCtx.COMMIT_STATUS, which
|
||||
// includes COMMIT_FINISHED, COMMIT_WAIT, COMMIT_INACTIVE_CTX,
|
||||
// COMMIT_INACTIVE_WITH_PENDING_WRITE, COMMIT_ERROR, and COMMIT_DO_SYNC.
|
||||
public void testCheckCommitFromRead() throws IOException {
|
||||
DFSClient dfsClient = Mockito.mock(DFSClient.class);
|
||||
Nfs3FileAttributes attr = new Nfs3FileAttributes();
|
||||
HdfsDataOutputStream fos = Mockito.mock(HdfsDataOutputStream.class);
|
||||
Mockito.when(fos.getPos()).thenReturn((long) 0);
|
||||
|
||||
OpenFileCtx ctx = new OpenFileCtx(fos, attr, "/dumpFilePath", dfsClient,
|
||||
new IdUserGroup());
|
||||
|
||||
FileHandle h = new FileHandle(1); // fake handle for "/dumpFilePath"
|
||||
COMMIT_STATUS ret;
|
||||
WriteManager wm = new WriteManager(new IdUserGroup(), new Configuration());
|
||||
assertTrue(wm.addOpenFileStream(h, ctx));
|
||||
|
||||
// Test inactive open file context
|
||||
ctx.setActiveStatusForTest(false);
|
||||
Channel ch = Mockito.mock(Channel.class);
|
||||
ret = ctx.checkCommit(dfsClient, 0, ch, 1, attr, true);
|
||||
assertEquals(COMMIT_STATUS.COMMIT_INACTIVE_CTX, ret);
|
||||
assertEquals(Nfs3Status.NFS3_OK, wm.commitBeforeRead(dfsClient, h, 0));
|
||||
|
||||
ctx.getPendingWritesForTest().put(new OffsetRange(5, 10),
|
||||
new WriteCtx(null, 0, 0, 0, null, null, null, 0, false, null));
|
||||
ret = ctx.checkCommit(dfsClient, 0, ch, 1, attr, true);
|
||||
assertEquals(COMMIT_STATUS.COMMIT_INACTIVE_WITH_PENDING_WRITE, ret);
|
||||
assertEquals(Nfs3Status.NFS3ERR_IO, wm.commitBeforeRead(dfsClient, h, 0));
|
||||
|
||||
// Test request with non zero commit offset
|
||||
ctx.setActiveStatusForTest(true);
|
||||
Mockito.when(fos.getPos()).thenReturn((long) 10);
|
||||
COMMIT_STATUS status = ctx.checkCommitInternal(5, ch, 1, attr, false);
|
||||
assertEquals(COMMIT_STATUS.COMMIT_DO_SYNC, status);
|
||||
// Do_SYNC state will be updated to FINISHED after data sync
|
||||
ret = ctx.checkCommit(dfsClient, 5, ch, 1, attr, true);
|
||||
assertEquals(COMMIT_STATUS.COMMIT_FINISHED, ret);
|
||||
assertEquals(Nfs3Status.NFS3_OK, wm.commitBeforeRead(dfsClient, h, 5));
|
||||
|
||||
status = ctx.checkCommitInternal(10, ch, 1, attr, true);
|
||||
assertTrue(status == COMMIT_STATUS.COMMIT_DO_SYNC);
|
||||
ret = ctx.checkCommit(dfsClient, 10, ch, 1, attr, true);
|
||||
assertEquals(COMMIT_STATUS.COMMIT_FINISHED, ret);
|
||||
assertEquals(Nfs3Status.NFS3_OK, wm.commitBeforeRead(dfsClient, h, 10));
|
||||
|
||||
ConcurrentNavigableMap<Long, CommitCtx> commits = ctx
|
||||
.getPendingCommitsForTest();
|
||||
assertTrue(commits.size() == 0);
|
||||
ret = ctx.checkCommit(dfsClient, 11, ch, 1, attr, true);
|
||||
assertEquals(COMMIT_STATUS.COMMIT_WAIT, ret);
|
||||
assertEquals(0, commits.size()); // commit triggered by read doesn't wait
|
||||
assertEquals(Nfs3Status.NFS3ERR_JUKEBOX, wm.commitBeforeRead(dfsClient, h, 11));
|
||||
|
||||
// Test request with zero commit offset
|
||||
// There is one pending write [5,10]
|
||||
ret = ctx.checkCommit(dfsClient, 0, ch, 1, attr, true);
|
||||
assertEquals(COMMIT_STATUS.COMMIT_WAIT, ret);
|
||||
assertEquals(0, commits.size());
|
||||
assertEquals(Nfs3Status.NFS3ERR_JUKEBOX, wm.commitBeforeRead(dfsClient, h, 0));
|
||||
|
||||
// Empty pending writes
|
||||
ctx.getPendingWritesForTest().remove(new OffsetRange(5, 10));
|
||||
ret = ctx.checkCommit(dfsClient, 0, ch, 1, attr, true);
|
||||
assertEquals(COMMIT_STATUS.COMMIT_FINISHED, ret);
|
||||
assertEquals(Nfs3Status.NFS3_OK, wm.commitBeforeRead(dfsClient, h, 0));
|
||||
}
|
||||
|
||||
private void waitWrite(RpcProgramNfs3 nfsd, FileHandle handle, int maxWaitTime)
|
||||
throws InterruptedException {
|
||||
int waitedTime = 0;
|
||||
|
|
|
@ -212,6 +212,22 @@ Trunk (Unreleased)
|
|||
and INodeFileUnderConstructionWithSnapshot with FileUnderContructionFeature.
|
||||
(jing9 via szetszwo)
|
||||
|
||||
HDFS-5538. URLConnectionFactory should pick up the SSL related configuration
|
||||
by default. (Haohui Mai via jing9)
|
||||
|
||||
HDFS-5286. Flatten INodeDirectory hierarchy: Replace INodeDirectoryWithQuota
|
||||
with DirectoryWithQuotaFeature. (szetszwo)
|
||||
|
||||
HDFS-5556. Add some more NameNode cache statistics, cache pool stats
|
||||
(cmccabe)
|
||||
|
||||
HDFS-5545. Allow specifying endpoints for listeners in HttpServer. (Haohui
|
||||
Mai via jing9)
|
||||
|
||||
HDFS-5537. Remove FileWithSnapshot interface. (jing9 via szetszwo)
|
||||
|
||||
HDFS-5430. Support TTL on CacheDirectives. (wang)
|
||||
|
||||
OPTIMIZATIONS
|
||||
HDFS-5349. DNA_CACHE and DNA_UNCACHE should be by blockId only. (cmccabe)
|
||||
|
||||
|
@ -399,6 +415,12 @@ Trunk (Unreleased)
|
|||
HDFS-5543. Fix narrow race condition in TestPathBasedCacheRequests
|
||||
(cmccabe)
|
||||
|
||||
HDFS-5565. CacheAdmin help should match against non-dashed commands
|
||||
(wang via cmccabe)
|
||||
|
||||
HDFS-5562. TestCacheDirectives and TestFsDatasetCache should stub out
|
||||
native mlock. (Colin McCabe and Akira Ajisaka via wang)
|
||||
|
||||
Release 2.3.0 - UNRELEASED
|
||||
|
||||
INCOMPATIBLE CHANGES
|
||||
|
@ -536,6 +558,12 @@ Release 2.3.0 - UNRELEASED
|
|||
|
||||
HDFS-5525. Inline dust templates for new Web UI. (Haohui Mai via jing9)
|
||||
|
||||
HDFS-5561. FSNameSystem#getNameJournalStatus() in JMX should return plain
|
||||
text instead of HTML. (Haohui Mai via jing9)
|
||||
|
||||
HDFS-5581. NameNodeFsck should use only one instance of
|
||||
BlockPlacementPolicy. (vinay via cmccabe)
|
||||
|
||||
OPTIMIZATIONS
|
||||
|
||||
HDFS-5239. Allow FSNamesystem lock fairness to be configurable (daryn)
|
||||
|
@ -612,6 +640,9 @@ Release 2.3.0 - UNRELEASED
|
|||
HDFS-5552. Fix wrong information of "Cluster summay" in dfshealth.html.
|
||||
(Haohui Mai via jing9)
|
||||
|
||||
HDFS-5533. Symlink delete/create should be treated as DELETE/CREATE in snapshot diff
|
||||
report. (Binglin Chang via jing9)
|
||||
|
||||
Release 2.2.1 - UNRELEASED
|
||||
|
||||
INCOMPATIBLE CHANGES
|
||||
|
@ -634,6 +665,8 @@ Release 2.2.1 - UNRELEASED
|
|||
|
||||
HDFS-5544. Adding Test case For Checking dfs.checksum type as NULL value. (Sathish via umamahesh)
|
||||
|
||||
HDFS-5568. Support includeSnapshots option with Fsck command. (Vinayakumar B via umamahesh)
|
||||
|
||||
OPTIMIZATIONS
|
||||
|
||||
BUG FIXES
|
||||
|
@ -727,6 +760,13 @@ Release 2.2.1 - UNRELEASED
|
|||
|
||||
HDFS-5407. Fix typos in DFSClientCache (Haohui Mai via brandonli)
|
||||
|
||||
HDFS-5548. Use ConcurrentHashMap in portmap (Haohui Mai via brandonli)
|
||||
|
||||
HDFS-5577. NFS user guide update (brandonli)
|
||||
|
||||
HDFS-5563. NFS gateway should commit the buffered data when read request comes
|
||||
after write to the same file (brandonli)
|
||||
|
||||
Release 2.2.0 - 2013-10-13
|
||||
|
||||
INCOMPATIBLE CHANGES
|
||||
|
@ -4018,6 +4058,8 @@ Release 0.23.10 - UNRELEASED
|
|||
HDFS-4329. DFSShell issues with directories with spaces in name (Cristina
|
||||
L. Abad via jeagles)
|
||||
|
||||
HDFS-5526. Datanode cannot roll back to previous layout version (kihwal)
|
||||
|
||||
Release 0.23.9 - 2013-07-08
|
||||
|
||||
INCOMPATIBLE CHANGES
|
||||
|
|
|
@ -352,6 +352,11 @@
|
|||
<Method name="getReplication" />
|
||||
<Bug pattern="ICAST_QUESTIONABLE_UNSIGNED_RIGHT_SHIFT" />
|
||||
</Match>
|
||||
<Match>
|
||||
<Class name="org.apache.hadoop.hdfs.protocol.CacheDirective" />
|
||||
<Method name="insertInternal" />
|
||||
<Bug pattern="BC_UNCONFIRMED_CAST" />
|
||||
</Match>
|
||||
<!-- These two are used for shutting down and kicking the CRMon, do not need strong sync -->
|
||||
<Match>
|
||||
<Class name="org.apache.hadoop.hdfs.server.blockmanagement.CacheReplicationMonitor" />
|
||||
|
|
|
@ -109,6 +109,7 @@ import org.apache.hadoop.hdfs.client.ClientMmapManager;
|
|||
import org.apache.hadoop.hdfs.client.HdfsDataInputStream;
|
||||
import org.apache.hadoop.hdfs.client.HdfsDataOutputStream;
|
||||
import org.apache.hadoop.hdfs.protocol.CacheDirectiveEntry;
|
||||
import org.apache.hadoop.hdfs.protocol.CachePoolEntry;
|
||||
import org.apache.hadoop.hdfs.protocol.CachePoolInfo;
|
||||
import org.apache.hadoop.hdfs.protocol.ClientProtocol;
|
||||
import org.apache.hadoop.hdfs.protocol.CorruptFileBlocks;
|
||||
|
@ -2358,7 +2359,7 @@ public class DFSClient implements java.io.Closeable {
|
|||
}
|
||||
}
|
||||
|
||||
public RemoteIterator<CachePoolInfo> listCachePools() throws IOException {
|
||||
public RemoteIterator<CachePoolEntry> listCachePools() throws IOException {
|
||||
checkOpen();
|
||||
try {
|
||||
return namenode.listCachePools("");
|
||||
|
|
|
@ -38,12 +38,15 @@ import java.net.InetSocketAddress;
|
|||
import java.net.URI;
|
||||
import java.net.URISyntaxException;
|
||||
import java.security.SecureRandom;
|
||||
import java.text.SimpleDateFormat;
|
||||
import java.util.ArrayList;
|
||||
import java.util.Collection;
|
||||
import java.util.Collections;
|
||||
import java.util.Comparator;
|
||||
import java.util.Date;
|
||||
import java.util.HashSet;
|
||||
import java.util.List;
|
||||
import java.util.Locale;
|
||||
import java.util.Map;
|
||||
import java.util.Random;
|
||||
import java.util.Set;
|
||||
|
@ -75,6 +78,7 @@ import org.apache.hadoop.hdfs.server.namenode.FSDirectory;
|
|||
import org.apache.hadoop.hdfs.server.namenode.NameNode;
|
||||
import org.apache.hadoop.hdfs.web.SWebHdfsFileSystem;
|
||||
import org.apache.hadoop.hdfs.web.WebHdfsFileSystem;
|
||||
import org.apache.hadoop.http.HttpServer;
|
||||
import org.apache.hadoop.ipc.ProtobufRpcEngine;
|
||||
import org.apache.hadoop.ipc.RPC;
|
||||
import org.apache.hadoop.net.NetUtils;
|
||||
|
@ -1427,4 +1431,79 @@ public class DFSUtil {
|
|||
return (value == null || value.isEmpty()) ?
|
||||
defaultKey : DFSConfigKeys.DFS_WEB_AUTHENTICATION_KERBEROS_KEYTAB_KEY;
|
||||
}
|
||||
|
||||
public static HttpServer.Builder loadSslConfToHttpServerBuilder(
|
||||
HttpServer.Builder builder, Configuration sslConf) {
|
||||
return builder
|
||||
.needsClientAuth(
|
||||
sslConf.getBoolean(DFSConfigKeys.DFS_CLIENT_HTTPS_NEED_AUTH_KEY,
|
||||
DFSConfigKeys.DFS_CLIENT_HTTPS_NEED_AUTH_DEFAULT))
|
||||
.keyPassword(sslConf.get("ssl.server.keystore.keypassword"))
|
||||
.keyStore(sslConf.get("ssl.server.keystore.location"),
|
||||
sslConf.get("ssl.server.keystore.password"),
|
||||
sslConf.get("ssl.server.keystore.type", "jks"))
|
||||
.trustStore(sslConf.get("ssl.server.truststore.location"),
|
||||
sslConf.get("ssl.server.truststore.password"),
|
||||
sslConf.get("ssl.server.truststore.type", "jks"));
|
||||
}
|
||||
|
||||
/**
|
||||
* Converts a Date into an ISO-8601 formatted datetime string.
|
||||
*/
|
||||
public static String dateToIso8601String(Date date) {
|
||||
SimpleDateFormat df =
|
||||
new SimpleDateFormat("yyyy-MM-dd'T'HH:mm:ssZ", Locale.ENGLISH);
|
||||
return df.format(date);
|
||||
}
|
||||
|
||||
/**
|
||||
* Converts a time duration in milliseconds into DDD:HH:MM:SS format.
|
||||
*/
|
||||
public static String durationToString(long durationMs) {
|
||||
Preconditions.checkArgument(durationMs >= 0, "Invalid negative duration");
|
||||
// Chop off the milliseconds
|
||||
long durationSec = durationMs / 1000;
|
||||
final int secondsPerMinute = 60;
|
||||
final int secondsPerHour = 60*60;
|
||||
final int secondsPerDay = 60*60*24;
|
||||
final long days = durationSec / secondsPerDay;
|
||||
durationSec -= days * secondsPerDay;
|
||||
final long hours = durationSec / secondsPerHour;
|
||||
durationSec -= hours * secondsPerHour;
|
||||
final long minutes = durationSec / secondsPerMinute;
|
||||
durationSec -= minutes * secondsPerMinute;
|
||||
final long seconds = durationSec;
|
||||
return String.format("%03d:%02d:%02d:%02d", days, hours, minutes, seconds);
|
||||
}
|
||||
|
||||
/**
|
||||
* Converts a relative time string into a duration in milliseconds.
|
||||
*/
|
||||
public static long parseRelativeTime(String relTime) throws IOException {
|
||||
if (relTime.length() < 2) {
|
||||
throw new IOException("Unable to parse relative time value of " + relTime
|
||||
+ ": too short");
|
||||
}
|
||||
String ttlString = relTime.substring(0, relTime.length()-1);
|
||||
int ttl;
|
||||
try {
|
||||
ttl = Integer.parseInt(ttlString);
|
||||
} catch (NumberFormatException e) {
|
||||
throw new IOException("Unable to parse relative time value of " + relTime
|
||||
+ ": " + ttlString + " is not a number");
|
||||
}
|
||||
if (relTime.endsWith("s")) {
|
||||
// pass
|
||||
} else if (relTime.endsWith("m")) {
|
||||
ttl *= 60;
|
||||
} else if (relTime.endsWith("h")) {
|
||||
ttl *= 60*60;
|
||||
} else if (relTime.endsWith("d")) {
|
||||
ttl *= 60*60*24;
|
||||
} else {
|
||||
throw new IOException("Unable to parse relative time value of " + relTime
|
||||
+ ": unknown time unit " + relTime.charAt(relTime.length() - 1));
|
||||
}
|
||||
return ttl*1000;
|
||||
}
|
||||
}
|
|
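The new time helpers are not symmetric in format: durationToString renders DDD:HH:MM:SS, while parseRelativeTime accepts only a number followed by s/m/h/d and returns milliseconds. A small worked sketch; the expected values follow directly from the arithmetic above:

    // 1 day + 1 hour + 1 minute + 1 second = 90061000 ms
    String pretty = DFSUtil.durationToString(90061000L);      // "001:01:01:01"

    long fiveMinutes = DFSUtil.parseRelativeTime("5m");       // 300000 ms
    long twoDays     = DFSUtil.parseRelativeTime("2d");       // 172800000 ms

    String when = DFSUtil.dateToIso8601String(new Date(0L));  // epoch, rendered in the local timezone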
@ -58,6 +58,7 @@ import org.apache.hadoop.hdfs.client.HdfsAdmin;
|
|||
import org.apache.hadoop.hdfs.client.HdfsDataInputStream;
|
||||
import org.apache.hadoop.hdfs.client.HdfsDataOutputStream;
|
||||
import org.apache.hadoop.hdfs.protocol.CacheDirectiveEntry;
|
||||
import org.apache.hadoop.hdfs.protocol.CachePoolEntry;
|
||||
import org.apache.hadoop.hdfs.protocol.CachePoolInfo;
|
||||
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
|
||||
import org.apache.hadoop.hdfs.protocol.DirectoryListing;
|
||||
|
@ -1713,12 +1714,12 @@ public class DistributedFileSystem extends FileSystem {
|
|||
/**
|
||||
* List all cache pools.
|
||||
*
|
||||
* @return A remote iterator from which you can get CachePoolInfo objects.
|
||||
* @return A remote iterator from which you can get CachePoolEntry objects.
|
||||
* Requests will be made as needed.
|
||||
* @throws IOException
|
||||
* If there was an error listing cache pools.
|
||||
*/
|
||||
public RemoteIterator<CachePoolInfo> listCachePools() throws IOException {
|
||||
public RemoteIterator<CachePoolEntry> listCachePools() throws IOException {
|
||||
return dfs.listCachePools();
|
||||
}
|
||||
}
|
||||
|
|
|
@ -29,6 +29,7 @@ import org.apache.hadoop.fs.RemoteIterator;
|
|||
import org.apache.hadoop.hdfs.DistributedFileSystem;
|
||||
import org.apache.hadoop.hdfs.protocol.CacheDirectiveEntry;
|
||||
import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo;
|
||||
import org.apache.hadoop.hdfs.protocol.CachePoolEntry;
|
||||
import org.apache.hadoop.hdfs.protocol.CachePoolInfo;
|
||||
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
|
||||
import org.apache.hadoop.hdfs.tools.DFSAdmin;
|
||||
|
@ -213,12 +214,12 @@ public class HdfsAdmin {
|
|||
/**
|
||||
* List all cache pools.
|
||||
*
|
||||
* @return A remote iterator from which you can get CachePoolInfo objects.
|
||||
* @return A remote iterator from which you can get CachePoolEntry objects.
|
||||
* Requests will be made as needed.
|
||||
* @throws IOException
|
||||
* If there was an error listing cache pools.
|
||||
*/
|
||||
public RemoteIterator<CachePoolInfo> listCachePools() throws IOException {
|
||||
public RemoteIterator<CachePoolEntry> listCachePools() throws IOException {
|
||||
return dfs.listCachePools();
|
||||
}
|
||||
}
|
||||
|
|
|
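listCachePools() now yields CachePoolEntry objects, so a caller gets both the pool's info and its stats from one iterator. A hedged usage sketch against a DistributedFileSystem handle named dfs; getPoolName() on CachePoolInfo is assumed here:

    RemoteIterator<CachePoolEntry> pools = dfs.listCachePools();
    while (pools.hasNext()) {
      CachePoolEntry entry = pools.next();
      System.out.println(entry.getInfo().getPoolName() + " -> " + entry.getStats());
    }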
@ -17,65 +17,94 @@
|
|||
*/
|
||||
package org.apache.hadoop.hdfs.protocol;
|
||||
|
||||
import static com.google.common.base.Preconditions.checkNotNull;
|
||||
|
||||
import java.util.Date;
|
||||
|
||||
import org.apache.commons.lang.builder.HashCodeBuilder;
|
||||
import org.apache.hadoop.classification.InterfaceAudience;
|
||||
import org.apache.hadoop.fs.Path;
|
||||
import org.apache.hadoop.hdfs.DFSUtil;
|
||||
import org.apache.hadoop.hdfs.server.namenode.CachePool;
|
||||
import org.apache.hadoop.util.IntrusiveCollection;
|
||||
import org.apache.hadoop.util.IntrusiveCollection.Element;
|
||||
|
||||
import com.google.common.base.Preconditions;
|
||||
|
||||
/**
|
||||
* Represents an entry in the PathBasedCache on the NameNode.
|
||||
* Namenode class that tracks state related to a cached path.
|
||||
*
|
||||
* This is an implementation class, not part of the public API.
|
||||
*/
|
||||
@InterfaceAudience.Private
|
||||
public final class CacheDirective {
|
||||
private final long entryId;
|
||||
public final class CacheDirective implements IntrusiveCollection.Element {
|
||||
private final long id;
|
||||
private final String path;
|
||||
private final short replication;
|
||||
private final CachePool pool;
|
||||
private CachePool pool;
|
||||
private final long expiryTime;
|
||||
|
||||
private long bytesNeeded;
|
||||
private long bytesCached;
|
||||
private long filesAffected;
|
||||
private Element prev;
|
||||
private Element next;
|
||||
|
||||
public CacheDirective(long entryId, String path,
|
||||
short replication, CachePool pool) {
|
||||
Preconditions.checkArgument(entryId > 0);
|
||||
this.entryId = entryId;
|
||||
public CacheDirective(long id, String path,
|
||||
short replication, long expiryTime) {
|
||||
Preconditions.checkArgument(id > 0);
|
||||
this.id = id;
|
||||
this.path = checkNotNull(path);
|
||||
Preconditions.checkArgument(replication > 0);
|
||||
this.path = path;
|
||||
Preconditions.checkNotNull(pool);
|
||||
this.replication = replication;
|
||||
Preconditions.checkNotNull(path);
|
||||
this.pool = pool;
|
||||
this.expiryTime = expiryTime;
|
||||
this.bytesNeeded = 0;
|
||||
this.bytesCached = 0;
|
||||
this.filesAffected = 0;
|
||||
}
|
||||
|
||||
public long getEntryId() {
|
||||
return entryId;
|
||||
public long getId() {
|
||||
return id;
|
||||
}
|
||||
|
||||
public String getPath() {
|
||||
return path;
|
||||
}
|
||||
|
||||
public CachePool getPool() {
|
||||
return pool;
|
||||
}
|
||||
|
||||
public short getReplication() {
|
||||
return replication;
|
||||
}
|
||||
|
||||
public CacheDirectiveInfo toDirective() {
|
||||
public CachePool getPool() {
|
||||
return pool;
|
||||
}
|
||||
|
||||
/**
|
||||
* @return When this directive expires, in milliseconds since Unix epoch
|
||||
*/
|
||||
public long getExpiryTime() {
|
||||
return expiryTime;
|
||||
}
|
||||
|
||||
/**
|
||||
* @return When this directive expires, as an ISO-8601 formatted string.
|
||||
*/
|
||||
public String getExpiryTimeString() {
|
||||
return DFSUtil.dateToIso8601String(new Date(expiryTime));
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns a {@link CacheDirectiveInfo} based on this CacheDirective.
|
||||
* <p>
|
||||
* This always sets an absolute expiry time, never a relative TTL.
|
||||
*/
|
||||
public CacheDirectiveInfo toInfo() {
|
||||
return new CacheDirectiveInfo.Builder().
|
||||
setId(entryId).
|
||||
setId(id).
|
||||
setPath(new Path(path)).
|
||||
setReplication(replication).
|
||||
setPool(pool.getPoolName()).
|
||||
setExpiration(CacheDirectiveInfo.Expiration.newAbsolute(expiryTime)).
|
||||
build();
|
||||
}
|
||||
|
||||
|
@ -84,20 +113,22 @@ public final class CacheDirective {
|
|||
setBytesNeeded(bytesNeeded).
|
||||
setBytesCached(bytesCached).
|
||||
setFilesAffected(filesAffected).
|
||||
setHasExpired(new Date().getTime() > expiryTime).
|
||||
build();
|
||||
}
|
||||
|
||||
public CacheDirectiveEntry toEntry() {
|
||||
return new CacheDirectiveEntry(toDirective(), toStats());
|
||||
return new CacheDirectiveEntry(toInfo(), toStats());
|
||||
}
|
||||
|
||||
@Override
|
||||
public String toString() {
|
||||
StringBuilder builder = new StringBuilder();
|
||||
builder.append("{ entryId:").append(entryId).
|
||||
builder.append("{ id:").append(id).
|
||||
append(", path:").append(path).
|
||||
append(", replication:").append(replication).
|
||||
append(", pool:").append(pool).
|
||||
append(", expiryTime: ").append(getExpiryTimeString()).
|
||||
append(", bytesNeeded:").append(bytesNeeded).
|
||||
append(", bytesCached:").append(bytesCached).
|
||||
append(", filesAffected:").append(filesAffected).
|
||||
|
@ -113,12 +144,12 @@ public final class CacheDirective {
|
|||
return false;
|
||||
}
|
||||
CacheDirective other = (CacheDirective)o;
|
||||
return entryId == other.entryId;
|
||||
return id == other.id;
|
||||
}
|
||||
|
||||
@Override
|
||||
public int hashCode() {
|
||||
return new HashCodeBuilder().append(entryId).toHashCode();
|
||||
return new HashCodeBuilder().append(id).toHashCode();
|
||||
}
|
||||
|
||||
public long getBytesNeeded() {
|
||||
|
@ -156,4 +187,55 @@ public final class CacheDirective {
|
|||
public void incrementFilesAffected() {
|
||||
this.filesAffected++;
|
||||
}
|
||||
|
||||
@SuppressWarnings("unchecked")
|
||||
@Override // IntrusiveCollection.Element
|
||||
public void insertInternal(IntrusiveCollection<? extends Element> list,
|
||||
Element prev, Element next) {
|
||||
assert this.pool == null;
|
||||
this.pool = ((CachePool.DirectiveList)list).getCachePool();
|
||||
this.prev = prev;
|
||||
this.next = next;
|
||||
}
|
||||
|
||||
@Override // IntrusiveCollection.Element
|
||||
public void setPrev(IntrusiveCollection<? extends Element> list, Element prev) {
|
||||
assert list == pool.getDirectiveList();
|
||||
this.prev = prev;
|
||||
}
|
||||
|
||||
@Override // IntrusiveCollection.Element
|
||||
public void setNext(IntrusiveCollection<? extends Element> list, Element next) {
|
||||
assert list == pool.getDirectiveList();
|
||||
this.next = next;
|
||||
}
|
||||
|
||||
@Override // IntrusiveCollection.Element
|
||||
public void removeInternal(IntrusiveCollection<? extends Element> list) {
|
||||
assert list == pool.getDirectiveList();
|
||||
this.pool = null;
|
||||
this.prev = null;
|
||||
this.next = null;
|
||||
}
|
||||
|
||||
@Override // IntrusiveCollection.Element
|
||||
public Element getPrev(IntrusiveCollection<? extends Element> list) {
|
||||
if (list != pool.getDirectiveList()) {
|
||||
return null;
|
||||
}
|
||||
return this.prev;
|
||||
}
|
||||
|
||||
@Override // IntrusiveCollection.Element
|
||||
public Element getNext(IntrusiveCollection<? extends Element> list) {
|
||||
if (list != pool.getDirectiveList()) {
|
||||
return null;
|
||||
}
|
||||
return this.next;
|
||||
}
|
||||
|
||||
@Override // IntrusiveCollection.Element
|
||||
public boolean isInList(IntrusiveCollection<? extends Element> list) {
|
||||
return pool == null ? false : list == pool.getDirectiveList();
|
||||
}
|
||||
};
|
||||
|
|
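The directive now carries its own id and expiry time, and is attached to a pool only when inserted into that pool's intrusive directive list. A minimal sketch of the new constructor, with the id, path and expiry values as placeholders:

    long expiryTime = new Date().getTime() + 3600 * 1000L;   // one hour from now
    CacheDirective directive =
        new CacheDirective(1L, "/warm/data", (short) 3, expiryTime);
    System.out.println(directive.getExpiryTimeString());     // ISO-8601 form of expiryTime
    // toInfo()/toEntry() also report the owning pool, so they are only meaningful once the
    // directive has been added to a CachePool's directive list.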
|
@ -17,11 +17,14 @@
|
|||
*/
|
||||
package org.apache.hadoop.hdfs.protocol;
|
||||
|
||||
import java.util.Date;
|
||||
|
||||
import org.apache.commons.lang.builder.EqualsBuilder;
|
||||
import org.apache.commons.lang.builder.HashCodeBuilder;
|
||||
import org.apache.hadoop.classification.InterfaceAudience;
|
||||
import org.apache.hadoop.classification.InterfaceStability;
|
||||
import org.apache.hadoop.fs.Path;
|
||||
import org.apache.hadoop.hdfs.DFSUtil;
|
||||
|
||||
/**
|
||||
* Describes a path-based cache directive.
|
||||
|
@ -37,6 +40,7 @@ public class CacheDirectiveInfo {
|
|||
private Path path;
|
||||
private Short replication;
|
||||
private String pool;
|
||||
private Expiration expiration;
|
||||
|
||||
/**
|
||||
* Builds a new CacheDirectiveInfo populated with the set properties.
|
||||
|
@ -44,7 +48,7 @@ public class CacheDirectiveInfo {
|
|||
* @return New CacheDirectiveInfo.
|
||||
*/
|
||||
public CacheDirectiveInfo build() {
|
||||
return new CacheDirectiveInfo(id, path, replication, pool);
|
||||
return new CacheDirectiveInfo(id, path, replication, pool, expiration);
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -62,6 +66,7 @@ public class CacheDirectiveInfo {
|
|||
this.path = directive.getPath();
|
||||
this.replication = directive.getReplication();
|
||||
this.pool = directive.getPool();
|
||||
this.expiration = directive.getExpiration();
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -107,18 +112,134 @@ public class CacheDirectiveInfo {
|
|||
this.pool = pool;
|
||||
return this;
|
||||
}
|
||||
|
||||
/**
|
||||
* Sets when the CacheDirective should expire. A
|
||||
* {@link CacheDirectiveInfo.Expiration} can specify either an absolute or
|
||||
* relative expiration time.
|
||||
*
|
||||
* @param expiration when this CacheDirective should expire
|
||||
* @return This builder, for call chaining
|
||||
*/
|
||||
public Builder setExpiration(Expiration expiration) {
|
||||
this.expiration = expiration;
|
||||
return this;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Denotes a relative or absolute expiration time for a CacheDirective. Use
|
||||
* factory methods {@link CacheDirectiveInfo.Expiration#newAbsolute(Date)} and
|
||||
* {@link CacheDirectiveInfo.Expiration#newRelative(long)} to create an
|
||||
* Expiration.
|
||||
* <p>
|
||||
* In either case, the server-side clock is used to determine when a
|
||||
* CacheDirective expires.
|
||||
*/
|
||||
public static class Expiration {
|
||||
|
||||
/** Denotes a CacheDirectiveInfo that never expires **/
|
||||
public static final int EXPIRY_NEVER = -1;
|
||||
|
||||
/**
|
||||
* Create a new relative Expiration.
|
||||
*
|
||||
* @param ms how long until the CacheDirective expires, in milliseconds
|
||||
* @return A relative Expiration
|
||||
*/
|
||||
public static Expiration newRelative(long ms) {
|
||||
return new Expiration(ms, true);
|
||||
}
|
||||
|
||||
/**
|
||||
* Create a new absolute Expiration.
|
||||
*
|
||||
* @param date when the CacheDirective expires
|
||||
* @return An absolute Expiration
|
||||
*/
|
||||
public static Expiration newAbsolute(Date date) {
|
||||
return new Expiration(date.getTime(), false);
|
||||
}
|
||||
|
||||
/**
|
||||
* Create a new absolute Expiration.
|
||||
*
|
||||
* @param ms when the CacheDirective expires, in milliseconds since the Unix
|
||||
* epoch.
|
||||
* @return An absolute Expiration
|
||||
*/
|
||||
public static Expiration newAbsolute(long ms) {
|
||||
return new Expiration(ms, false);
|
||||
}
|
||||
|
||||
private final long ms;
|
||||
private final boolean isRelative;
|
||||
|
||||
private Expiration(long ms, boolean isRelative) {
|
||||
this.ms = ms;
|
||||
this.isRelative = isRelative;
|
||||
}
|
||||
|
||||
/**
|
||||
* @return true if Expiration was specified as a relative duration, false if
|
||||
* specified as an absolute time.
|
||||
*/
|
||||
public boolean isRelative() {
|
||||
return isRelative;
|
||||
}
|
||||
|
||||
/**
|
||||
* @return The raw underlying millisecond value, either a relative duration
|
||||
* or an absolute time as milliseconds since the Unix epoch.
|
||||
*/
|
||||
public long getMillis() {
|
||||
return ms;
|
||||
}
|
||||
|
||||
/**
|
||||
* @return Expiration time as a {@link Date} object. This converts a
|
||||
* relative Expiration into an absolute Date based on the local
|
||||
* clock.
|
||||
*/
|
||||
public Date getAbsoluteDate() {
|
||||
return new Date(getAbsoluteMillis());
|
||||
}
|
||||
|
||||
/**
|
||||
* @return Expiration time in milliseconds from the Unix epoch. This
|
||||
* converts a relative Expiration into an absolute time based on the
|
||||
* local clock.
|
||||
*/
|
||||
public long getAbsoluteMillis() {
|
||||
if (!isRelative) {
|
||||
return ms;
|
||||
} else {
|
||||
return new Date().getTime() + ms;
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public String toString() {
|
||||
if (isRelative) {
|
||||
return DFSUtil.durationToString(ms);
|
||||
}
|
||||
return DFSUtil.dateToIso8601String(new Date(ms));
|
||||
}
|
||||
}
|
||||
|
||||
private final Long id;
|
||||
private final Path path;
|
||||
private final Short replication;
|
||||
private final String pool;
|
||||
private final Expiration expiration;
|
||||
|
||||
CacheDirectiveInfo(Long id, Path path, Short replication, String pool) {
|
||||
CacheDirectiveInfo(Long id, Path path, Short replication, String pool,
|
||||
Expiration expiration) {
|
||||
this.id = id;
|
||||
this.path = path;
|
||||
this.replication = replication;
|
||||
this.pool = pool;
|
||||
this.expiration = expiration;
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -149,6 +270,13 @@ public class CacheDirectiveInfo {
|
|||
return pool;
|
||||
}
|
||||
|
||||
/**
|
||||
* @return When this directive expires.
|
||||
*/
|
||||
public Expiration getExpiration() {
|
||||
return expiration;
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean equals(Object o) {
|
||||
if (o == null) {
|
||||
|
@ -162,6 +290,7 @@ public class CacheDirectiveInfo {
|
|||
append(getPath(), other.getPath()).
|
||||
append(getReplication(), other.getReplication()).
|
||||
append(getPool(), other.getPool()).
|
||||
append(getExpiration(), other.getExpiration()).
|
||||
isEquals();
|
||||
}
|
||||
|
||||
|
@ -171,6 +300,7 @@ public class CacheDirectiveInfo {
|
|||
append(path).
|
||||
append(replication).
|
||||
append(pool).
|
||||
append(expiration).
|
||||
hashCode();
|
||||
}
|
||||
|
||||
|
@ -195,6 +325,10 @@ public class CacheDirectiveInfo {
|
|||
builder.append(prefix).append("pool: ").append(pool);
|
||||
prefix = ", ";
|
||||
}
|
||||
if (expiration != null) {
|
||||
builder.append(prefix).append("expiration: ").append(expiration);
|
||||
prefix = ", ";
|
||||
}
|
||||
builder.append("}");
|
||||
return builder.toString();
|
||||
}
|
||||
|
|
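Expiration is a small value type with two factory paths; a relative expiration is resolved against the server clock when the directive is applied. A hedged sketch of building a directive with each flavour (path, pool and replication are placeholders):

    CacheDirectiveInfo relative = new CacheDirectiveInfo.Builder()
        .setPath(new Path("/warm/data"))
        .setReplication((short) 3)
        .setPool("research")
        .setExpiration(CacheDirectiveInfo.Expiration.newRelative(30 * 60 * 1000L)) // 30 minutes
        .build();

    CacheDirectiveInfo absolute = new CacheDirectiveInfo.Builder()
        .setPath(new Path("/warm/data"))
        .setPool("research")
        .setExpiration(CacheDirectiveInfo.Expiration.newAbsolute(new Date(1400000000000L)))
        .build();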
|
@ -30,6 +30,7 @@ public class CacheDirectiveStats {
|
|||
private long bytesNeeded;
|
||||
private long bytesCached;
|
||||
private long filesAffected;
|
||||
private boolean hasExpired;
|
||||
|
||||
/**
|
||||
* Builds a new CacheDirectiveStats populated with the set properties.
|
||||
|
@ -37,7 +38,8 @@ public class CacheDirectiveStats {
|
|||
* @return New CacheDirectiveStats.
|
||||
*/
|
||||
public CacheDirectiveStats build() {
|
||||
return new CacheDirectiveStats(bytesNeeded, bytesCached, filesAffected);
|
||||
return new CacheDirectiveStats(bytesNeeded, bytesCached, filesAffected,
|
||||
hasExpired);
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -52,7 +54,7 @@ public class CacheDirectiveStats {
|
|||
* @param bytesNeeded The bytes needed.
|
||||
* @return This builder, for call chaining.
|
||||
*/
|
||||
public Builder setBytesNeeded(Long bytesNeeded) {
|
||||
public Builder setBytesNeeded(long bytesNeeded) {
|
||||
this.bytesNeeded = bytesNeeded;
|
||||
return this;
|
||||
}
|
||||
|
@ -63,7 +65,7 @@ public class CacheDirectiveStats {
|
|||
* @param bytesCached The bytes cached.
|
||||
* @return This builder, for call chaining.
|
||||
*/
|
||||
public Builder setBytesCached(Long bytesCached) {
|
||||
public Builder setBytesCached(long bytesCached) {
|
||||
this.bytesCached = bytesCached;
|
||||
return this;
|
||||
}
|
||||
|
@ -74,44 +76,64 @@ public class CacheDirectiveStats {
|
|||
* @param filesAffected The files affected.
|
||||
* @return This builder, for call chaining.
|
||||
*/
|
||||
public Builder setFilesAffected(Long filesAffected) {
|
||||
public Builder setFilesAffected(long filesAffected) {
|
||||
this.filesAffected = filesAffected;
|
||||
return this;
|
||||
}
|
||||
|
||||
/**
|
||||
* Sets whether this directive has expired.
|
||||
*
|
||||
* @param hasExpired if this directive has expired
|
||||
* @return This builder, for call chaining.
|
||||
*/
|
||||
public Builder setHasExpired(boolean hasExpired) {
|
||||
this.hasExpired = hasExpired;
|
||||
return this;
|
||||
}
|
||||
}
|
||||
|
||||
private final long bytesNeeded;
|
||||
private final long bytesCached;
|
||||
private final long filesAffected;
|
||||
private final boolean hasExpired;
|
||||
|
||||
private CacheDirectiveStats(long bytesNeeded, long bytesCached,
|
||||
long filesAffected) {
|
||||
long filesAffected, boolean hasExpired) {
|
||||
this.bytesNeeded = bytesNeeded;
|
||||
this.bytesCached = bytesCached;
|
||||
this.filesAffected = filesAffected;
|
||||
this.hasExpired = hasExpired;
|
||||
}
|
||||
|
||||
/**
|
||||
* @return The bytes needed.
|
||||
*/
|
||||
public Long getBytesNeeded() {
|
||||
public long getBytesNeeded() {
|
||||
return bytesNeeded;
|
||||
}
|
||||
|
||||
/**
|
||||
* @return The bytes cached.
|
||||
*/
|
||||
public Long getBytesCached() {
|
||||
public long getBytesCached() {
|
||||
return bytesCached;
|
||||
}
|
||||
|
||||
/**
|
||||
* @return The files affected.
|
||||
*/
|
||||
public Long getFilesAffected() {
|
||||
public long getFilesAffected() {
|
||||
return filesAffected;
|
||||
}
|
||||
|
||||
/**
|
||||
* @return Whether this directive has expired.
|
||||
*/
|
||||
public boolean hasExpired() {
|
||||
return hasExpired;
|
||||
}
|
||||
|
||||
@Override
|
||||
public String toString() {
|
||||
StringBuilder builder = new StringBuilder();
|
||||
|
@ -119,6 +141,7 @@ public class CacheDirectiveStats {
|
|||
builder.append("bytesNeeded: ").append(bytesNeeded);
|
||||
builder.append(", ").append("bytesCached: ").append(bytesCached);
|
||||
builder.append(", ").append("filesAffected: ").append(filesAffected);
|
||||
builder.append(", ").append("hasExpired: ").append(hasExpired);
|
||||
builder.append("}");
|
||||
return builder.toString();
|
||||
}
|
||||
|
|
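Illustrative aside (not part of the change): the builder added above can be exercised as in this sketch; all numbers are arbitrary.
import org.apache.hadoop.hdfs.protocol.CacheDirectiveStats;

class CacheDirectiveStatsSketch {
  // Populates a stats object through the builder methods shown above,
  // including the new hasExpired flag; the byte counts are illustrative only.
  static CacheDirectiveStats sample() {
    return new CacheDirectiveStats.Builder()
        .setBytesNeeded(4L * 1024 * 1024)
        .setBytesCached(1L * 1024 * 1024)
        .setFilesAffected(3)
        .setHasExpired(false)
        .build();
  }
}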
|
@ -0,0 +1,45 @@
|
|||
/**
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
* regarding copyright ownership. The ASF licenses this file
|
||||
* to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package org.apache.hadoop.hdfs.protocol;
|
||||
|
||||
import org.apache.hadoop.classification.InterfaceAudience;
|
||||
import org.apache.hadoop.classification.InterfaceStability;
|
||||
|
||||
/**
|
||||
* Describes a Cache Pool entry.
|
||||
*/
|
||||
@InterfaceAudience.Public
|
||||
@InterfaceStability.Evolving
|
||||
public class CachePoolEntry {
|
||||
private final CachePoolInfo info;
|
||||
private final CachePoolStats stats;
|
||||
|
||||
public CachePoolEntry(CachePoolInfo info, CachePoolStats stats) {
|
||||
this.info = info;
|
||||
this.stats = stats;
|
||||
}
|
||||
|
||||
public CachePoolInfo getInfo() {
|
||||
return info;
|
||||
}
|
||||
|
||||
public CachePoolStats getStats() {
|
||||
return stats;
|
||||
}
|
||||
}
|
|
@ -30,6 +30,7 @@ import org.apache.commons.logging.Log;
|
|||
import org.apache.commons.logging.LogFactory;
|
||||
import org.apache.hadoop.classification.InterfaceAudience;
|
||||
import org.apache.hadoop.classification.InterfaceStability;
|
||||
import org.apache.hadoop.fs.InvalidRequestException;
|
||||
import org.apache.hadoop.fs.permission.FsPermission;
|
||||
import org.apache.hadoop.fs.permission.PermissionStatus;
|
||||
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp;
|
||||
|
@ -150,7 +151,10 @@ public class CachePoolInfo {
|
|||
|
||||
public static void validate(CachePoolInfo info) throws IOException {
|
||||
if (info == null) {
|
||||
throw new IOException("CachePoolInfo is null");
|
||||
throw new InvalidRequestException("CachePoolInfo is null");
|
||||
}
|
||||
if ((info.getWeight() != null) && (info.getWeight() < 0)) {
|
||||
throw new InvalidRequestException("CachePool weight is negative.");
|
||||
}
|
||||
validateName(info.poolName);
|
||||
}
|
||||
|
|
|
@ -0,0 +1,87 @@
|
|||
/**
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
* regarding copyright ownership. The ASF licenses this file
|
||||
* to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package org.apache.hadoop.hdfs.protocol;
|
||||
|
||||
import org.apache.hadoop.classification.InterfaceAudience;
|
||||
import org.apache.hadoop.classification.InterfaceStability;
|
||||
|
||||
/**
|
||||
* CachePoolStats describes cache pool statistics.
|
||||
*/
|
||||
@InterfaceAudience.Public
|
||||
@InterfaceStability.Evolving
|
||||
public class CachePoolStats {
|
||||
public static class Builder {
|
||||
private long bytesNeeded;
|
||||
private long bytesCached;
|
||||
private long filesAffected;
|
||||
|
||||
public Builder() {
|
||||
}
|
||||
|
||||
public Builder setBytesNeeded(long bytesNeeded) {
|
||||
this.bytesNeeded = bytesNeeded;
|
||||
return this;
|
||||
}
|
||||
|
||||
public Builder setBytesCached(long bytesCached) {
|
||||
this.bytesCached = bytesCached;
|
||||
return this;
|
||||
}
|
||||
|
||||
public Builder setFilesAffected(long filesAffected) {
|
||||
this.filesAffected = filesAffected;
|
||||
return this;
|
||||
}
|
||||
|
||||
public CachePoolStats build() {
|
||||
return new CachePoolStats(bytesNeeded, bytesCached, filesAffected);
|
||||
}
|
||||
}
|
||||
|
||||
private final long bytesNeeded;
|
||||
private final long bytesCached;
|
||||
private final long filesAffected;
|
||||
|
||||
private CachePoolStats(long bytesNeeded, long bytesCached, long filesAffected) {
|
||||
this.bytesNeeded = bytesNeeded;
|
||||
this.bytesCached = bytesCached;
|
||||
this.filesAffected = filesAffected;
|
||||
}
|
||||
|
||||
public long getBytesNeeded() {
|
||||
return bytesNeeded;
|
||||
}
|
||||
|
||||
public long getBytesCached() {
|
||||
return bytesCached;
|
||||
}
|
||||
|
||||
public long getFilesAffected() {
|
||||
return filesAffected;
|
||||
}
|
||||
|
||||
public String toString() {
|
||||
return new StringBuilder().append("{").
|
||||
append("bytesNeeded:").append(bytesNeeded).
|
||||
append(", bytesCached:").append(bytesCached).
|
||||
append(", filesAffected:").append(filesAffected).
|
||||
append("}").toString();
|
||||
}
|
||||
}
|
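Illustrative aside (not part of the change): CachePoolEntry pairs the existing CachePoolInfo with the new CachePoolStats, which is what listCachePools now returns per pool. A minimal sketch, assuming the CachePoolInfo(String) constructor:
import org.apache.hadoop.hdfs.protocol.CachePoolEntry;
import org.apache.hadoop.hdfs.protocol.CachePoolInfo;
import org.apache.hadoop.hdfs.protocol.CachePoolStats;

class CachePoolEntrySketch {
  // Wraps pool metadata and the new per-pool statistics together, mirroring
  // what each element of listCachePools() now carries.
  static CachePoolEntry sample() {
    CachePoolStats stats = new CachePoolStats.Builder()
        .setBytesNeeded(1024)          // illustrative numbers
        .setBytesCached(512)
        .setFilesAffected(2)
        .build();
    return new CachePoolEntry(new CachePoolInfo("analytics"), stats);
  }
}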
|
@ -1179,6 +1179,6 @@ public interface ClientProtocol {
|
|||
* @return A RemoteIterator which returns CachePool objects.
|
||||
*/
|
||||
@Idempotent
|
||||
public RemoteIterator<CachePoolInfo> listCachePools(String prevPool)
|
||||
public RemoteIterator<CachePoolEntry> listCachePools(String prevPool)
|
||||
throws IOException;
|
||||
}
|
||||
|
|
|
@ -30,6 +30,7 @@ import org.apache.hadoop.fs.RemoteIterator;
|
|||
import org.apache.hadoop.fs.permission.FsPermission;
|
||||
import org.apache.hadoop.hdfs.protocol.CacheDirectiveEntry;
|
||||
import org.apache.hadoop.hdfs.protocol.CachePoolInfo;
|
||||
import org.apache.hadoop.hdfs.protocol.CachePoolEntry;
|
||||
import org.apache.hadoop.hdfs.protocol.ClientProtocol;
|
||||
import org.apache.hadoop.hdfs.protocol.CorruptFileBlocks;
|
||||
import org.apache.hadoop.hdfs.protocol.DirectoryListing;
|
||||
|
@ -51,6 +52,8 @@ import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AllowS
|
|||
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AllowSnapshotResponseProto;
|
||||
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AppendRequestProto;
|
||||
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AppendResponseProto;
|
||||
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CachePoolEntryProto;
|
||||
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CachePoolInfoProto;
|
||||
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CompleteRequestProto;
|
||||
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CompleteResponseProto;
|
||||
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ConcatRequestProto;
|
||||
|
@ -103,7 +106,6 @@ import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSna
|
|||
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.IsFileClosedRequestProto;
|
||||
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.IsFileClosedResponseProto;
|
||||
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCachePoolsRequestProto;
|
||||
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCachePoolsResponseElementProto;
|
||||
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCachePoolsResponseProto;
|
||||
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCorruptFileBlocksRequestProto;
|
||||
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCorruptFileBlocksResponseProto;
|
||||
|
@ -1141,18 +1143,15 @@ public class ClientNamenodeProtocolServerSideTranslatorPB implements
|
|||
public ListCachePoolsResponseProto listCachePools(RpcController controller,
|
||||
ListCachePoolsRequestProto request) throws ServiceException {
|
||||
try {
|
||||
RemoteIterator<CachePoolInfo> iter =
|
||||
RemoteIterator<CachePoolEntry> iter =
|
||||
server.listCachePools(request.getPrevPoolName());
|
||||
ListCachePoolsResponseProto.Builder responseBuilder =
|
||||
ListCachePoolsResponseProto.newBuilder();
|
||||
String prevPoolName = null;
|
||||
while (iter.hasNext()) {
|
||||
CachePoolInfo pool = iter.next();
|
||||
ListCachePoolsResponseElementProto.Builder elemBuilder =
|
||||
ListCachePoolsResponseElementProto.newBuilder();
|
||||
elemBuilder.setInfo(PBHelper.convert(pool));
|
||||
responseBuilder.addElements(elemBuilder.build());
|
||||
prevPoolName = pool.getPoolName();
|
||||
CachePoolEntry entry = iter.next();
|
||||
responseBuilder.addEntries(PBHelper.convert(entry));
|
||||
prevPoolName = entry.getInfo().getPoolName();
|
||||
}
|
||||
// fill in hasNext
|
||||
if (prevPoolName == null) {
|
||||
|
|
|
@ -38,6 +38,7 @@ import org.apache.hadoop.fs.permission.FsPermission;
|
|||
import org.apache.hadoop.hdfs.protocol.AlreadyBeingCreatedException;
|
||||
import org.apache.hadoop.hdfs.protocol.CacheDirectiveEntry;
|
||||
import org.apache.hadoop.hdfs.protocol.CachePoolInfo;
|
||||
import org.apache.hadoop.hdfs.protocol.CachePoolEntry;
|
||||
import org.apache.hadoop.hdfs.protocol.ClientProtocol;
|
||||
import org.apache.hadoop.hdfs.protocol.CorruptFileBlocks;
|
||||
import org.apache.hadoop.hdfs.protocol.DSQuotaExceededException;
|
||||
|
@ -61,6 +62,7 @@ import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddCac
|
|||
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AllowSnapshotRequestProto;
|
||||
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AppendRequestProto;
|
||||
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AppendResponseProto;
|
||||
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CachePoolEntryProto;
|
||||
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CompleteRequestProto;
|
||||
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ConcatRequestProto;
|
||||
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateRequestProto;
|
||||
|
@ -96,7 +98,6 @@ import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSna
|
|||
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshottableDirListingResponseProto;
|
||||
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.IsFileClosedRequestProto;
|
||||
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCachePoolsRequestProto;
|
||||
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCachePoolsResponseElementProto;
|
||||
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCachePoolsResponseProto;
|
||||
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCorruptFileBlocksRequestProto;
|
||||
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCacheDirectivesRequestProto;
|
||||
|
@ -1141,23 +1142,23 @@ public class ClientNamenodeProtocolTranslatorPB implements
|
|||
}
|
||||
}
|
||||
|
||||
private static class BatchedCachePoolInfo
|
||||
implements BatchedEntries<CachePoolInfo> {
|
||||
private static class BatchedCachePoolEntries
|
||||
implements BatchedEntries<CachePoolEntry> {
|
||||
private final ListCachePoolsResponseProto proto;
|
||||
|
||||
public BatchedCachePoolInfo(ListCachePoolsResponseProto proto) {
|
||||
public BatchedCachePoolEntries(ListCachePoolsResponseProto proto) {
|
||||
this.proto = proto;
|
||||
}
|
||||
|
||||
@Override
|
||||
public CachePoolInfo get(int i) {
|
||||
ListCachePoolsResponseElementProto elem = proto.getElements(i);
|
||||
return PBHelper.convert(elem.getInfo());
|
||||
public CachePoolEntry get(int i) {
|
||||
CachePoolEntryProto elem = proto.getEntries(i);
|
||||
return PBHelper.convert(elem);
|
||||
}
|
||||
|
||||
@Override
|
||||
public int size() {
|
||||
return proto.getElementsCount();
|
||||
return proto.getEntriesCount();
|
||||
}
|
||||
|
||||
@Override
|
||||
|
@ -1167,17 +1168,17 @@ public class ClientNamenodeProtocolTranslatorPB implements
|
|||
}
|
||||
|
||||
private class CachePoolIterator
|
||||
extends BatchedRemoteIterator<String, CachePoolInfo> {
|
||||
extends BatchedRemoteIterator<String, CachePoolEntry> {
|
||||
|
||||
public CachePoolIterator(String prevKey) {
|
||||
super(prevKey);
|
||||
}
|
||||
|
||||
@Override
|
||||
public BatchedEntries<CachePoolInfo> makeRequest(String prevKey)
|
||||
public BatchedEntries<CachePoolEntry> makeRequest(String prevKey)
|
||||
throws IOException {
|
||||
try {
|
||||
return new BatchedCachePoolInfo(
|
||||
return new BatchedCachePoolEntries(
|
||||
rpcProxy.listCachePools(null,
|
||||
ListCachePoolsRequestProto.newBuilder().
|
||||
setPrevPoolName(prevKey).build()));
|
||||
|
@ -1187,13 +1188,13 @@ public class ClientNamenodeProtocolTranslatorPB implements
|
|||
}
|
||||
|
||||
@Override
|
||||
public String elementToPrevKey(CachePoolInfo element) {
|
||||
return element.getPoolName();
|
||||
public String elementToPrevKey(CachePoolEntry entry) {
|
||||
return entry.getInfo().getPoolName();
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public RemoteIterator<CachePoolInfo> listCachePools(String prevKey)
|
||||
public RemoteIterator<CachePoolEntry> listCachePools(String prevKey)
|
||||
throws IOException {
|
||||
return new CachePoolIterator(prevKey);
|
||||
}
|
||||
|
|
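Illustrative aside (not part of the change): a hedged sketch of consuming the new iterator, which now yields CachePoolEntry objects (info plus stats) instead of bare CachePoolInfo. Starting from an empty previous-pool key is an assumption here.
import java.io.IOException;

import org.apache.hadoop.fs.RemoteIterator;
import org.apache.hadoop.hdfs.protocol.CachePoolEntry;
import org.apache.hadoop.hdfs.protocol.ClientProtocol;

class ListCachePoolsSketch {
  // Walks the batched iterator; each element now carries stats alongside
  // the pool info.
  static void printPools(ClientProtocol namenode) throws IOException {
    RemoteIterator<CachePoolEntry> it = namenode.listCachePools("");
    while (it.hasNext()) {
      CachePoolEntry entry = it.next();
      System.out.println(entry.getInfo().getPoolName()
          + " -> " + entry.getStats());
    }
  }
}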
|
@ -39,7 +39,9 @@ import org.apache.hadoop.hdfs.StorageType;
|
|||
import org.apache.hadoop.hdfs.protocol.Block;
|
||||
import org.apache.hadoop.hdfs.protocol.CacheDirectiveEntry;
|
||||
import org.apache.hadoop.hdfs.protocol.CacheDirectiveStats;
|
||||
import org.apache.hadoop.hdfs.protocol.CachePoolEntry;
|
||||
import org.apache.hadoop.hdfs.protocol.CachePoolInfo;
|
||||
import org.apache.hadoop.hdfs.protocol.CachePoolStats;
|
||||
import org.apache.hadoop.hdfs.protocol.ClientProtocol;
|
||||
import org.apache.hadoop.hdfs.protocol.CorruptFileBlocks;
|
||||
import org.apache.hadoop.hdfs.protocol.DatanodeID;
|
||||
|
@ -60,8 +62,11 @@ import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport.DiffType;
|
|||
import org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus;
|
||||
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos;
|
||||
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveEntryProto;
|
||||
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveInfoExpirationProto;
|
||||
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveStatsProto;
|
||||
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CachePoolEntryProto;
|
||||
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CachePoolInfoProto;
|
||||
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CachePoolStatsProto;
|
||||
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateFlagProto;
|
||||
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DatanodeReportTypeProto;
|
||||
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsStatsResponseProto;
|
||||
|
@ -1698,6 +1703,9 @@ public class PBHelper {
|
|||
if (info.getPool() != null) {
|
||||
builder.setPool(info.getPool());
|
||||
}
|
||||
if (info.getExpiration() != null) {
|
||||
builder.setExpiration(convert(info.getExpiration()));
|
||||
}
|
||||
return builder.build();
|
||||
}
|
||||
|
||||
|
@ -1718,15 +1726,35 @@ public class PBHelper {
|
|||
if (proto.hasPool()) {
|
||||
builder.setPool(proto.getPool());
|
||||
}
|
||||
if (proto.hasExpiration()) {
|
||||
builder.setExpiration(convert(proto.getExpiration()));
|
||||
}
|
||||
return builder.build();
|
||||
}
|
||||
|
||||
public static CacheDirectiveInfoExpirationProto convert(
|
||||
CacheDirectiveInfo.Expiration expiration) {
|
||||
return CacheDirectiveInfoExpirationProto.newBuilder()
|
||||
.setIsRelative(expiration.isRelative())
|
||||
.setMillis(expiration.getMillis())
|
||||
.build();
|
||||
}
|
||||
|
||||
public static CacheDirectiveInfo.Expiration convert(
|
||||
CacheDirectiveInfoExpirationProto proto) {
|
||||
if (proto.getIsRelative()) {
|
||||
return CacheDirectiveInfo.Expiration.newRelative(proto.getMillis());
|
||||
}
|
||||
return CacheDirectiveInfo.Expiration.newAbsolute(proto.getMillis());
|
||||
}
|
||||
|
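Illustrative aside (not part of the change): the two converters above are inverses for both relative and absolute expirations, as in this sketch; the PBHelper package path is assumed from the surrounding translator code.
import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveInfoExpirationProto;
import org.apache.hadoop.hdfs.protocolPB.PBHelper;

class ExpirationRoundTripSketch {
  // Round-trips a relative expiration through the protobuf converters; the
  // isRelative flag and millis value are preserved.
  static boolean roundTripsCleanly() {
    CacheDirectiveInfo.Expiration original =
        CacheDirectiveInfo.Expiration.newRelative(60 * 60 * 1000L); // 1 hour
    CacheDirectiveInfoExpirationProto proto = PBHelper.convert(original);
    CacheDirectiveInfo.Expiration restored = PBHelper.convert(proto);
    return restored.isRelative()
        && restored.getMillis() == original.getMillis();
  }
}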
||||
public static CacheDirectiveStatsProto convert(CacheDirectiveStats stats) {
|
||||
CacheDirectiveStatsProto.Builder builder =
|
||||
CacheDirectiveStatsProto.newBuilder();
|
||||
builder.setBytesNeeded(stats.getBytesNeeded());
|
||||
builder.setBytesCached(stats.getBytesCached());
|
||||
builder.setFilesAffected(stats.getFilesAffected());
|
||||
builder.setHasExpired(stats.hasExpired());
|
||||
return builder.build();
|
||||
}
|
||||
|
||||
|
@ -1735,6 +1763,7 @@ public class PBHelper {
|
|||
builder.setBytesNeeded(proto.getBytesNeeded());
|
||||
builder.setBytesCached(proto.getBytesCached());
|
||||
builder.setFilesAffected(proto.getFilesAffected());
|
||||
builder.setHasExpired(proto.getHasExpired());
|
||||
return builder.build();
|
||||
}
|
||||
|
||||
|
@ -1789,6 +1818,35 @@ public class PBHelper {
|
|||
return info;
|
||||
}
|
||||
|
||||
public static CachePoolStatsProto convert(CachePoolStats stats) {
|
||||
CachePoolStatsProto.Builder builder = CachePoolStatsProto.newBuilder();
|
||||
builder.setBytesNeeded(stats.getBytesNeeded());
|
||||
builder.setBytesCached(stats.getBytesCached());
|
||||
builder.setFilesAffected(stats.getFilesAffected());
|
||||
return builder.build();
|
||||
}
|
||||
|
||||
public static CachePoolStats convert(CachePoolStatsProto proto) {
|
||||
CachePoolStats.Builder builder = new CachePoolStats.Builder();
|
||||
builder.setBytesNeeded(proto.getBytesNeeded());
|
||||
builder.setBytesCached(proto.getBytesCached());
|
||||
builder.setFilesAffected(proto.getFilesAffected());
|
||||
return builder.build();
|
||||
}
|
||||
|
||||
public static CachePoolEntryProto convert(CachePoolEntry entry) {
|
||||
CachePoolEntryProto.Builder builder = CachePoolEntryProto.newBuilder();
|
||||
builder.setInfo(PBHelper.convert(entry.getInfo()));
|
||||
builder.setStats(PBHelper.convert(entry.getStats()));
|
||||
return builder.build();
|
||||
}
|
||||
|
||||
public static CachePoolEntry convert(CachePoolEntryProto proto) {
|
||||
CachePoolInfo info = PBHelper.convert(proto.getInfo());
|
||||
CachePoolStats stats = PBHelper.convert(proto.getStats());
|
||||
return new CachePoolEntry(info, stats);
|
||||
}
|
||||
|
||||
public static HdfsProtos.ChecksumTypeProto convert(DataChecksum.Type type) {
|
||||
return HdfsProtos.ChecksumTypeProto.valueOf(type.id);
|
||||
}
|
||||
|
|
|
@ -150,5 +150,5 @@ interface AsyncLogger {
|
|||
* Append an HTML-formatted report for this logger's status to the provided
|
||||
* StringBuilder. This is displayed on the NN web UI.
|
||||
*/
|
||||
public void appendHtmlReport(StringBuilder sb);
|
||||
public void appendReport(StringBuilder sb);
|
||||
}
|
||||
|
|
|
@ -31,7 +31,6 @@ import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRe
|
|||
import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto;
|
||||
import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
|
||||
import org.apache.hadoop.hdfs.server.protocol.RemoteEditLogManifest;
|
||||
import org.apache.jasper.compiler.JspUtil;
|
||||
|
||||
import com.google.common.annotations.VisibleForTesting;
|
||||
import com.google.common.base.Joiner;
|
||||
|
@ -177,17 +176,16 @@ class AsyncLoggerSet {
|
|||
* state of the underlying loggers.
|
||||
* @param sb the StringBuilder to append to
|
||||
*/
|
||||
void appendHtmlReport(StringBuilder sb) {
|
||||
sb.append("<table class=\"storage\">");
|
||||
sb.append("<thead><tr><td>JN</td><td>Status</td></tr></thead>\n");
|
||||
for (AsyncLogger l : loggers) {
|
||||
sb.append("<tr>");
|
||||
sb.append("<td>" + JspUtil.escapeXml(l.toString()) + "</td>");
|
||||
sb.append("<td>");
|
||||
l.appendHtmlReport(sb);
|
||||
sb.append("</td></tr>\n");
|
||||
void appendReport(StringBuilder sb) {
|
||||
for (int i = 0, len = loggers.size(); i < len; ++i) {
|
||||
AsyncLogger l = loggers.get(i);
|
||||
if (i != 0) {
|
||||
sb.append(", ");
|
||||
}
|
||||
sb.append(l).append(" (");
|
||||
l.appendReport(sb);
|
||||
sb.append(")");
|
||||
}
|
||||
sb.append("</table>");
|
||||
}
|
||||
|
||||
/**
|
||||
|
|
|
@ -569,7 +569,7 @@ public class IPCLoggerChannel implements AsyncLogger {
|
|||
}
|
||||
|
||||
@Override
|
||||
public synchronized void appendHtmlReport(StringBuilder sb) {
|
||||
public synchronized void appendReport(StringBuilder sb) {
|
||||
sb.append("Written txid ").append(highestAckedTxId);
|
||||
long behind = getLagTxns();
|
||||
if (behind > 0) {
|
||||
|
|
|
@ -46,6 +46,7 @@ import org.apache.hadoop.hdfs.server.namenode.JournalSet;
|
|||
import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
|
||||
import org.apache.hadoop.hdfs.server.protocol.RemoteEditLog;
|
||||
import org.apache.hadoop.hdfs.server.protocol.RemoteEditLogManifest;
|
||||
import org.apache.hadoop.hdfs.web.URLConnectionFactory;
|
||||
import org.apache.hadoop.net.NetUtils;
|
||||
import org.apache.hadoop.util.StringUtils;
|
||||
|
||||
|
@ -87,6 +88,7 @@ public class QuorumJournalManager implements JournalManager {
|
|||
private final AsyncLoggerSet loggers;
|
||||
|
||||
private int outputBufferCapacity = 512 * 1024;
|
||||
private final URLConnectionFactory connectionFactory;
|
||||
|
||||
public QuorumJournalManager(Configuration conf,
|
||||
URI uri, NamespaceInfo nsInfo) throws IOException {
|
||||
|
@ -102,6 +104,8 @@ public class QuorumJournalManager implements JournalManager {
|
|||
this.uri = uri;
|
||||
this.nsInfo = nsInfo;
|
||||
this.loggers = new AsyncLoggerSet(createLoggers(loggerFactory));
|
||||
this.connectionFactory = URLConnectionFactory
|
||||
.newDefaultURLConnectionFactory(conf);
|
||||
|
||||
// Configure timeouts.
|
||||
this.startSegmentTimeoutMs = conf.getInt(
|
||||
|
@ -475,8 +479,8 @@ public class QuorumJournalManager implements JournalManager {
|
|||
URL url = logger.buildURLToFetchLogs(remoteLog.getStartTxId());
|
||||
|
||||
EditLogInputStream elis = EditLogFileInputStream.fromUrl(
|
||||
url, remoteLog.getStartTxId(), remoteLog.getEndTxId(),
|
||||
remoteLog.isInProgress());
|
||||
connectionFactory, url, remoteLog.getStartTxId(),
|
||||
remoteLog.getEndTxId(), remoteLog.isInProgress());
|
||||
allStreams.add(elis);
|
||||
}
|
||||
}
|
||||
|
|
|
@ -114,10 +114,10 @@ class QuorumOutputStream extends EditLogOutputStream {
|
|||
}
|
||||
|
||||
@Override
|
||||
public String generateHtmlReport() {
|
||||
public String generateReport() {
|
||||
StringBuilder sb = new StringBuilder();
|
||||
sb.append("Writing segment beginning at txid " + segmentTxId + "<br/>\n");
|
||||
loggers.appendHtmlReport(sb);
|
||||
sb.append("Writing segment beginning at txid " + segmentTxId + ". \n");
|
||||
loggers.appendReport(sb);
|
||||
return sb.toString();
|
||||
}
|
||||
|
||||
|
|
|
@ -23,6 +23,8 @@ import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_JOURNALNODE_INTERNAL_SPNE
|
|||
|
||||
import java.io.IOException;
|
||||
import java.net.InetSocketAddress;
|
||||
import java.net.URI;
|
||||
import java.net.URISyntaxException;
|
||||
|
||||
import javax.servlet.ServletContext;
|
||||
|
||||
|
@ -69,8 +71,15 @@ public class JournalNodeHttpServer {
|
|||
bindAddr.getHostName()));
|
||||
|
||||
int tmpInfoPort = bindAddr.getPort();
|
||||
URI httpEndpoint;
|
||||
try {
|
||||
httpEndpoint = new URI("http://" + NetUtils.getHostPortString(bindAddr));
|
||||
} catch (URISyntaxException e) {
|
||||
throw new IOException(e);
|
||||
}
|
||||
|
||||
httpServer = new HttpServer.Builder().setName("journal")
|
||||
.setBindAddress(bindAddr.getHostName()).setPort(tmpInfoPort)
|
||||
.addEndpoint(httpEndpoint)
|
||||
.setFindPort(tmpInfoPort == 0).setConf(conf).setACL(
|
||||
new AccessControlList(conf.get(DFS_ADMIN, " ")))
|
||||
.setSecurityEnabled(UserGroupInformation.isSecurityEnabled())
|
||||
|
@ -85,7 +94,7 @@ public class JournalNodeHttpServer {
|
|||
httpServer.start();
|
||||
|
||||
// The web-server port can be ephemeral... ensure we have the correct info
|
||||
infoPort = httpServer.getPort();
|
||||
infoPort = httpServer.getConnectorAddress(0).getPort();
|
||||
|
||||
LOG.info("Journal Web-server up at: " + bindAddr + ":" + infoPort);
|
||||
}
|
||||
|
@ -104,7 +113,7 @@ public class JournalNodeHttpServer {
|
|||
* Return the actual address bound to by the running server.
|
||||
*/
|
||||
public InetSocketAddress getAddress() {
|
||||
InetSocketAddress addr = httpServer.getListenerAddress();
|
||||
InetSocketAddress addr = httpServer.getConnectorAddress(0);
|
||||
assert addr.getPort() != 0;
|
||||
return addr;
|
||||
}
|
||||
|
|
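Illustrative aside (not part of the change): the JournalNode now feeds its bind address to HttpServer.Builder as a URI endpoint and reads the bound port back from connector 0. A minimal sketch using only the builder calls that appear in this change; the server name is hypothetical.
import java.io.IOException;
import java.net.InetSocketAddress;
import java.net.URI;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.http.HttpServer;
import org.apache.hadoop.net.NetUtils;

class EndpointHttpServerSketch {
  // Builds an HTTP server from a URI endpoint and returns the actual bound
  // port, which may differ from the requested one when port 0 is used.
  static int startAndGetPort(Configuration conf, InetSocketAddress bindAddr)
      throws IOException {
    HttpServer server = new HttpServer.Builder().setName("example")
        .addEndpoint(URI.create("http://" + NetUtils.getHostPortString(bindAddr)))
        .setFindPort(bindAddr.getPort() == 0)
        .setConf(conf)
        .build();
    server.start();
    return server.getConnectorAddress(0).getPort();
  }
}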
|
@ -22,6 +22,7 @@ import static org.apache.hadoop.util.ExitUtil.terminate;
|
|||
import java.io.Closeable;
|
||||
import java.io.IOException;
|
||||
import java.util.Collection;
|
||||
import java.util.Date;
|
||||
import java.util.Iterator;
|
||||
import java.util.LinkedList;
|
||||
import java.util.List;
|
||||
|
@ -208,17 +209,27 @@ public class CacheReplicationMonitor extends Thread implements Closeable {
|
|||
/**
|
||||
* Scan all CacheDirectives. Use the information to figure out
|
||||
* what cache replication factor each block should have.
|
||||
*/
|
||||
private void rescanCacheDirectives() {
|
||||
FSDirectory fsDir = namesystem.getFSDirectory();
|
||||
for (CacheDirective pce : cacheManager.getEntriesById().values()) {
|
||||
final long now = new Date().getTime();
|
||||
for (CacheDirective directive : cacheManager.getEntriesById().values()) {
|
||||
// Reset the directive
|
||||
directive.clearBytesNeeded();
|
||||
directive.clearBytesCached();
|
||||
directive.clearFilesAffected();
|
||||
// Skip processing this entry if it has expired
|
||||
LOG.info("Directive expiry is at " + directive.getExpiryTime());
|
||||
if (directive.getExpiryTime() > 0 && directive.getExpiryTime() <= now) {
|
||||
if (LOG.isDebugEnabled()) {
|
||||
LOG.debug("Skipping directive id " + directive.getId()
|
||||
+ " because it has expired (" + directive.getExpiryTime() + ">="
|
||||
+ now + ")");
|
||||
}
|
||||
continue;
|
||||
}
|
||||
scannedDirectives++;
|
||||
pce.clearBytesNeeded();
|
||||
pce.clearBytesCached();
|
||||
pce.clearFilesAffected();
|
||||
String path = pce.getPath();
|
||||
String path = directive.getPath();
|
||||
INode node;
|
||||
try {
|
||||
node = fsDir.getINode(path);
|
||||
|
@ -235,11 +246,11 @@ public class CacheReplicationMonitor extends Thread implements Closeable {
|
|||
ReadOnlyList<INode> children = dir.getChildrenList(null);
|
||||
for (INode child : children) {
|
||||
if (child.isFile()) {
|
||||
rescanFile(pce, child.asFile());
|
||||
rescanFile(directive, child.asFile());
|
||||
}
|
||||
}
|
||||
} else if (node.isFile()) {
|
||||
rescanFile(pce, node.asFile());
|
||||
rescanFile(directive, node.asFile());
|
||||
} else {
|
||||
if (LOG.isDebugEnabled()) {
|
||||
LOG.debug("Ignoring non-directory, non-file inode " + node +
|
||||
|
@ -301,7 +312,7 @@ public class CacheReplicationMonitor extends Thread implements Closeable {
|
|||
pce.addBytesNeeded(neededTotal);
|
||||
pce.addBytesCached(cachedTotal);
|
||||
if (LOG.isTraceEnabled()) {
|
||||
LOG.debug("Directive " + pce.getEntryId() + " is caching " +
|
||||
LOG.debug("Directive " + pce.getId() + " is caching " +
|
||||
file.getFullPathName() + ": " + cachedTotal + "/" + neededTotal);
|
||||
}
|
||||
}
|
||||
|
|
|
@ -43,6 +43,12 @@ public interface DatanodeStatistics {
|
|||
/** @return the percentage of the block pool used space over the total capacity. */
|
||||
public float getPercentBlockPoolUsed();
|
||||
|
||||
/** @return the total cache capacity of all DataNodes */
|
||||
public long getCacheCapacity();
|
||||
|
||||
/** @return the total cache used by all DataNodes */
|
||||
public long getCacheUsed();
|
||||
|
||||
/** @return the xceiver count */
|
||||
public int getXceiverCount();
|
||||
|
||||
|
|
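Illustrative aside (not part of the change): the two new aggregate counters make a cluster-wide cache utilization figure straightforward to derive, as in this sketch; the import path for DatanodeStatistics is assumed.
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStatistics;

class CacheUtilizationSketch {
  // Computes the percentage of aggregate cache capacity currently in use,
  // guarding against a cluster that reports no cache capacity at all.
  static float cacheUsedPercent(DatanodeStatistics stats) {
    long capacity = stats.getCacheCapacity();
    if (capacity == 0) {
      return 0f;
    }
    return 100f * stats.getCacheUsed() / capacity;
  }
}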
|
@ -150,6 +150,17 @@ class HeartbeatManager implements DatanodeStatistics {
|
|||
return stats.xceiverCount;
|
||||
}
|
||||
|
||||
@Override
|
||||
public synchronized long getCacheCapacity() {
|
||||
return stats.cacheCapacity;
|
||||
}
|
||||
|
||||
@Override
|
||||
public synchronized long getCacheUsed() {
|
||||
return stats.cacheUsed;
|
||||
}
|
||||
|
||||
|
||||
@Override
|
||||
public synchronized long[] getStats() {
|
||||
return new long[] {getCapacityTotal(),
|
||||
|
@ -309,6 +320,8 @@ class HeartbeatManager implements DatanodeStatistics {
|
|||
private long capacityRemaining = 0L;
|
||||
private long blockPoolUsed = 0L;
|
||||
private int xceiverCount = 0;
|
||||
private long cacheCapacity = 0L;
|
||||
private long cacheUsed = 0L;
|
||||
|
||||
private int expiredHeartbeats = 0;
|
||||
|
||||
|
@ -322,6 +335,8 @@ class HeartbeatManager implements DatanodeStatistics {
|
|||
} else {
|
||||
capacityTotal += node.getDfsUsed();
|
||||
}
|
||||
cacheCapacity += node.getCacheCapacity();
|
||||
cacheUsed += node.getCacheUsed();
|
||||
}
|
||||
|
||||
private void subtract(final DatanodeDescriptor node) {
|
||||
|
@ -334,6 +349,8 @@ class HeartbeatManager implements DatanodeStatistics {
|
|||
} else {
|
||||
capacityTotal -= node.getDfsUsed();
|
||||
}
|
||||
cacheCapacity -= node.getCacheCapacity();
|
||||
cacheUsed -= node.getCacheUsed();
|
||||
}
|
||||
|
||||
/** Increment expired heartbeat counter. */
|
||||
|
|
|
@ -52,6 +52,7 @@ import java.util.concurrent.atomic.AtomicInteger;
|
|||
|
||||
import javax.management.ObjectName;
|
||||
|
||||
|
||||
import org.apache.commons.logging.Log;
|
||||
import org.apache.commons.logging.LogFactory;
|
||||
import org.apache.hadoop.classification.InterfaceAudience;
|
||||
|
@ -234,6 +235,7 @@ public class DataNode extends Configured
|
|||
private volatile boolean heartbeatsDisabledForTests = false;
|
||||
private DataStorage storage = null;
|
||||
private HttpServer infoServer = null;
|
||||
private int infoPort;
|
||||
private int infoSecurePort;
|
||||
DataNodeMetrics metrics;
|
||||
private InetSocketAddress streamingAddr;
|
||||
|
@ -354,27 +356,33 @@ public class DataNode extends Configured
|
|||
String infoHost = infoSocAddr.getHostName();
|
||||
int tmpInfoPort = infoSocAddr.getPort();
|
||||
HttpServer.Builder builder = new HttpServer.Builder().setName("datanode")
|
||||
.setBindAddress(infoHost).setPort(tmpInfoPort)
|
||||
.addEndpoint(URI.create("http://" + NetUtils.getHostPortString(infoSocAddr)))
|
||||
.setFindPort(tmpInfoPort == 0).setConf(conf)
|
||||
.setACL(new AccessControlList(conf.get(DFS_ADMIN, " ")));
|
||||
this.infoServer = (secureResources == null) ? builder.build() :
|
||||
builder.setConnector(secureResources.getListener()).build();
|
||||
|
||||
LOG.info("Opened info server at " + infoHost + ":" + tmpInfoPort);
|
||||
if (conf.getBoolean(DFS_HTTPS_ENABLE_KEY, false)) {
|
||||
boolean needClientAuth = conf.getBoolean(DFS_CLIENT_HTTPS_NEED_AUTH_KEY,
|
||||
DFS_CLIENT_HTTPS_NEED_AUTH_DEFAULT);
|
||||
InetSocketAddress secInfoSocAddr = NetUtils.createSocketAddr(conf.get(
|
||||
DFS_DATANODE_HTTPS_ADDRESS_KEY, infoHost + ":" + 0));
|
||||
Configuration sslConf = new HdfsConfiguration(false);
|
||||
sslConf.addResource(conf.get(DFSConfigKeys.DFS_SERVER_HTTPS_KEYSTORE_RESOURCE_KEY,
|
||||
"ssl-server.xml"));
|
||||
this.infoServer.addSslListener(secInfoSocAddr, sslConf, needClientAuth);
|
||||
builder.addEndpoint(URI.create("https://"
|
||||
+ NetUtils.getHostPortString(secInfoSocAddr)));
|
||||
Configuration sslConf = new Configuration(false);
|
||||
sslConf.setBoolean(DFSConfigKeys.DFS_CLIENT_HTTPS_NEED_AUTH_KEY, conf
|
||||
.getBoolean(DFSConfigKeys.DFS_CLIENT_HTTPS_NEED_AUTH_KEY,
|
||||
DFSConfigKeys.DFS_CLIENT_HTTPS_NEED_AUTH_DEFAULT));
|
||||
sslConf.addResource(conf.get(
|
||||
DFSConfigKeys.DFS_SERVER_HTTPS_KEYSTORE_RESOURCE_KEY,
|
||||
DFSConfigKeys.DFS_SERVER_HTTPS_KEYSTORE_RESOURCE_DEFAULT));
|
||||
DFSUtil.loadSslConfToHttpServerBuilder(builder, sslConf);
|
||||
|
||||
if(LOG.isDebugEnabled()) {
|
||||
LOG.debug("Datanode listening for SSL on " + secInfoSocAddr);
|
||||
}
|
||||
infoSecurePort = secInfoSocAddr.getPort();
|
||||
}
|
||||
|
||||
this.infoServer = (secureResources == null) ? builder.build() :
|
||||
builder.setConnector(secureResources.getListener()).build();
|
||||
this.infoServer.addInternalServlet(null, "/streamFile/*", StreamFile.class);
|
||||
this.infoServer.addInternalServlet(null, "/getFileChecksum/*",
|
||||
FileChecksumServlets.GetServlet.class);
|
||||
|
@ -390,6 +398,7 @@ public class DataNode extends Configured
|
|||
WebHdfsFileSystem.PATH_PREFIX + "/*");
|
||||
}
|
||||
this.infoServer.start();
|
||||
this.infoPort = infoServer.getConnectorAddress(0).getPort();
|
||||
}
|
||||
|
||||
private void startPlugins(Configuration conf) {
|
||||
|
@ -712,7 +721,7 @@ public class DataNode extends Configured
|
|||
this.dnConf = new DNConf(conf);
|
||||
|
||||
if (dnConf.maxLockedMemory > 0) {
|
||||
if (!NativeIO.isAvailable()) {
|
||||
if (!NativeIO.POSIX.getCacheManipulator().verifyCanMlock()) {
|
||||
throw new RuntimeException(String.format(
|
||||
"Cannot start datanode because the configured max locked memory" +
|
||||
" size (%s) is greater than zero and native code is not available.",
|
||||
|
@ -2320,7 +2329,7 @@ public class DataNode extends Configured
|
|||
* @return the datanode's http port
|
||||
*/
|
||||
public int getInfoPort() {
|
||||
return infoServer.getPort();
|
||||
return infoPort;
|
||||
}
|
||||
|
||||
/**
|
||||
|
|
|
@ -310,7 +310,16 @@ public class DataStorage extends Storage {
|
|||
@Override
|
||||
protected void setFieldsFromProperties(Properties props, StorageDirectory sd)
|
||||
throws IOException {
|
||||
setFieldsFromProperties(props, sd, false, 0);
|
||||
}
|
||||
|
||||
private void setFieldsFromProperties(Properties props, StorageDirectory sd,
|
||||
boolean overrideLayoutVersion, int toLayoutVersion) throws IOException {
|
||||
if (overrideLayoutVersion) {
|
||||
this.layoutVersion = toLayoutVersion;
|
||||
} else {
|
||||
setLayoutVersion(props, sd);
|
||||
}
|
||||
setcTime(props, sd);
|
||||
setStorageType(props, sd);
|
||||
setClusterId(props, layoutVersion, sd);
|
||||
|
@ -374,13 +383,20 @@ public class DataStorage extends Storage {
|
|||
return true;
|
||||
}
|
||||
|
||||
/** Read VERSION file for rollback */
|
||||
void readProperties(StorageDirectory sd, int rollbackLayoutVersion)
|
||||
throws IOException {
|
||||
Properties props = readPropertiesFile(sd.getVersionFile());
|
||||
setFieldsFromProperties(props, sd, true, rollbackLayoutVersion);
|
||||
}
|
||||
|
||||
/**
|
||||
* Analyze whether a transition of the fs state is required
|
||||
* and perform it if necessary.
|
||||
*
|
||||
* Rollback if previousLV >= LAYOUT_VERSION && prevCTime <= namenode.cTime
|
||||
* Upgrade if this.LV > LAYOUT_VERSION || this.cTime < namenode.cTime
|
||||
* Regular startup if this.LV = LAYOUT_VERSION && this.cTime = namenode.cTime
|
||||
* Rollback if the rollback startup option was specified.
|
||||
* Upgrade if this.LV > LAYOUT_VERSION
|
||||
* Regular startup if this.LV = LAYOUT_VERSION
|
||||
*
|
||||
* @param datanode Datanode to which this storage belongs to
|
||||
* @param sd storage directory
|
||||
|
@ -420,25 +436,28 @@ public class DataStorage extends Storage {
|
|||
+ nsInfo.getClusterID() + "; datanode clusterID = " + getClusterID());
|
||||
}
|
||||
|
||||
// regular start up
|
||||
if (this.layoutVersion == HdfsConstants.LAYOUT_VERSION
|
||||
&& this.cTime == nsInfo.getCTime())
|
||||
// After addition of the federation feature, ctime check is only
|
||||
// meaningful at BlockPoolSliceStorage level.
|
||||
|
||||
// regular start up.
|
||||
if (this.layoutVersion == HdfsConstants.LAYOUT_VERSION)
|
||||
return; // regular startup
|
||||
|
||||
// do upgrade
|
||||
if (this.layoutVersion > HdfsConstants.LAYOUT_VERSION
|
||||
|| this.cTime < nsInfo.getCTime()) {
|
||||
if (this.layoutVersion > HdfsConstants.LAYOUT_VERSION) {
|
||||
doUpgrade(sd, nsInfo); // upgrade
|
||||
return;
|
||||
}
|
||||
|
||||
// layoutVersion == LAYOUT_VERSION && this.cTime > nsInfo.cTime
|
||||
// must shutdown
|
||||
throw new IOException("Datanode state: LV = " + this.getLayoutVersion()
|
||||
+ " CTime = " + this.getCTime()
|
||||
+ " is newer than the namespace state: LV = "
|
||||
+ nsInfo.getLayoutVersion()
|
||||
+ " CTime = " + nsInfo.getCTime());
|
||||
// layoutVersion < LAYOUT_VERSION. I.e. stored layout version is newer
|
||||
// than the version supported by datanode. This should have been caught
|
||||
// in readProperties(), even if rollback was not carried out or somehow
|
||||
// failed.
|
||||
throw new IOException("BUG: The stored LV = " + this.getLayoutVersion()
|
||||
+ " is newer than the supported LV = "
|
||||
+ HdfsConstants.LAYOUT_VERSION
|
||||
+ " or name node LV = "
|
||||
+ nsInfo.getLayoutVersion());
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -464,8 +483,13 @@ public class DataStorage extends Storage {
|
|||
* @throws IOException on error
|
||||
*/
|
||||
void doUpgrade(StorageDirectory sd, NamespaceInfo nsInfo) throws IOException {
|
||||
// If the existing on-disk layout version supportes federation, simply
|
||||
// update its layout version.
|
||||
if (LayoutVersion.supports(Feature.FEDERATION, layoutVersion)) {
|
||||
clusterID = nsInfo.getClusterID();
|
||||
// The VERSION file is already read in. Override the layoutVersion
|
||||
// field and overwrite the file.
|
||||
LOG.info("Updating layout version from " + layoutVersion + " to "
|
||||
+ nsInfo.getLayoutVersion() + " for storage " + sd.getRoot());
|
||||
layoutVersion = nsInfo.getLayoutVersion();
|
||||
writeProperties(sd);
|
||||
return;
|
||||
|
@ -550,15 +574,32 @@ public class DataStorage extends Storage {
|
|||
* <li> Remove removed.tmp </li>
|
||||
* </ol>
|
||||
*
|
||||
* Do nothing, if previous directory does not exist.
|
||||
* If previous directory does not exist and the current version supports
|
||||
* federation, perform a simple rollback of layout version. This does not
|
||||
* involve saving/restoration of actual data.
|
||||
*/
|
||||
void doRollback( StorageDirectory sd,
|
||||
NamespaceInfo nsInfo
|
||||
) throws IOException {
|
||||
File prevDir = sd.getPreviousDir();
|
||||
// regular startup if previous dir does not exist
|
||||
if (!prevDir.exists())
|
||||
// This is a regular startup or a post-federation rollback
|
||||
if (!prevDir.exists()) {
|
||||
// The current datanode version supports federation and the layout
|
||||
// version from namenode matches what the datanode supports. An invalid
|
||||
// rollback may happen if namenode didn't rollback and datanode is
|
||||
// running a wrong version. But this will be detected in block pool
|
||||
// level and the invalid VERSION content will be overwritten when
|
||||
// the error is corrected and rollback is retried.
|
||||
if (LayoutVersion.supports(Feature.FEDERATION,
|
||||
HdfsConstants.LAYOUT_VERSION) &&
|
||||
HdfsConstants.LAYOUT_VERSION == nsInfo.getLayoutVersion()) {
|
||||
readProperties(sd, nsInfo.getLayoutVersion());
|
||||
writeProperties(sd);
|
||||
LOG.info("Layout version rolled back to " +
|
||||
nsInfo.getLayoutVersion() + " for storage " + sd.getRoot());
|
||||
}
|
||||
return;
|
||||
}
|
||||
DataStorage prevInfo = new DataStorage();
|
||||
prevInfo.readPreviousVersionProperties(sd);
|
||||
|
||||
|
|
|
@ -145,6 +145,8 @@ public class FsDatasetCache {
|
|||
*/
|
||||
private final HashMap<Key, Value> mappableBlockMap = new HashMap<Key, Value>();
|
||||
|
||||
private final AtomicLong numBlocksCached = new AtomicLong(0);
|
||||
|
||||
private final FsDatasetImpl dataset;
|
||||
|
||||
private final ThreadPoolExecutor uncachingExecutor;
|
||||
|
@ -417,6 +419,7 @@ public class FsDatasetCache {
|
|||
LOG.debug("Successfully cached block " + key.id + " in " + key.bpid +
|
||||
". We are now caching " + newUsedBytes + " bytes in total.");
|
||||
}
|
||||
numBlocksCached.addAndGet(1);
|
||||
success = true;
|
||||
} finally {
|
||||
if (!success) {
|
||||
|
@ -465,6 +468,7 @@ public class FsDatasetCache {
|
|||
}
|
||||
long newUsedBytes =
|
||||
usedBytesCount.release(value.mappableBlock.getLength());
|
||||
numBlocksCached.addAndGet(-1);
|
||||
if (LOG.isDebugEnabled()) {
|
||||
LOG.debug("Uncaching of block " + key.id + " in " + key.bpid +
|
||||
" completed. usedBytes = " + newUsedBytes);
|
||||
|
@ -477,14 +481,14 @@ public class FsDatasetCache {
|
|||
/**
|
||||
* Get the approximate amount of cache space used.
|
||||
*/
|
||||
public long getDnCacheUsed() {
|
||||
public long getCacheUsed() {
|
||||
return usedBytesCount.get();
|
||||
}
|
||||
|
||||
/**
|
||||
* Get the maximum amount of bytes we can cache. This is a constant.
|
||||
*/
|
||||
public long getDnCacheCapacity() {
|
||||
public long getCacheCapacity() {
|
||||
return maxBytes;
|
||||
}
|
||||
|
||||
|
@ -496,4 +500,7 @@ public class FsDatasetCache {
|
|||
return numBlocksFailedToUncache.get();
|
||||
}
|
||||
|
||||
public long getNumBlocksCached() {
|
||||
return numBlocksCached.get();
|
||||
}
|
||||
}
|
||||
|
|
|
@ -341,12 +341,12 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
|
|||
|
||||
@Override // FSDatasetMBean
|
||||
public long getCacheUsed() {
|
||||
return cacheManager.getDnCacheUsed();
|
||||
return cacheManager.getCacheUsed();
|
||||
}
|
||||
|
||||
@Override // FSDatasetMBean
|
||||
public long getCacheCapacity() {
|
||||
return cacheManager.getDnCacheCapacity();
|
||||
return cacheManager.getCacheCapacity();
|
||||
}
|
||||
|
||||
@Override // FSDatasetMBean
|
||||
|
@ -359,6 +359,11 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
|
|||
return cacheManager.getNumBlocksFailedToUncache();
|
||||
}
|
||||
|
||||
@Override // FSDatasetMBean
|
||||
public long getNumBlocksCached() {
|
||||
return cacheManager.getNumBlocksCached();
|
||||
}
|
||||
|
||||
/**
|
||||
* Find the block's on-disk length
|
||||
*/
|
||||
|
|
|
@ -88,6 +88,11 @@ public interface FSDatasetMBean {
|
|||
*/
|
||||
public long getCacheCapacity();
|
||||
|
||||
/**
|
||||
* Returns the number of blocks cached.
|
||||
*/
|
||||
public long getNumBlocksCached();
|
||||
|
||||
/**
|
||||
* Returns the number of blocks that the datanode was unable to cache
|
||||
*/
|
||||
|
|
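Illustrative aside (not part of the change): a short sketch that reads the datanode cache counters, including the new block count, through the FSDatasetMBean interface; the import path is assumed.
import org.apache.hadoop.hdfs.server.datanode.metrics.FSDatasetMBean;

class DatanodeCacheMetricsSketch {
  // Summarizes the cache-related counters exposed by the dataset MBean.
  static String summarize(FSDatasetMBean dataset) {
    return dataset.getNumBlocksCached() + " blocks cached, "
        + dataset.getCacheUsed() + " of " + dataset.getCacheCapacity()
        + " bytes in use";
  }
}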
|
@ -17,12 +17,12 @@
|
|||
*/
|
||||
package org.apache.hadoop.hdfs.server.namenode;
|
||||
|
||||
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_CACHING_ENABLED_DEFAULT;
|
||||
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_CACHING_ENABLED_KEY;
|
||||
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_LIST_CACHE_DIRECTIVES_NUM_RESPONSES;
|
||||
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_LIST_CACHE_DIRECTIVES_NUM_RESPONSES_DEFAULT;
|
||||
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_LIST_CACHE_POOLS_NUM_RESPONSES;
|
||||
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_LIST_CACHE_POOLS_NUM_RESPONSES_DEFAULT;
|
||||
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_LIST_CACHE_DIRECTIVES_NUM_RESPONSES;
|
||||
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_CACHING_ENABLED_KEY;
|
||||
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_CACHING_ENABLED_DEFAULT;
|
||||
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_LIST_CACHE_DIRECTIVES_NUM_RESPONSES_DEFAULT;
|
||||
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_PATH_BASED_CACHE_REFRESH_INTERVAL_MS;
|
||||
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_PATH_BASED_CACHE_REFRESH_INTERVAL_MS_DEFAULT;
|
||||
|
||||
|
@ -43,17 +43,18 @@ import org.apache.commons.logging.Log;
|
|||
import org.apache.commons.logging.LogFactory;
|
||||
import org.apache.hadoop.classification.InterfaceAudience;
|
||||
import org.apache.hadoop.conf.Configuration;
|
||||
import org.apache.hadoop.fs.InvalidRequestException;
|
||||
import org.apache.hadoop.fs.BatchedRemoteIterator.BatchedListEntries;
|
||||
import org.apache.hadoop.fs.InvalidRequestException;
|
||||
import org.apache.hadoop.fs.permission.FsAction;
|
||||
import org.apache.hadoop.hdfs.DFSUtil;
|
||||
import org.apache.hadoop.hdfs.protocol.Block;
|
||||
import org.apache.hadoop.hdfs.protocol.CacheDirective;
|
||||
import org.apache.hadoop.hdfs.protocol.CacheDirectiveEntry;
|
||||
import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo;
|
||||
import org.apache.hadoop.hdfs.protocol.CachePoolEntry;
|
||||
import org.apache.hadoop.hdfs.protocol.CachePoolInfo;
|
||||
import org.apache.hadoop.hdfs.protocol.DatanodeID;
|
||||
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
|
||||
import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo;
|
||||
import org.apache.hadoop.hdfs.protocol.CacheDirective;
|
||||
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
|
||||
import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
|
||||
import org.apache.hadoop.hdfs.server.blockmanagement.CacheReplicationMonitor;
|
||||
|
@ -99,24 +100,24 @@ public final class CacheManager {
|
|||
private final BlockManager blockManager;
|
||||
|
||||
/**
|
||||
* Cache entries, sorted by ID.
|
||||
* Cache directives, sorted by ID.
|
||||
*
|
||||
* listCacheDirectives relies on the ordering of elements in this map
|
||||
* to track what has already been listed by the client.
|
||||
*/
|
||||
private final TreeMap<Long, CacheDirective> entriesById =
|
||||
private final TreeMap<Long, CacheDirective> directivesById =
|
||||
new TreeMap<Long, CacheDirective>();
|
||||
|
||||
/**
|
||||
* The entry ID to use for a new entry. Entry IDs always increase, and are
|
||||
* The directive ID to use for a new directive. IDs always increase, and are
|
||||
* never reused.
|
||||
*/
|
||||
private long nextEntryId;
|
||||
private long nextDirectiveId;
|
||||
|
||||
/**
|
||||
* Cache entries, sorted by path
|
||||
* Cache directives, sorted by path
|
||||
*/
|
||||
private final TreeMap<String, List<CacheDirective>> entriesByPath =
|
||||
private final TreeMap<String, List<CacheDirective>> directivesByPath =
|
||||
new TreeMap<String, List<CacheDirective>>();
|
||||
|
||||
/**
|
||||
|
@ -177,7 +178,7 @@ public final class CacheManager {
|
|||
BlockManager blockManager) {
|
||||
this.namesystem = namesystem;
|
||||
this.blockManager = blockManager;
|
||||
this.nextEntryId = 1;
|
||||
this.nextDirectiveId = 1;
|
||||
this.maxListCachePoolsResponses = conf.getInt(
|
||||
DFS_NAMENODE_LIST_CACHE_POOLS_NUM_RESPONSES,
|
||||
DFS_NAMENODE_LIST_CACHE_POOLS_NUM_RESPONSES_DEFAULT);
|
||||
|
@ -239,7 +240,7 @@ public final class CacheManager {
|
|||
|
||||
public TreeMap<Long, CacheDirective> getEntriesById() {
|
||||
assert namesystem.hasReadLock();
|
||||
return entriesById;
|
||||
return directivesById;
|
||||
}
|
||||
|
||||
@VisibleForTesting
|
||||
|
@ -248,12 +249,12 @@ public final class CacheManager {
|
|||
return cachedBlocks;
|
||||
}
|
||||
|
||||
private long getNextEntryId() throws IOException {
|
||||
private long getNextDirectiveId() throws IOException {
|
||||
assert namesystem.hasWriteLock();
|
||||
if (nextEntryId >= Long.MAX_VALUE - 1) {
|
||||
if (nextDirectiveId >= Long.MAX_VALUE - 1) {
|
||||
throw new IOException("No more available IDs.");
|
||||
}
|
||||
return nextEntryId++;
|
||||
return nextDirectiveId++;
|
||||
}
|
||||
|
||||
// Helper getter / validation methods
|
||||
|
@ -301,7 +302,35 @@ public final class CacheManager {
|
|||
}
|
||||
|
||||
/**
|
||||
* Get a CacheDirective by ID, validating the ID and that the entry
|
||||
* Calculates the absolute expiry time of the directive from the
|
||||
* {@link CacheDirectiveInfo.Expiration}. This converts a relative Expiration
|
||||
* into an absolute time based on the local clock.
|
||||
*
|
||||
* @param directive from which to get the expiry time
|
||||
* @param defaultValue to use if Expiration is not set
|
||||
* @return Absolute expiry time in milliseconds since Unix epoch
|
||||
* @throws InvalidRequestException if the Expiration is invalid
|
||||
*/
|
||||
private static long validateExpiryTime(CacheDirectiveInfo directive,
|
||||
long defaultValue) throws InvalidRequestException {
|
||||
long expiryTime;
|
||||
CacheDirectiveInfo.Expiration expiration = directive.getExpiration();
|
||||
if (expiration != null) {
|
||||
if (expiration.getMillis() < 0) {
|
||||
throw new InvalidRequestException("Cannot set a negative expiration: "
|
||||
+ expiration.getMillis());
|
||||
}
|
||||
// Converts a relative duration into an absolute time based on the local
|
||||
// clock
|
||||
expiryTime = expiration.getAbsoluteMillis();
|
||||
} else {
|
||||
expiryTime = defaultValue;
|
||||
}
|
||||
return expiryTime;
|
||||
}
|
||||
|
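Illustrative aside (not part of the change): the helper above resolves a relative Expiration against the local clock and falls back to the caller-supplied default (EXPIRY_NEVER when adding a new directive). A minimal sketch of the same conversion:
import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo;

class ExpiryTimeSketch {
  // A relative expiration of ten minutes resolves to roughly
  // System.currentTimeMillis() + 600000 via getAbsoluteMillis().
  static long tenMinutesFromNow() {
    return CacheDirectiveInfo.Expiration
        .newRelative(10 * 60 * 1000L)
        .getAbsoluteMillis();
  }
}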
||||
/**
|
||||
* Get a CacheDirective by ID, validating the ID and that the directive
|
||||
* exists.
|
||||
*/
|
||||
private CacheDirective getById(long id) throws InvalidRequestException {
|
||||
|
@ -309,13 +338,13 @@ public final class CacheManager {
|
|||
if (id <= 0) {
|
||||
throw new InvalidRequestException("Invalid negative ID.");
|
||||
}
|
||||
// Find the entry.
|
||||
CacheDirective entry = entriesById.get(id);
|
||||
if (entry == null) {
|
||||
// Find the directive.
|
||||
CacheDirective directive = directivesById.get(id);
|
||||
if (directive == null) {
|
||||
throw new InvalidRequestException("No directive with ID " + id
|
||||
+ " found.");
|
||||
}
|
||||
return entry;
|
||||
return directive;
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -332,122 +361,134 @@ public final class CacheManager {
|
|||
|
||||
// RPC handlers
|
||||
|
||||
private void addInternal(CacheDirective entry) {
|
||||
entriesById.put(entry.getEntryId(), entry);
|
||||
String path = entry.getPath();
|
||||
List<CacheDirective> entryList = entriesByPath.get(path);
|
||||
if (entryList == null) {
|
||||
entryList = new ArrayList<CacheDirective>(1);
|
||||
entriesByPath.put(path, entryList);
|
||||
private void addInternal(CacheDirective directive, CachePool pool) {
|
||||
boolean addedDirective = pool.getDirectiveList().add(directive);
|
||||
assert addedDirective;
|
||||
directivesById.put(directive.getId(), directive);
|
||||
String path = directive.getPath();
|
||||
List<CacheDirective> directives = directivesByPath.get(path);
|
||||
if (directives == null) {
|
||||
directives = new ArrayList<CacheDirective>(1);
|
||||
directivesByPath.put(path, directives);
|
||||
}
|
||||
entryList.add(entry);
|
||||
directives.add(directive);
|
||||
}
|
||||
|
||||
/**
|
||||
* To be called only from the edit log loading code
|
||||
*/
|
||||
CacheDirectiveInfo addDirectiveFromEditLog(CacheDirectiveInfo directive)
|
||||
throws InvalidRequestException {
|
||||
long id = directive.getId();
|
||||
CacheDirective entry =
|
||||
new CacheDirective(
|
||||
directive.getId(),
|
||||
directive.getPath().toUri().getPath(),
|
||||
directive.getReplication(),
|
||||
directive.getExpiration().getAbsoluteMillis());
|
||||
CachePool pool = cachePools.get(directive.getPool());
|
||||
addInternal(entry, pool);
|
||||
if (nextDirectiveId <= id) {
|
||||
nextDirectiveId = id + 1;
|
||||
}
|
||||
return entry.toInfo();
|
||||
}
|
||||
|
||||
public CacheDirectiveInfo addDirective(
|
||||
CacheDirectiveInfo directive, FSPermissionChecker pc)
|
||||
CacheDirectiveInfo info, FSPermissionChecker pc)
|
||||
throws IOException {
|
||||
assert namesystem.hasWriteLock();
|
||||
CacheDirective entry;
|
||||
CacheDirective directive;
|
||||
try {
|
||||
CachePool pool = getCachePool(validatePoolName(directive));
|
||||
CachePool pool = getCachePool(validatePoolName(info));
|
||||
checkWritePermission(pc, pool);
|
||||
String path = validatePath(directive);
|
||||
short replication = validateReplication(directive, (short)1);
|
||||
long id;
|
||||
if (directive.getId() != null) {
|
||||
// We are loading an entry from the edit log.
|
||||
// Use the ID from the edit log.
|
||||
id = directive.getId();
|
||||
if (id <= 0) {
|
||||
throw new InvalidRequestException("can't add an ID " +
|
||||
"of " + id + ": it is not positive.");
|
||||
}
|
||||
if (id >= Long.MAX_VALUE) {
|
||||
throw new InvalidRequestException("can't add an ID " +
|
||||
"of " + id + ": it is too big.");
|
||||
}
|
||||
if (nextEntryId <= id) {
|
||||
nextEntryId = id + 1;
|
||||
}
|
||||
} else {
|
||||
String path = validatePath(info);
|
||||
short replication = validateReplication(info, (short)1);
|
||||
long expiryTime = validateExpiryTime(info,
|
||||
CacheDirectiveInfo.Expiration.EXPIRY_NEVER);
|
||||
// All validation passed
|
||||
// Add a new entry with the next available ID.
|
||||
id = getNextEntryId();
|
||||
}
|
||||
entry = new CacheDirective(id, path, replication, pool);
|
||||
addInternal(entry);
|
||||
long id = getNextDirectiveId();
|
||||
directive = new CacheDirective(id, path, replication, expiryTime);
|
||||
addInternal(directive, pool);
|
||||
} catch (IOException e) {
|
||||
LOG.warn("addDirective of " + directive + " failed: ", e);
|
||||
LOG.warn("addDirective of " + info + " failed: ", e);
|
||||
throw e;
|
||||
}
|
||||
LOG.info("addDirective of " + directive + " successful.");
|
||||
LOG.info("addDirective of " + info + " successful.");
|
||||
if (monitor != null) {
|
||||
monitor.kick();
|
||||
}
|
||||
return entry.toDirective();
|
||||
return directive.toInfo();
|
||||
}
|
||||
|
||||
public void modifyDirective(CacheDirectiveInfo directive,
public void modifyDirective(CacheDirectiveInfo info,
FSPermissionChecker pc) throws IOException {
assert namesystem.hasWriteLock();
String idString =
(directive.getId() == null) ?
"(null)" : directive.getId().toString();
(info.getId() == null) ?
"(null)" : info.getId().toString();
try {
// Check for invalid IDs.
Long id = directive.getId();
Long id = info.getId();
if (id == null) {
throw new InvalidRequestException("Must supply an ID.");
}
CacheDirective prevEntry = getById(id);
checkWritePermission(pc, prevEntry.getPool());
String path = prevEntry.getPath();
if (directive.getPath() != null) {
path = validatePath(directive);
if (info.getPath() != null) {
path = validatePath(info);
}

short replication = prevEntry.getReplication();
if (directive.getReplication() != null) {
replication = validateReplication(directive, replication);
}
replication = validateReplication(info, replication);

long expiryTime = prevEntry.getExpiryTime();
expiryTime = validateExpiryTime(info, expiryTime);

CachePool pool = prevEntry.getPool();
if (directive.getPool() != null) {
pool = getCachePool(validatePoolName(directive));
if (info.getPool() != null) {
pool = getCachePool(validatePoolName(info));
checkWritePermission(pc, pool);
}
removeInternal(prevEntry);
CacheDirective newEntry =
new CacheDirective(id, path, replication, pool);
addInternal(newEntry);
new CacheDirective(id, path, replication, expiryTime);
addInternal(newEntry, pool);
} catch (IOException e) {
LOG.warn("modifyDirective of " + idString + " failed: ", e);
throw e;
}
LOG.info("modifyDirective of " + idString + " successfully applied " +
directive + ".");
info+ ".");
}
|
||||
|
||||
public void removeInternal(CacheDirective existing)
public void removeInternal(CacheDirective directive)
throws InvalidRequestException {
assert namesystem.hasWriteLock();
// Remove the corresponding entry in entriesByPath.
String path = existing.getPath();
List<CacheDirective> entries = entriesByPath.get(path);
if (entries == null || !entries.remove(existing)) {
// Remove the corresponding entry in directivesByPath.
String path = directive.getPath();
List<CacheDirective> directives = directivesByPath.get(path);
if (directives == null || !directives.remove(directive)) {
throw new InvalidRequestException("Failed to locate entry " +
existing.getEntryId() + " by path " + existing.getPath());
directive.getId() + " by path " + directive.getPath());
}
if (entries.size() == 0) {
entriesByPath.remove(path);
if (directives.size() == 0) {
directivesByPath.remove(path);
}
entriesById.remove(existing.getEntryId());
directivesById.remove(directive.getId());
directive.getPool().getDirectiveList().remove(directive);
assert directive.getPool() == null;
}
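removeInternal has to detach a directive from three indexes: the per-path list, the ID map, and the owning pool's directive list (the IntrusiveCollection removal clears the pool back-pointer, which is what the assert checks). A minimal sketch of that three-index bookkeeping using plain JDK collections; the class and field names are illustrative only, not the Hadoop CacheManager internals.

import java.util.*;

// Sketch only: mirrors directivesByPath, directivesById and the pool's list
// with ordinary collections. Not the Hadoop implementation.
class DirectiveIndexSketch {
  static final class Directive {
    final long id; final String path; final String pool;
    Directive(long id, String path, String pool) {
      this.id = id; this.path = path; this.pool = pool;
    }
  }

  final Map<Long, Directive> byId = new TreeMap<Long, Directive>();
  final Map<String, List<Directive>> byPath = new HashMap<String, List<Directive>>();
  final Map<String, List<Directive>> byPool = new HashMap<String, List<Directive>>();

  void add(Directive d) {
    byId.put(d.id, d);
    byPath.computeIfAbsent(d.path, k -> new LinkedList<>()).add(d);
    byPool.computeIfAbsent(d.pool, k -> new LinkedList<>()).add(d);
  }

  void remove(Directive d) {
    List<Directive> samePath = byPath.get(d.path);
    if (samePath == null || !samePath.remove(d)) {
      throw new IllegalStateException(
          "Failed to locate directive " + d.id + " by path " + d.path);
    }
    if (samePath.isEmpty()) {
      byPath.remove(d.path);          // drop the now-empty per-path bucket
    }
    byId.remove(d.id);                // drop the ID index entry
    byPool.get(d.pool).remove(d);     // unlink from the owning pool's list
  }
}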
|
||||
|
||||
public void removeDirective(long id, FSPermissionChecker pc)
throws IOException {
assert namesystem.hasWriteLock();
try {
CacheDirective existing = getById(id);
checkWritePermission(pc, existing.getPool());
removeInternal(existing);
CacheDirective directive = getById(id);
checkWritePermission(pc, directive.getPool());
removeInternal(directive);
} catch (IOException e) {
LOG.warn("removeDirective of " + id + " failed: ", e);
throw e;
|
||||
|
@ -478,13 +519,13 @@ public final class CacheManager {
|
|||
new ArrayList<CacheDirectiveEntry>(NUM_PRE_ALLOCATED_ENTRIES);
|
||||
int numReplies = 0;
|
||||
SortedMap<Long, CacheDirective> tailMap =
|
||||
entriesById.tailMap(prevId + 1);
|
||||
directivesById.tailMap(prevId + 1);
|
||||
for (Entry<Long, CacheDirective> cur : tailMap.entrySet()) {
|
||||
if (numReplies >= maxListCacheDirectivesNumResponses) {
|
||||
return new BatchedListEntries<CacheDirectiveEntry>(replies, true);
|
||||
}
|
||||
CacheDirective curEntry = cur.getValue();
|
||||
CacheDirectiveInfo info = cur.getValue().toDirective();
|
||||
CacheDirective curDirective = cur.getValue();
|
||||
CacheDirectiveInfo info = cur.getValue().toInfo();
|
||||
if (filter.getPool() != null &&
|
||||
!info.getPool().equals(filter.getPool())) {
|
||||
continue;
|
||||
|
@ -496,7 +537,7 @@ public final class CacheManager {
|
|||
boolean hasPermission = true;
|
||||
if (pc != null) {
|
||||
try {
|
||||
pc.checkPermission(curEntry.getPool(), FsAction.READ);
|
||||
pc.checkPermission(curDirective.getPool(), FsAction.READ);
|
||||
} catch (AccessControlException e) {
|
||||
hasPermission = false;
|
||||
}
|
||||
|
@ -530,7 +571,7 @@ public final class CacheManager {
|
|||
pool = CachePool.createFromInfoAndDefaults(info);
|
||||
cachePools.put(pool.getPoolName(), pool);
|
||||
LOG.info("Created new cache pool " + pool);
|
||||
return pool.getInfo(null);
|
||||
return pool.getInfo(true);
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -599,39 +640,34 @@ public final class CacheManager {
|
|||
throw new InvalidRequestException(
|
||||
"Cannot remove non-existent cache pool " + poolName);
|
||||
}
|
||||
|
||||
// Remove entries using this pool
|
||||
// TODO: could optimize this somewhat to avoid the need to iterate
|
||||
// over all entries in entriesById
|
||||
Iterator<Entry<Long, CacheDirective>> iter =
|
||||
entriesById.entrySet().iterator();
|
||||
// Remove all directives in this pool.
|
||||
Iterator<CacheDirective> iter = pool.getDirectiveList().iterator();
|
||||
while (iter.hasNext()) {
|
||||
Entry<Long, CacheDirective> entry = iter.next();
|
||||
if (entry.getValue().getPool() == pool) {
|
||||
entriesByPath.remove(entry.getValue().getPath());
|
||||
CacheDirective directive = iter.next();
|
||||
directivesByPath.remove(directive.getPath());
|
||||
directivesById.remove(directive.getId());
|
||||
iter.remove();
|
||||
}
|
||||
}
|
||||
if (monitor != null) {
|
||||
monitor.kick();
|
||||
}
|
||||
}
|
||||
|
||||
public BatchedListEntries<CachePoolInfo>
public BatchedListEntries<CachePoolEntry>
listCachePools(FSPermissionChecker pc, String prevKey) {
assert namesystem.hasReadLock();
final int NUM_PRE_ALLOCATED_ENTRIES = 16;
ArrayList<CachePoolInfo> results =
new ArrayList<CachePoolInfo>(NUM_PRE_ALLOCATED_ENTRIES);
ArrayList<CachePoolEntry> results =
new ArrayList<CachePoolEntry>(NUM_PRE_ALLOCATED_ENTRIES);
SortedMap<String, CachePool> tailMap = cachePools.tailMap(prevKey, false);
int numListed = 0;
for (Entry<String, CachePool> cur : tailMap.entrySet()) {
if (numListed++ >= maxListCachePoolsResponses) {
return new BatchedListEntries<CachePoolInfo>(results, true);
return new BatchedListEntries<CachePoolEntry>(results, true);
}
results.add(cur.getValue().getInfo(pc));
results.add(cur.getValue().getEntry(pc));
}
return new BatchedListEntries<CachePoolInfo>(results, false);
return new BatchedListEntries<CachePoolEntry>(results, false);
}
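Listing resumes from the key after prevKey via cachePools.tailMap(prevKey, false) and stops once the per-call response cap is hit, so callers page through the pools in sorted order by repeating the call with the last name returned. A minimal sketch of that pagination pattern on a plain TreeMap; the cap and pool names are made up for illustration.

import java.util.*;

// Sketch only: resume-after-key batched listing over a sorted map.
class BatchedListingSketch {
  static List<String> listFrom(TreeMap<String, String> pools,
                               String prevKey, int maxResponses) {
    List<String> results = new ArrayList<String>();
    for (Map.Entry<String, String> cur : pools.tailMap(prevKey, false).entrySet()) {
      if (results.size() >= maxResponses) {
        break;                        // caller asks again, passing the last key returned
      }
      results.add(cur.getKey());
    }
    return results;
  }

  public static void main(String[] args) {
    TreeMap<String, String> pools = new TreeMap<String, String>();
    for (String name : new String[] {"analytics", "batch", "default", "web"}) {
      pools.put(name, name);
    }
    System.out.println(listFrom(pools, "analytics", 2)); // [batch, default]
  }
}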
|
||||
|
||||
public void setCachedLocations(LocatedBlock block) {
|
||||
|
@ -693,13 +729,6 @@ public final class CacheManager {
|
|||
for (Iterator<Long> iter = blockIds.iterator(); iter.hasNext(); ) {
|
||||
Block block = new Block(iter.next());
|
||||
BlockInfo blockInfo = blockManager.getStoredBlock(block);
|
||||
if (blockInfo.getGenerationStamp() < block.getGenerationStamp()) {
|
||||
// The NameNode will eventually remove or update the out-of-date block.
|
||||
// Until then, we pretend that it isn't cached.
|
||||
LOG.warn("Genstamp in cache report disagrees with our genstamp for " +
|
||||
block + ": expected genstamp " + blockInfo.getGenerationStamp());
|
||||
continue;
|
||||
}
|
||||
if (!blockInfo.isComplete()) {
|
||||
LOG.warn("Ignoring block id " + block.getBlockId() + ", because " +
|
||||
"it is in not complete yet. It is in state " +
|
||||
|
@ -743,9 +772,9 @@ public final class CacheManager {
|
|||
*/
|
||||
public void saveState(DataOutput out, String sdPath)
|
||||
throws IOException {
|
||||
out.writeLong(nextEntryId);
|
||||
out.writeLong(nextDirectiveId);
|
||||
savePools(out, sdPath);
|
||||
saveEntries(out, sdPath);
|
||||
saveDirectives(out, sdPath);
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -755,10 +784,10 @@ public final class CacheManager {
|
|||
* @throws IOException
|
||||
*/
|
||||
public void loadState(DataInput in) throws IOException {
|
||||
nextEntryId = in.readLong();
|
||||
// pools need to be loaded first since entries point to their parent pool
|
||||
nextDirectiveId = in.readLong();
|
||||
// pools need to be loaded first since directives point to their parent pool
|
||||
loadPools(in);
|
||||
loadEntries(in);
|
||||
loadDirectives(in);
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -773,7 +802,7 @@ public final class CacheManager {
|
|||
Counter counter = prog.getCounter(Phase.SAVING_CHECKPOINT, step);
|
||||
out.writeInt(cachePools.size());
|
||||
for (CachePool pool: cachePools.values()) {
|
||||
pool.getInfo(null).writeTo(out);
|
||||
pool.getInfo(true).writeTo(out);
|
||||
counter.increment();
|
||||
}
|
||||
prog.endStep(Phase.SAVING_CHECKPOINT, step);
|
||||
|
@ -782,19 +811,20 @@ public final class CacheManager {
|
|||
/*
|
||||
* Save cache entries to fsimage
|
||||
*/
|
||||
private void saveEntries(DataOutput out, String sdPath)
|
||||
private void saveDirectives(DataOutput out, String sdPath)
|
||||
throws IOException {
|
||||
StartupProgress prog = NameNode.getStartupProgress();
|
||||
Step step = new Step(StepType.CACHE_ENTRIES, sdPath);
|
||||
prog.beginStep(Phase.SAVING_CHECKPOINT, step);
|
||||
prog.setTotal(Phase.SAVING_CHECKPOINT, step, entriesById.size());
|
||||
prog.setTotal(Phase.SAVING_CHECKPOINT, step, directivesById.size());
|
||||
Counter counter = prog.getCounter(Phase.SAVING_CHECKPOINT, step);
|
||||
out.writeInt(entriesById.size());
|
||||
for (CacheDirective entry: entriesById.values()) {
|
||||
out.writeLong(entry.getEntryId());
|
||||
Text.writeString(out, entry.getPath());
|
||||
out.writeShort(entry.getReplication());
|
||||
Text.writeString(out, entry.getPool().getPoolName());
|
||||
out.writeInt(directivesById.size());
|
||||
for (CacheDirective directive : directivesById.values()) {
|
||||
out.writeLong(directive.getId());
|
||||
Text.writeString(out, directive.getPath());
|
||||
out.writeShort(directive.getReplication());
|
||||
Text.writeString(out, directive.getPool().getPoolName());
|
||||
out.writeLong(directive.getExpiryTime());
|
||||
counter.increment();
|
||||
}
|
||||
prog.endStep(Phase.SAVING_CHECKPOINT, step);
|
||||
|
@ -819,38 +849,42 @@ public final class CacheManager {
|
|||
}
|
||||
|
||||
/**
|
||||
* Load cache entries from the fsimage
|
||||
* Load cache directives from the fsimage
|
||||
*/
|
||||
private void loadEntries(DataInput in) throws IOException {
|
||||
private void loadDirectives(DataInput in) throws IOException {
|
||||
StartupProgress prog = NameNode.getStartupProgress();
|
||||
Step step = new Step(StepType.CACHE_ENTRIES);
|
||||
prog.beginStep(Phase.LOADING_FSIMAGE, step);
|
||||
int numberOfEntries = in.readInt();
|
||||
prog.setTotal(Phase.LOADING_FSIMAGE, step, numberOfEntries);
|
||||
int numDirectives = in.readInt();
|
||||
prog.setTotal(Phase.LOADING_FSIMAGE, step, numDirectives);
|
||||
Counter counter = prog.getCounter(Phase.LOADING_FSIMAGE, step);
|
||||
for (int i = 0; i < numberOfEntries; i++) {
|
||||
long entryId = in.readLong();
|
||||
for (int i = 0; i < numDirectives; i++) {
|
||||
long directiveId = in.readLong();
|
||||
String path = Text.readString(in);
|
||||
short replication = in.readShort();
|
||||
String poolName = Text.readString(in);
|
||||
long expiryTime = in.readLong();
|
||||
// Get pool reference by looking it up in the map
|
||||
CachePool pool = cachePools.get(poolName);
|
||||
if (pool == null) {
|
||||
throw new IOException("Entry refers to pool " + poolName +
|
||||
throw new IOException("Directive refers to pool " + poolName +
|
||||
", which does not exist.");
|
||||
}
|
||||
CacheDirective entry =
|
||||
new CacheDirective(entryId, path, replication, pool);
|
||||
if (entriesById.put(entry.getEntryId(), entry) != null) {
|
||||
throw new IOException("An entry with ID " + entry.getEntryId() +
|
||||
CacheDirective directive =
|
||||
new CacheDirective(directiveId, path, replication, expiryTime);
|
||||
boolean addedDirective = pool.getDirectiveList().add(directive);
|
||||
assert addedDirective;
|
||||
if (directivesById.put(directive.getId(), directive) != null) {
|
||||
throw new IOException("A directive with ID " + directive.getId() +
|
||||
" already exists");
|
||||
}
|
||||
List<CacheDirective> entries = entriesByPath.get(entry.getPath());
|
||||
if (entries == null) {
|
||||
entries = new LinkedList<CacheDirective>();
|
||||
entriesByPath.put(entry.getPath(), entries);
|
||||
List<CacheDirective> directives =
|
||||
directivesByPath.get(directive.getPath());
|
||||
if (directives == null) {
|
||||
directives = new LinkedList<CacheDirective>();
|
||||
directivesByPath.put(directive.getPath(), directives);
|
||||
}
|
||||
entries.add(entry);
|
||||
directives.add(directive);
|
||||
counter.increment();
|
||||
}
|
||||
prog.endStep(Phase.LOADING_FSIMAGE, step);
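Each directive record saved above is a count followed by (id, path, replication, pool name, expiry time) per directive, and loading reads them back in the same order once the pools are in place. A rough sketch of that record layout; it uses plain DataOutput/DataInput with writeUTF standing in for Hadoop's Text helpers, so it shows the shape of the data rather than the exact fsimage encoding.

import java.io.*;

// Sketch only: round-trip one directive record in the order written above.
class DirectiveRecordSketch {
  public static void main(String[] args) throws IOException {
    ByteArrayOutputStream buf = new ByteArrayOutputStream();
    DataOutputStream out = new DataOutputStream(buf);
    out.writeInt(1);                      // number of directives
    out.writeLong(42L);                   // id
    out.writeUTF("/warm/data");           // path (illustrative)
    out.writeShort(3);                    // replication
    out.writeUTF("analytics");            // pool name (illustrative)
    out.writeLong(Long.MAX_VALUE);        // expiry time ("never")
    out.close();

    DataInputStream in =
        new DataInputStream(new ByteArrayInputStream(buf.toByteArray()));
    int n = in.readInt();
    for (int i = 0; i < n; i++) {
      System.out.printf("id=%d path=%s repl=%d pool=%s expiry=%d%n",
          in.readLong(), in.readUTF(), in.readShort(), in.readUTF(), in.readLong());
    }
    in.close();
  }
}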
|
||||
|
|
|
@ -26,9 +26,13 @@ import org.apache.commons.logging.LogFactory;
|
|||
import org.apache.hadoop.classification.InterfaceAudience;
|
||||
import org.apache.hadoop.fs.permission.FsAction;
|
||||
import org.apache.hadoop.fs.permission.FsPermission;
|
||||
import org.apache.hadoop.hdfs.protocol.CacheDirective;
|
||||
import org.apache.hadoop.hdfs.protocol.CachePoolEntry;
|
||||
import org.apache.hadoop.hdfs.protocol.CachePoolInfo;
|
||||
import org.apache.hadoop.hdfs.protocol.CachePoolStats;
|
||||
import org.apache.hadoop.security.AccessControlException;
|
||||
import org.apache.hadoop.security.UserGroupInformation;
|
||||
import org.apache.hadoop.util.IntrusiveCollection;
|
||||
|
||||
import com.google.common.base.Preconditions;
|
||||
|
||||
|
@ -69,6 +73,22 @@ public final class CachePool {
|
|||
|
||||
private int weight;
|
||||
|
||||
public final static class DirectiveList
extends IntrusiveCollection<CacheDirective> {
private CachePool cachePool;

private DirectiveList(CachePool cachePool) {
this.cachePool = cachePool;
}

public CachePool getCachePool() {
return cachePool;
}
}

@Nonnull
private final DirectiveList directiveList = new DirectiveList(this);
|
||||
|
||||
/**
|
||||
* Create a new cache pool based on a CachePoolInfo object and the defaults.
|
||||
* We will fill in information that was not supplied according to the
|
||||
|
@ -171,7 +191,7 @@ public final class CachePool {
|
|||
* @return
|
||||
* Cache pool information.
|
||||
*/
|
||||
private CachePoolInfo getInfo(boolean fullInfo) {
|
||||
CachePoolInfo getInfo(boolean fullInfo) {
|
||||
CachePoolInfo info = new CachePoolInfo(poolName);
|
||||
if (!fullInfo) {
|
||||
return info;
|
||||
|
@ -182,6 +202,19 @@ public final class CachePool {
|
|||
setWeight(weight);
|
||||
}
|
||||
|
||||
/**
|
||||
* Get statistics about this CachePool.
|
||||
*
|
||||
* @return Cache pool statistics.
|
||||
*/
|
||||
private CachePoolStats getStats() {
|
||||
return new CachePoolStats.Builder().
|
||||
setBytesNeeded(0).
|
||||
setBytesCached(0).
|
||||
setFilesAffected(0).
|
||||
build();
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns a CachePoolInfo describing this CachePool based on the permissions
|
||||
* of the calling user. Unprivileged users will see only minimal descriptive
|
||||
|
@ -189,9 +222,9 @@ public final class CachePool {
|
|||
*
|
||||
* @param pc Permission checker to be used to validate the user's permissions,
|
||||
* or null
|
||||
* @return CachePoolInfo describing this CachePool
|
||||
* @return CachePoolEntry describing this CachePool
|
||||
*/
|
||||
public CachePoolInfo getInfo(FSPermissionChecker pc) {
|
||||
public CachePoolEntry getEntry(FSPermissionChecker pc) {
|
||||
boolean hasPermission = true;
|
||||
if (pc != null) {
|
||||
try {
|
||||
|
@ -200,7 +233,8 @@ public final class CachePool {
|
|||
hasPermission = false;
|
||||
}
|
||||
}
|
||||
return getInfo(hasPermission);
|
||||
return new CachePoolEntry(getInfo(hasPermission),
|
||||
hasPermission ? getStats() : new CachePoolStats.Builder().build());
|
||||
}
|
||||
|
||||
public String toString() {
|
||||
|
@ -212,4 +246,8 @@ public final class CachePool {
|
|||
append(", weight:").append(weight).
|
||||
append(" }").toString();
|
||||
}
|
||||
|
||||
public DirectiveList getDirectiveList() {
|
||||
return directiveList;
|
||||
}
|
||||
}
|
||||
|
|
|
@ -17,121 +17,76 @@
|
|||
*/
|
||||
package org.apache.hadoop.hdfs.server.namenode;
|
||||
|
||||
import org.apache.hadoop.fs.permission.PermissionStatus;
|
||||
import org.apache.hadoop.hdfs.protocol.DSQuotaExceededException;
|
||||
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
|
||||
import org.apache.hadoop.hdfs.protocol.NSQuotaExceededException;
|
||||
import org.apache.hadoop.hdfs.protocol.QuotaExceededException;
|
||||
|
||||
import com.google.common.annotations.VisibleForTesting;
|
||||
|
||||
/**
|
||||
* Directory INode class that has a quota restriction
|
||||
* Quota feature for {@link INodeDirectory}.
|
||||
*/
|
||||
public class INodeDirectoryWithQuota extends INodeDirectory {
|
||||
public final class DirectoryWithQuotaFeature extends INodeDirectory.Feature {
|
||||
public static final long DEFAULT_NAMESPACE_QUOTA = Long.MAX_VALUE;
|
||||
public static final long DEFAULT_DISKSPACE_QUOTA = HdfsConstants.QUOTA_RESET;
|
||||
|
||||
/** Name space quota */
|
||||
private long nsQuota = Long.MAX_VALUE;
|
||||
private long nsQuota = DEFAULT_NAMESPACE_QUOTA;
|
||||
/** Name space count */
|
||||
private long namespace = 1L;
|
||||
/** Disk space quota */
|
||||
private long dsQuota = HdfsConstants.QUOTA_RESET;
|
||||
private long dsQuota = DEFAULT_DISKSPACE_QUOTA;
|
||||
/** Disk space count */
|
||||
private long diskspace = 0L;
|
||||
|
||||
/** Convert an existing directory inode to one with the given quota
|
||||
*
|
||||
* @param nsQuota Namespace quota to be assigned to this inode
|
||||
* @param dsQuota Diskspace quota to be assigned to this inode
|
||||
* @param other The other inode from which all other properties are copied
|
||||
*/
|
||||
INodeDirectoryWithQuota(INodeDirectory other, boolean adopt,
|
||||
long nsQuota, long dsQuota) {
|
||||
super(other, adopt);
|
||||
final Quota.Counts counts = other.computeQuotaUsage();
|
||||
this.namespace = counts.get(Quota.NAMESPACE);
|
||||
this.diskspace = counts.get(Quota.DISKSPACE);
|
||||
DirectoryWithQuotaFeature(long nsQuota, long dsQuota) {
|
||||
this.nsQuota = nsQuota;
|
||||
this.dsQuota = dsQuota;
|
||||
}
|
||||
|
||||
public INodeDirectoryWithQuota(INodeDirectory other, boolean adopt,
|
||||
Quota.Counts quota) {
|
||||
this(other, adopt, quota.get(Quota.NAMESPACE), quota.get(Quota.DISKSPACE));
|
||||
}
|
||||
|
||||
/** constructor with no quota verification */
|
||||
INodeDirectoryWithQuota(long id, byte[] name, PermissionStatus permissions,
|
||||
long modificationTime, long nsQuota, long dsQuota) {
|
||||
super(id, name, permissions, modificationTime);
|
||||
this.nsQuota = nsQuota;
|
||||
this.dsQuota = dsQuota;
|
||||
}
|
||||
|
||||
/** constructor with no quota verification */
|
||||
INodeDirectoryWithQuota(long id, byte[] name, PermissionStatus permissions) {
|
||||
super(id, name, permissions, 0L);
|
||||
}
|
||||
|
||||
@Override
|
||||
public Quota.Counts getQuotaCounts() {
|
||||
/** @return the quota set or -1 if it is not set. */
|
||||
Quota.Counts getQuota() {
|
||||
return Quota.Counts.newInstance(nsQuota, dsQuota);
|
||||
}
|
||||
|
||||
/** Set this directory's quota
|
||||
*
|
||||
* @param nsQuota Namespace quota to be set
|
||||
* @param dsQuota diskspace quota to be set
|
||||
* @param dsQuota Diskspace quota to be set
|
||||
*/
|
||||
public void setQuota(long nsQuota, long dsQuota) {
|
||||
void setQuota(long nsQuota, long dsQuota) {
|
||||
this.nsQuota = nsQuota;
|
||||
this.dsQuota = dsQuota;
|
||||
}
|
||||
|
||||
@Override
|
||||
public Quota.Counts computeQuotaUsage(Quota.Counts counts, boolean useCache,
|
||||
int lastSnapshotId) {
|
||||
if (useCache && isQuotaSet()) {
|
||||
// use cache value
|
||||
Quota.Counts addNamespaceDiskspace(Quota.Counts counts) {
|
||||
counts.add(Quota.NAMESPACE, namespace);
|
||||
counts.add(Quota.DISKSPACE, diskspace);
|
||||
} else {
|
||||
super.computeQuotaUsage(counts, false, lastSnapshotId);
|
||||
}
|
||||
return counts;
|
||||
}
|
||||
|
||||
@Override
|
||||
public ContentSummaryComputationContext computeContentSummary(
|
||||
ContentSummaryComputationContext computeContentSummary(final INodeDirectory dir,
|
||||
final ContentSummaryComputationContext summary) {
|
||||
final long original = summary.getCounts().get(Content.DISKSPACE);
|
||||
long oldYieldCount = summary.getYieldCount();
|
||||
super.computeContentSummary(summary);
|
||||
dir.computeDirectoryContentSummary(summary);
|
||||
// Check only when the content has not changed in the middle.
|
||||
if (oldYieldCount == summary.getYieldCount()) {
|
||||
checkDiskspace(summary.getCounts().get(Content.DISKSPACE) - original);
|
||||
checkDiskspace(dir, summary.getCounts().get(Content.DISKSPACE) - original);
|
||||
}
|
||||
return summary;
|
||||
}
|
||||
|
||||
private void checkDiskspace(final long computed) {
|
||||
if (-1 != getQuotaCounts().get(Quota.DISKSPACE) && diskspace != computed) {
|
||||
private void checkDiskspace(final INodeDirectory dir, final long computed) {
|
||||
if (-1 != getQuota().get(Quota.DISKSPACE) && diskspace != computed) {
|
||||
NameNode.LOG.error("BUG: Inconsistent diskspace for directory "
|
||||
+ getFullPathName() + ". Cached = " + diskspace
|
||||
+ dir.getFullPathName() + ". Cached = " + diskspace
|
||||
+ " != Computed = " + computed);
|
||||
}
|
||||
}
|
||||
|
||||
/** Get the number of names in the subtree rooted at this directory
|
||||
* @return the size of the subtree rooted at this directory
|
||||
*/
|
||||
long numItemsInTree() {
|
||||
return namespace;
|
||||
}
|
||||
|
||||
@Override
|
||||
public final void addSpaceConsumed(final long nsDelta, final long dsDelta,
|
||||
boolean verify) throws QuotaExceededException {
|
||||
if (isQuotaSet()) {
|
||||
void addSpaceConsumed(final INodeDirectory dir, final long nsDelta,
|
||||
final long dsDelta, boolean verify) throws QuotaExceededException {
|
||||
if (dir.isQuotaSet()) {
|
||||
// The following steps are important:
|
||||
// check quotas in this inode and all ancestors before changing counts
|
||||
// so that no change is made if there is any quota violation.
|
||||
|
@ -141,11 +96,11 @@ public class INodeDirectoryWithQuota extends INodeDirectory {
|
|||
verifyQuota(nsDelta, dsDelta);
|
||||
}
|
||||
// (2) verify quota and then add count in ancestors
|
||||
super.addSpaceConsumed(nsDelta, dsDelta, verify);
|
||||
dir.addSpaceConsumed2Parent(nsDelta, dsDelta, verify);
|
||||
// (3) add count in this inode
|
||||
addSpaceConsumed2Cache(nsDelta, dsDelta);
|
||||
} else {
|
||||
super.addSpaceConsumed(nsDelta, dsDelta, verify);
|
||||
dir.addSpaceConsumed2Parent(nsDelta, dsDelta, verify);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -154,7 +109,7 @@ public class INodeDirectoryWithQuota extends INodeDirectory {
|
|||
* @param nsDelta the change of the tree size
|
||||
* @param dsDelta change to disk space occupied
|
||||
*/
|
||||
protected void addSpaceConsumed2Cache(long nsDelta, long dsDelta) {
|
||||
public void addSpaceConsumed2Cache(long nsDelta, long dsDelta) {
|
||||
namespace += nsDelta;
|
||||
diskspace += dsDelta;
|
||||
}
|
||||
|
@ -172,41 +127,42 @@ public class INodeDirectoryWithQuota extends INodeDirectory {
|
|||
this.diskspace = diskspace;
|
||||
}
|
||||
|
||||
/** @return the namespace and diskspace consumed. */
|
||||
public Quota.Counts getSpaceConsumed() {
|
||||
return Quota.Counts.newInstance(namespace, diskspace);
|
||||
}
|
||||
|
||||
/** Verify if the namespace quota is violated after applying delta. */
|
||||
void verifyNamespaceQuota(long delta) throws NSQuotaExceededException {
|
||||
private void verifyNamespaceQuota(long delta) throws NSQuotaExceededException {
|
||||
if (Quota.isViolated(nsQuota, namespace, delta)) {
|
||||
throw new NSQuotaExceededException(nsQuota, namespace + delta);
|
||||
}
|
||||
}
|
||||
/** Verify if the diskspace quota is violated after applying delta. */
|
||||
private void verifyDiskspaceQuota(long delta) throws DSQuotaExceededException {
|
||||
if (Quota.isViolated(dsQuota, diskspace, delta)) {
|
||||
throw new DSQuotaExceededException(dsQuota, diskspace + delta);
|
||||
}
|
||||
}
|
||||
|
||||
/** Verify if the namespace count disk space satisfies the quota restriction
|
||||
* @throws QuotaExceededException if the given quota is less than the count
|
||||
/**
|
||||
* @throws QuotaExceededException if the namespace or diskspace quota is
|
||||
* violated after applying the deltas.
|
||||
*/
|
||||
void verifyQuota(long nsDelta, long dsDelta) throws QuotaExceededException {
|
||||
verifyNamespaceQuota(nsDelta);
|
||||
|
||||
if (Quota.isViolated(dsQuota, diskspace, dsDelta)) {
|
||||
throw new DSQuotaExceededException(dsQuota, diskspace + dsDelta);
|
||||
}
|
||||
verifyDiskspaceQuota(dsDelta);
|
||||
}
|
||||
|
||||
String namespaceString() {
|
||||
private String namespaceString() {
|
||||
return "namespace: " + (nsQuota < 0? "-": namespace + "/" + nsQuota);
|
||||
}
|
||||
String diskspaceString() {
|
||||
private String diskspaceString() {
|
||||
return "diskspace: " + (dsQuota < 0? "-": diskspace + "/" + dsQuota);
|
||||
}
|
||||
String quotaString() {
|
||||
return ", Quota[" + namespaceString() + ", " + diskspaceString() + "]";
|
||||
}
|
||||
|
||||
@VisibleForTesting
|
||||
public long getNamespace() {
|
||||
return this.namespace;
|
||||
}
|
||||
|
||||
@VisibleForTesting
|
||||
public long getDiskspace() {
|
||||
return this.diskspace;
|
||||
@Override
|
||||
public String toString() {
|
||||
return "Quota[" + namespaceString() + ", " + diskspaceString() + "]";
|
||||
}
|
||||
}
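The feature treats a negative quota as unset and otherwise reports a violation when current usage plus the pending delta exceeds the quota, checking namespace before diskspace so nothing is updated on failure. A minimal sketch of that check; the exception type and the numbers are stand-ins, not the Hadoop Quota/QuotaExceededException classes.

// Sketch only: quota-violation test applied to both namespace and diskspace.
class QuotaCheckSketch {
  static boolean isViolated(long quota, long usage, long delta) {
    return quota >= 0 && usage + delta > quota;   // negative quota means "unset"
  }

  static void verifyQuota(long nsQuota, long namespace, long nsDelta,
                          long dsQuota, long diskspace, long dsDelta) {
    if (isViolated(nsQuota, namespace, nsDelta)) {
      throw new IllegalStateException("namespace quota exceeded: "
          + (namespace + nsDelta) + " > " + nsQuota);
    }
    if (isViolated(dsQuota, diskspace, dsDelta)) {
      throw new IllegalStateException("diskspace quota exceeded: "
          + (diskspace + dsDelta) + " > " + dsQuota);
    }
  }

  public static void main(String[] args) {
    verifyQuota(100, 90, 5, -1, 1L << 40, 1L << 30);  // passes: diskspace quota unset
    verifyQuota(100, 90, 20, -1, 0, 0);               // throws: 110 > 100
  }
}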
|
|
@ -36,8 +36,11 @@ import org.apache.hadoop.hdfs.DFSConfigKeys;
|
|||
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
|
||||
import org.apache.hadoop.hdfs.server.common.Storage;
|
||||
import org.apache.hadoop.hdfs.server.namenode.TransferFsImage.HttpGetFailedException;
|
||||
import org.apache.hadoop.hdfs.web.URLConnectionFactory;
|
||||
import org.apache.hadoop.io.IOUtils;
|
||||
import org.apache.hadoop.security.SecurityUtil;
|
||||
import org.apache.hadoop.security.UserGroupInformation;
|
||||
import org.apache.hadoop.security.authentication.client.AuthenticationException;
|
||||
|
||||
import com.google.common.annotations.VisibleForTesting;
|
||||
import com.google.common.base.Preconditions;
|
||||
|
@ -100,15 +103,22 @@ public class EditLogFileInputStream extends EditLogInputStream {
|
|||
/**
|
||||
* Open an EditLogInputStream for the given URL.
|
||||
*
|
||||
* @param url the url hosting the log
|
||||
* @param startTxId the expected starting txid
|
||||
* @param endTxId the expected ending txid
|
||||
* @param inProgress whether the log is in-progress
|
||||
* @param connectionFactory
|
||||
* the URLConnectionFactory used to create the connection.
|
||||
* @param url
|
||||
* the url hosting the log
|
||||
* @param startTxId
|
||||
* the expected starting txid
|
||||
* @param endTxId
|
||||
* the expected ending txid
|
||||
* @param inProgress
|
||||
* whether the log is in-progress
|
||||
* @return a stream from which edits may be read
|
||||
*/
|
||||
public static EditLogInputStream fromUrl(URL url, long startTxId,
|
||||
public static EditLogInputStream fromUrl(
|
||||
URLConnectionFactory connectionFactory, URL url, long startTxId,
|
||||
long endTxId, boolean inProgress) {
|
||||
return new EditLogFileInputStream(new URLLog(url),
|
||||
return new EditLogFileInputStream(new URLLog(connectionFactory, url),
|
||||
startTxId, endTxId, inProgress);
|
||||
}
|
||||
|
||||
|
@ -365,8 +375,12 @@ public class EditLogFileInputStream extends EditLogInputStream {
|
|||
private long advertisedSize = -1;
|
||||
|
||||
private final static String CONTENT_LENGTH = "Content-Length";
|
||||
private final URLConnectionFactory connectionFactory;
|
||||
private final boolean isSpnegoEnabled;
|
||||
|
||||
public URLLog(URL url) {
|
||||
public URLLog(URLConnectionFactory connectionFactory, URL url) {
|
||||
this.connectionFactory = connectionFactory;
|
||||
this.isSpnegoEnabled = UserGroupInformation.isSecurityEnabled();
|
||||
this.url = url;
|
||||
}
|
||||
|
||||
|
@ -376,8 +390,13 @@ public class EditLogFileInputStream extends EditLogInputStream {
|
|||
new PrivilegedExceptionAction<InputStream>() {
|
||||
@Override
|
||||
public InputStream run() throws IOException {
|
||||
HttpURLConnection connection = (HttpURLConnection)
|
||||
SecurityUtil.openSecureHttpConnection(url);
|
||||
HttpURLConnection connection;
|
||||
try {
|
||||
connection = (HttpURLConnection)
|
||||
connectionFactory.openConnection(url, isSpnegoEnabled);
|
||||
} catch (AuthenticationException e) {
|
||||
throw new IOException(e);
|
||||
}
|
||||
|
||||
if (connection.getResponseCode() != HttpURLConnection.HTTP_OK) {
|
||||
throw new HttpGetFailedException(
|
||||
|
|
|
@ -24,7 +24,6 @@ import static org.apache.hadoop.util.Time.now;
|
|||
|
||||
import org.apache.hadoop.classification.InterfaceAudience;
|
||||
import org.apache.hadoop.classification.InterfaceStability;
|
||||
import org.apache.jasper.compiler.JspUtil;
|
||||
|
||||
/**
|
||||
* A generic abstract class to support journaling of edits logs into
|
||||
|
@ -141,10 +140,10 @@ public abstract class EditLogOutputStream implements Closeable {
|
|||
}
|
||||
|
||||
/**
|
||||
* @return a short HTML snippet suitable for describing the current
|
||||
* @return a short text snippet suitable for describing the current
|
||||
* status of the stream
|
||||
*/
|
||||
public String generateHtmlReport() {
|
||||
return JspUtil.escapeXml(this.toString());
|
||||
public String generateReport() {
|
||||
return toString();
|
||||
}
|
||||
}
|
||||
|
|
|
@ -87,11 +87,15 @@ import com.google.common.base.Preconditions;
|
|||
*
|
||||
*************************************************/
|
||||
public class FSDirectory implements Closeable {
|
||||
private static INodeDirectoryWithQuota createRoot(FSNamesystem namesystem) {
|
||||
final INodeDirectoryWithQuota r = new INodeDirectoryWithQuota(
|
||||
private static INodeDirectorySnapshottable createRoot(FSNamesystem namesystem) {
|
||||
final INodeDirectory r = new INodeDirectory(
|
||||
INodeId.ROOT_INODE_ID,
|
||||
INodeDirectory.ROOT_NAME,
|
||||
namesystem.createFsOwnerPermissions(new FsPermission((short) 0755)));
|
||||
namesystem.createFsOwnerPermissions(new FsPermission((short) 0755)),
|
||||
0L);
|
||||
r.addDirectoryWithQuotaFeature(
|
||||
DirectoryWithQuotaFeature.DEFAULT_NAMESPACE_QUOTA,
|
||||
DirectoryWithQuotaFeature.DEFAULT_DISKSPACE_QUOTA);
|
||||
final INodeDirectorySnapshottable s = new INodeDirectorySnapshottable(r);
|
||||
s.setSnapshotQuota(0);
|
||||
return s;
|
||||
|
@ -107,7 +111,7 @@ public class FSDirectory implements Closeable {
|
|||
public final static String DOT_INODES_STRING = ".inodes";
|
||||
public final static byte[] DOT_INODES =
|
||||
DFSUtil.string2Bytes(DOT_INODES_STRING);
|
||||
INodeDirectoryWithQuota rootDir;
|
||||
INodeDirectory rootDir;
|
||||
FSImage fsImage;
|
||||
private final FSNamesystem namesystem;
|
||||
private volatile boolean ready = false;
|
||||
|
@ -202,7 +206,7 @@ public class FSDirectory implements Closeable {
|
|||
}
|
||||
|
||||
/** @return the root directory inode. */
|
||||
public INodeDirectoryWithQuota getRoot() {
|
||||
public INodeDirectory getRoot() {
|
||||
return rootDir;
|
||||
}
|
||||
|
||||
|
@ -452,8 +456,8 @@ public class FSDirectory implements Closeable {
|
|||
|
||||
boolean unprotectedRemoveBlock(String path,
|
||||
INodeFile fileNode, Block block) throws IOException {
|
||||
Preconditions.checkArgument(fileNode.isUnderConstruction());
|
||||
// modify file-> block and blocksMap
|
||||
// fileNode should be under construction
|
||||
boolean removed = fileNode.removeLastBlock(block);
|
||||
if (!removed) {
|
||||
return false;
|
||||
|
@ -1800,9 +1804,8 @@ public class FSDirectory implements Closeable {
|
|||
final INode[] inodes = inodesInPath.getINodes();
|
||||
for(int i=0; i < numOfINodes; i++) {
|
||||
if (inodes[i].isQuotaSet()) { // a directory with quota
|
||||
INodeDirectoryWithQuota node = (INodeDirectoryWithQuota) inodes[i]
|
||||
.asDirectory();
|
||||
node.addSpaceConsumed2Cache(nsDelta, dsDelta);
|
||||
inodes[i].asDirectory().getDirectoryWithQuotaFeature()
|
||||
.addSpaceConsumed2Cache(nsDelta, dsDelta);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -2035,10 +2038,11 @@ public class FSDirectory implements Closeable {
|
|||
// Stop checking for quota when common ancestor is reached
|
||||
return;
|
||||
}
|
||||
if (inodes[i].isQuotaSet()) { // a directory with quota
|
||||
final DirectoryWithQuotaFeature q
|
||||
= inodes[i].asDirectory().getDirectoryWithQuotaFeature();
|
||||
if (q != null) { // a directory with quota
|
||||
try {
|
||||
((INodeDirectoryWithQuota) inodes[i].asDirectory()).verifyQuota(
|
||||
nsDelta, dsDelta);
|
||||
q.verifyQuota(nsDelta, dsDelta);
|
||||
} catch (QuotaExceededException e) {
|
||||
e.setPathName(getFullPathName(inodes, i));
|
||||
throw e;
|
||||
|
@ -2385,35 +2389,14 @@ public class FSDirectory implements Closeable {
|
|||
if (dsQuota == HdfsConstants.QUOTA_DONT_SET) {
|
||||
dsQuota = oldDsQuota;
|
||||
}
|
||||
if (oldNsQuota == nsQuota && oldDsQuota == dsQuota) {
|
||||
return null;
|
||||
}
|
||||
|
||||
final Snapshot latest = iip.getLatestSnapshot();
|
||||
if (dirNode instanceof INodeDirectoryWithQuota) {
|
||||
INodeDirectoryWithQuota quotaNode = (INodeDirectoryWithQuota) dirNode;
|
||||
Quota.Counts counts = null;
|
||||
if (!quotaNode.isQuotaSet()) {
|
||||
// dirNode must be an INodeDirectoryWithSnapshot whose quota has not
|
||||
// been set yet
|
||||
counts = quotaNode.computeQuotaUsage();
|
||||
}
|
||||
// a directory with quota; so set the quota to the new value
|
||||
quotaNode.setQuota(nsQuota, dsQuota);
|
||||
if (quotaNode.isQuotaSet() && counts != null) {
|
||||
quotaNode.setSpaceConsumed(counts.get(Quota.NAMESPACE),
|
||||
counts.get(Quota.DISKSPACE));
|
||||
} else if (!quotaNode.isQuotaSet() && latest == null) {
|
||||
// do not replace the node if the node is a snapshottable directory
|
||||
// without snapshots
|
||||
if (!(quotaNode instanceof INodeDirectoryWithSnapshot)) {
|
||||
// will not come here for root because root is snapshottable and
|
||||
// root's nsQuota is always set
|
||||
return quotaNode.replaceSelf4INodeDirectory(inodeMap);
|
||||
}
|
||||
}
|
||||
} else {
|
||||
// a non-quota directory; so replace it with a directory with quota
|
||||
return dirNode.replaceSelf4Quota(latest, nsQuota, dsQuota, inodeMap);
|
||||
}
|
||||
return (oldNsQuota != nsQuota || oldDsQuota != dsQuota) ? dirNode : null;
|
||||
dirNode = dirNode.recordModification(latest, inodeMap);
|
||||
dirNode.setQuota(nsQuota, dsQuota);
|
||||
return dirNode;
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -2442,7 +2425,8 @@ public class FSDirectory implements Closeable {
|
|||
long totalInodes() {
|
||||
readLock();
|
||||
try {
|
||||
return rootDir.numItemsInTree();
|
||||
return rootDir.getDirectoryWithQuotaFeature().getSpaceConsumed()
|
||||
.get(Quota.NAMESPACE);
|
||||
} finally {
|
||||
readUnlock();
|
||||
}
|
||||
|
|
|
@ -954,6 +954,10 @@ public class FSEditLog implements LogsPurgeable {
|
|||
logEdit(op);
|
||||
}
|
||||
|
||||
/**
|
||||
* Log a CacheDirectiveInfo returned from
|
||||
* {@link CacheManager#addDirective(CacheDirectiveInfo, FSPermissionChecker)}
|
||||
*/
|
||||
void logAddCacheDirectiveInfo(CacheDirectiveInfo directive,
|
||||
boolean toLogRpcIds) {
|
||||
AddCacheDirectiveInfoOp op =
|
||||
|
|
|
@ -636,17 +636,17 @@ public class FSEditLogLoader {
|
|||
fsNamesys.setLastAllocatedBlockId(allocateBlockIdOp.blockId);
|
||||
break;
|
||||
}
|
||||
case OP_ADD_PATH_BASED_CACHE_DIRECTIVE: {
|
||||
case OP_ADD_CACHE_DIRECTIVE: {
|
||||
AddCacheDirectiveInfoOp addOp = (AddCacheDirectiveInfoOp) op;
|
||||
CacheDirectiveInfo result = fsNamesys.
|
||||
getCacheManager().addDirective(addOp.directive, null);
|
||||
getCacheManager().addDirectiveFromEditLog(addOp.directive);
|
||||
if (toAddRetryCache) {
|
||||
Long id = result.getId();
|
||||
fsNamesys.addCacheEntryWithPayload(op.rpcClientId, op.rpcCallId, id);
|
||||
}
|
||||
break;
|
||||
}
|
||||
case OP_MODIFY_PATH_BASED_CACHE_DIRECTIVE: {
|
||||
case OP_MODIFY_CACHE_DIRECTIVE: {
|
||||
ModifyCacheDirectiveInfoOp modifyOp =
|
||||
(ModifyCacheDirectiveInfoOp) op;
|
||||
fsNamesys.getCacheManager().modifyDirective(
|
||||
|
@ -656,7 +656,7 @@ public class FSEditLogLoader {
|
|||
}
|
||||
break;
|
||||
}
|
||||
case OP_REMOVE_PATH_BASED_CACHE_DIRECTIVE: {
|
||||
case OP_REMOVE_CACHE_DIRECTIVE: {
|
||||
RemoveCacheDirectiveInfoOp removeOp =
|
||||
(RemoveCacheDirectiveInfoOp) op;
|
||||
fsNamesys.getCacheManager().removeDirective(removeOp.id, null);
|
||||
|
|
|
@ -18,9 +18,8 @@
|
|||
package org.apache.hadoop.hdfs.server.namenode;
|
||||
|
||||
import static org.apache.hadoop.hdfs.server.namenode.FSEditLogOpCodes.OP_ADD;
|
||||
import static org.apache.hadoop.hdfs.server.namenode.FSEditLogOpCodes.OP_ADD_CACHE_DIRECTIVE;
|
||||
import static org.apache.hadoop.hdfs.server.namenode.FSEditLogOpCodes.OP_ADD_CACHE_POOL;
|
||||
import static org.apache.hadoop.hdfs.server.namenode.FSEditLogOpCodes.OP_ADD_PATH_BASED_CACHE_DIRECTIVE;
|
||||
import static org.apache.hadoop.hdfs.server.namenode.FSEditLogOpCodes.OP_MODIFY_PATH_BASED_CACHE_DIRECTIVE;
|
||||
import static org.apache.hadoop.hdfs.server.namenode.FSEditLogOpCodes.OP_ALLOCATE_BLOCK_ID;
|
||||
import static org.apache.hadoop.hdfs.server.namenode.FSEditLogOpCodes.OP_ALLOW_SNAPSHOT;
|
||||
import static org.apache.hadoop.hdfs.server.namenode.FSEditLogOpCodes.OP_CANCEL_DELEGATION_TOKEN;
|
||||
|
@ -35,10 +34,11 @@ import static org.apache.hadoop.hdfs.server.namenode.FSEditLogOpCodes.OP_END_LOG
|
|||
import static org.apache.hadoop.hdfs.server.namenode.FSEditLogOpCodes.OP_GET_DELEGATION_TOKEN;
|
||||
import static org.apache.hadoop.hdfs.server.namenode.FSEditLogOpCodes.OP_INVALID;
|
||||
import static org.apache.hadoop.hdfs.server.namenode.FSEditLogOpCodes.OP_MKDIR;
|
||||
import static org.apache.hadoop.hdfs.server.namenode.FSEditLogOpCodes.OP_MODIFY_CACHE_DIRECTIVE;
|
||||
import static org.apache.hadoop.hdfs.server.namenode.FSEditLogOpCodes.OP_MODIFY_CACHE_POOL;
|
||||
import static org.apache.hadoop.hdfs.server.namenode.FSEditLogOpCodes.OP_REASSIGN_LEASE;
|
||||
import static org.apache.hadoop.hdfs.server.namenode.FSEditLogOpCodes.OP_REMOVE_CACHE_DIRECTIVE;
|
||||
import static org.apache.hadoop.hdfs.server.namenode.FSEditLogOpCodes.OP_REMOVE_CACHE_POOL;
|
||||
import static org.apache.hadoop.hdfs.server.namenode.FSEditLogOpCodes.OP_REMOVE_PATH_BASED_CACHE_DIRECTIVE;
|
||||
import static org.apache.hadoop.hdfs.server.namenode.FSEditLogOpCodes.OP_RENAME;
|
||||
import static org.apache.hadoop.hdfs.server.namenode.FSEditLogOpCodes.OP_RENAME_OLD;
|
||||
import static org.apache.hadoop.hdfs.server.namenode.FSEditLogOpCodes.OP_RENAME_SNAPSHOT;
|
||||
|
@ -64,6 +64,7 @@ import java.io.EOFException;
|
|||
import java.io.IOException;
|
||||
import java.util.ArrayList;
|
||||
import java.util.Arrays;
|
||||
import java.util.Date;
|
||||
import java.util.EnumMap;
|
||||
import java.util.List;
|
||||
import java.util.zip.CheckedInputStream;
|
||||
|
@ -81,12 +82,12 @@ import org.apache.hadoop.fs.permission.PermissionStatus;
|
|||
import org.apache.hadoop.hdfs.DFSConfigKeys;
|
||||
import org.apache.hadoop.hdfs.DeprecatedUTF8;
|
||||
import org.apache.hadoop.hdfs.protocol.Block;
|
||||
import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo;
|
||||
import org.apache.hadoop.hdfs.protocol.CachePoolInfo;
|
||||
import org.apache.hadoop.hdfs.protocol.ClientProtocol;
|
||||
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
|
||||
import org.apache.hadoop.hdfs.protocol.LayoutVersion;
|
||||
import org.apache.hadoop.hdfs.protocol.LayoutVersion.Feature;
|
||||
import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo;
|
||||
import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
|
||||
import org.apache.hadoop.hdfs.util.XMLUtils;
|
||||
import org.apache.hadoop.hdfs.util.XMLUtils.InvalidXmlException;
|
||||
|
@ -109,7 +110,6 @@ import org.xml.sax.helpers.AttributesImpl;
|
|||
|
||||
import com.google.common.base.Joiner;
|
||||
import com.google.common.base.Preconditions;
|
||||
import com.google.common.base.Strings;
|
||||
|
||||
/**
|
||||
* Helper classes for reading the ops from an InputStream.
|
||||
|
@ -165,11 +165,11 @@ public abstract class FSEditLogOp {
|
|||
inst.put(OP_RENAME_SNAPSHOT, new RenameSnapshotOp());
|
||||
inst.put(OP_SET_GENSTAMP_V2, new SetGenstampV2Op());
|
||||
inst.put(OP_ALLOCATE_BLOCK_ID, new AllocateBlockIdOp());
|
||||
inst.put(OP_ADD_PATH_BASED_CACHE_DIRECTIVE,
|
||||
inst.put(OP_ADD_CACHE_DIRECTIVE,
|
||||
new AddCacheDirectiveInfoOp());
|
||||
inst.put(OP_MODIFY_PATH_BASED_CACHE_DIRECTIVE,
|
||||
inst.put(OP_MODIFY_CACHE_DIRECTIVE,
|
||||
new ModifyCacheDirectiveInfoOp());
|
||||
inst.put(OP_REMOVE_PATH_BASED_CACHE_DIRECTIVE,
|
||||
inst.put(OP_REMOVE_CACHE_DIRECTIVE,
|
||||
new RemoveCacheDirectiveInfoOp());
|
||||
inst.put(OP_ADD_CACHE_POOL, new AddCachePoolOp());
|
||||
inst.put(OP_MODIFY_CACHE_POOL, new ModifyCachePoolOp());
|
||||
|
@ -2874,12 +2874,12 @@ public abstract class FSEditLogOp {
|
|||
CacheDirectiveInfo directive;
|
||||
|
||||
public AddCacheDirectiveInfoOp() {
|
||||
super(OP_ADD_PATH_BASED_CACHE_DIRECTIVE);
|
||||
super(OP_ADD_CACHE_DIRECTIVE);
|
||||
}
|
||||
|
||||
static AddCacheDirectiveInfoOp getInstance(OpInstanceCache cache) {
|
||||
return (AddCacheDirectiveInfoOp) cache
|
||||
.get(OP_ADD_PATH_BASED_CACHE_DIRECTIVE);
|
||||
.get(OP_ADD_CACHE_DIRECTIVE);
|
||||
}
|
||||
|
||||
public AddCacheDirectiveInfoOp setDirective(
|
||||
|
@ -2889,6 +2889,7 @@ public abstract class FSEditLogOp {
|
|||
assert(directive.getPath() != null);
|
||||
assert(directive.getReplication() != null);
|
||||
assert(directive.getPool() != null);
|
||||
assert(directive.getExpiration() != null);
|
||||
return this;
|
||||
}
|
||||
|
||||
|
@ -2898,11 +2899,13 @@ public abstract class FSEditLogOp {
|
|||
String path = FSImageSerialization.readString(in);
|
||||
short replication = FSImageSerialization.readShort(in);
|
||||
String pool = FSImageSerialization.readString(in);
|
||||
long expiryTime = FSImageSerialization.readLong(in);
|
||||
directive = new CacheDirectiveInfo.Builder().
|
||||
setId(id).
|
||||
setPath(new Path(path)).
|
||||
setReplication(replication).
|
||||
setPool(pool).
|
||||
setExpiration(CacheDirectiveInfo.Expiration.newAbsolute(expiryTime)).
|
||||
build();
|
||||
readRpcIds(in, logVersion);
|
||||
}
|
||||
|
@ -2913,6 +2916,8 @@ public abstract class FSEditLogOp {
|
|||
FSImageSerialization.writeString(directive.getPath().toUri().getPath(), out);
|
||||
FSImageSerialization.writeShort(directive.getReplication(), out);
|
||||
FSImageSerialization.writeString(directive.getPool(), out);
|
||||
FSImageSerialization.writeLong(
|
||||
directive.getExpiration().getMillis(), out);
|
||||
writeRpcIds(rpcClientId, rpcCallId, out);
|
||||
}
|
||||
|
||||
|
@ -2925,6 +2930,8 @@ public abstract class FSEditLogOp {
|
|||
XMLUtils.addSaxString(contentHandler, "REPLICATION",
|
||||
Short.toString(directive.getReplication()));
|
||||
XMLUtils.addSaxString(contentHandler, "POOL", directive.getPool());
|
||||
XMLUtils.addSaxString(contentHandler, "EXPIRATION",
|
||||
"" + directive.getExpiration().getMillis());
|
||||
appendRpcIdsToXml(contentHandler, rpcClientId, rpcCallId);
|
||||
}
|
||||
|
||||
|
@ -2935,6 +2942,8 @@ public abstract class FSEditLogOp {
|
|||
setPath(new Path(st.getValue("PATH"))).
|
||||
setReplication(Short.parseShort(st.getValue("REPLICATION"))).
|
||||
setPool(st.getValue("POOL")).
|
||||
setExpiration(CacheDirectiveInfo.Expiration.newAbsolute(
|
||||
Long.parseLong(st.getValue("EXPIRATION")))).
|
||||
build();
|
||||
readRpcIdsFromXml(st);
|
||||
}
|
||||
|
@ -2946,7 +2955,8 @@ public abstract class FSEditLogOp {
|
|||
builder.append("id=" + directive.getId() + ",");
|
||||
builder.append("path=" + directive.getPath().toUri().getPath() + ",");
|
||||
builder.append("replication=" + directive.getReplication() + ",");
|
||||
builder.append("pool=" + directive.getPool());
|
||||
builder.append("pool=" + directive.getPool() + ",");
|
||||
builder.append("expiration=" + directive.getExpiration().getMillis());
|
||||
appendRpcIdsToString(builder, rpcClientId, rpcCallId);
|
||||
builder.append("]");
|
||||
return builder.toString();
|
||||
|
@ -2961,12 +2971,12 @@ public abstract class FSEditLogOp {
|
|||
CacheDirectiveInfo directive;
|
||||
|
||||
public ModifyCacheDirectiveInfoOp() {
|
||||
super(OP_MODIFY_PATH_BASED_CACHE_DIRECTIVE);
|
||||
super(OP_MODIFY_CACHE_DIRECTIVE);
|
||||
}
|
||||
|
||||
static ModifyCacheDirectiveInfoOp getInstance(OpInstanceCache cache) {
|
||||
return (ModifyCacheDirectiveInfoOp) cache
|
||||
.get(OP_MODIFY_PATH_BASED_CACHE_DIRECTIVE);
|
||||
.get(OP_MODIFY_CACHE_DIRECTIVE);
|
||||
}
|
||||
|
||||
public ModifyCacheDirectiveInfoOp setDirective(
|
||||
|
@ -2991,7 +3001,12 @@ public abstract class FSEditLogOp {
|
|||
if ((flags & 0x4) != 0) {
|
||||
builder.setPool(FSImageSerialization.readString(in));
|
||||
}
|
||||
if ((flags & ~0x7) != 0) {
|
||||
if ((flags & 0x8) != 0) {
|
||||
builder.setExpiration(
|
||||
CacheDirectiveInfo.Expiration.newAbsolute(
|
||||
FSImageSerialization.readLong(in)));
|
||||
}
|
||||
if ((flags & ~0xF) != 0) {
|
||||
throw new IOException("unknown flags set in " +
|
||||
"ModifyCacheDirectiveInfoOp: " + flags);
|
||||
}
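The op stores its optional fields behind a single flags byte: each present field sets one bit (0x1 path, 0x2 replication, 0x4 pool, 0x8 expiration) and the reader rejects any unknown bit with the (flags & ~0xF) test. A cut-down sketch of the same scheme with only two optional fields; writeUTF stands in for Hadoop's serialization, so the wire format here is illustrative only.

import java.io.*;

// Sketch only: optional fields guarded by per-field bits in a flags byte.
class OptionalFieldFlagsSketch {
  static void write(DataOutput out, String path, Short replication) throws IOException {
    byte flags = (byte) (((path != null) ? 0x1 : 0)
        | ((replication != null) ? 0x2 : 0));
    out.writeByte(flags);
    if (path != null) { out.writeUTF(path); }
    if (replication != null) { out.writeShort(replication); }
  }

  static void read(DataInput in) throws IOException {
    byte flags = in.readByte();
    if ((flags & ~0x3) != 0) {            // only two optional fields in this sketch
      throw new IOException("unknown flags set: " + flags);
    }
    String path = ((flags & 0x1) != 0) ? in.readUTF() : null;
    Short replication = ((flags & 0x2) != 0) ? Short.valueOf(in.readShort()) : null;
    System.out.println("path=" + path + " replication=" + replication);
  }

  public static void main(String[] args) throws IOException {
    ByteArrayOutputStream buf = new ByteArrayOutputStream();
    write(new DataOutputStream(buf), "/warm/data", null);
    read(new DataInputStream(new ByteArrayInputStream(buf.toByteArray())));
  }
}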
|
||||
|
@ -3005,7 +3020,8 @@ public abstract class FSEditLogOp {
|
|||
byte flags = (byte)(
|
||||
((directive.getPath() != null) ? 0x1 : 0) |
|
||||
((directive.getReplication() != null) ? 0x2 : 0) |
|
||||
((directive.getPool() != null) ? 0x4 : 0)
|
||||
((directive.getPool() != null) ? 0x4 : 0) |
|
||||
((directive.getExpiration() != null) ? 0x8 : 0)
|
||||
);
|
||||
out.writeByte(flags);
|
||||
if (directive.getPath() != null) {
|
||||
|
@ -3018,6 +3034,10 @@ public abstract class FSEditLogOp {
|
|||
if (directive.getPool() != null) {
|
||||
FSImageSerialization.writeString(directive.getPool(), out);
|
||||
}
|
||||
if (directive.getExpiration() != null) {
|
||||
FSImageSerialization.writeLong(directive.getExpiration().getMillis(),
|
||||
out);
|
||||
}
|
||||
writeRpcIds(rpcClientId, rpcCallId, out);
|
||||
}
|
||||
|
||||
|
@ -3036,6 +3056,10 @@ public abstract class FSEditLogOp {
|
|||
if (directive.getPool() != null) {
|
||||
XMLUtils.addSaxString(contentHandler, "POOL", directive.getPool());
|
||||
}
|
||||
if (directive.getExpiration() != null) {
|
||||
XMLUtils.addSaxString(contentHandler, "EXPIRATION",
|
||||
"" + directive.getExpiration().getMillis());
|
||||
}
|
||||
appendRpcIdsToXml(contentHandler, rpcClientId, rpcCallId);
|
||||
}
|
||||
|
||||
|
@ -3056,6 +3080,11 @@ public abstract class FSEditLogOp {
|
|||
if (pool != null) {
|
||||
builder.setPool(pool);
|
||||
}
|
||||
String expiryTime = st.getValueOrNull("EXPIRATION");
|
||||
if (expiryTime != null) {
|
||||
builder.setExpiration(CacheDirectiveInfo.Expiration.newAbsolute(
|
||||
Long.parseLong(expiryTime)));
|
||||
}
|
||||
this.directive = builder.build();
|
||||
readRpcIdsFromXml(st);
|
||||
}
|
||||
|
@ -3075,6 +3104,10 @@ public abstract class FSEditLogOp {
|
|||
if (directive.getPool() != null) {
|
||||
builder.append(",").append("pool=").append(directive.getPool());
|
||||
}
|
||||
if (directive.getExpiration() != null) {
|
||||
builder.append(",").append("expiration=").
|
||||
append(directive.getExpiration().getMillis());
|
||||
}
|
||||
appendRpcIdsToString(builder, rpcClientId, rpcCallId);
|
||||
builder.append("]");
|
||||
return builder.toString();
|
||||
|
@ -3089,12 +3122,12 @@ public abstract class FSEditLogOp {
|
|||
long id;
|
||||
|
||||
public RemoveCacheDirectiveInfoOp() {
|
||||
super(OP_REMOVE_PATH_BASED_CACHE_DIRECTIVE);
|
||||
super(OP_REMOVE_CACHE_DIRECTIVE);
|
||||
}
|
||||
|
||||
static RemoveCacheDirectiveInfoOp getInstance(OpInstanceCache cache) {
|
||||
return (RemoveCacheDirectiveInfoOp) cache
|
||||
.get(OP_REMOVE_PATH_BASED_CACHE_DIRECTIVE);
|
||||
.get(OP_REMOVE_CACHE_DIRECTIVE);
|
||||
}
|
||||
|
||||
public RemoveCacheDirectiveInfoOp setId(long id) {
|
||||
|
|
|
@ -64,12 +64,12 @@ public enum FSEditLogOpCodes {
|
|||
OP_DISALLOW_SNAPSHOT ((byte) 30),
OP_SET_GENSTAMP_V2 ((byte) 31),
OP_ALLOCATE_BLOCK_ID ((byte) 32),
OP_ADD_PATH_BASED_CACHE_DIRECTIVE ((byte) 33),
OP_REMOVE_PATH_BASED_CACHE_DIRECTIVE ((byte) 34),
OP_ADD_CACHE_DIRECTIVE ((byte) 33),
OP_REMOVE_CACHE_DIRECTIVE ((byte) 34),
OP_ADD_CACHE_POOL ((byte) 35),
OP_MODIFY_CACHE_POOL ((byte) 36),
OP_REMOVE_CACHE_POOL ((byte) 37),
OP_MODIFY_PATH_BASED_CACHE_DIRECTIVE ((byte) 38);
OP_MODIFY_CACHE_DIRECTIVE ((byte) 38);

private byte opCode;
|
||||
|
||||
|
|
|
@ -755,7 +755,7 @@ public class FSImage implements Closeable {
|
|||
* This is an update of existing state of the filesystem and does not
|
||||
* throw QuotaExceededException.
|
||||
*/
|
||||
static void updateCountForQuota(INodeDirectoryWithQuota root) {
|
||||
static void updateCountForQuota(INodeDirectory root) {
|
||||
updateCountForQuotaRecursively(root, Quota.Counts.newInstance());
|
||||
}
|
||||
|
||||
|
@ -795,7 +795,7 @@ public class FSImage implements Closeable {
|
|||
+ " quota = " + dsQuota + " < consumed = " + diskspace);
|
||||
}
|
||||
|
||||
((INodeDirectoryWithQuota)dir).setSpaceConsumed(namespace, diskspace);
|
||||
dir.getDirectoryWithQuotaFeature().setSpaceConsumed(namespace, diskspace);
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -52,7 +52,7 @@ import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
|
|||
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoUnderConstruction;
|
||||
import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
|
||||
import org.apache.hadoop.hdfs.server.common.InconsistentFSStateException;
|
||||
import org.apache.hadoop.hdfs.server.namenode.snapshot.FileWithSnapshot.FileDiffList;
|
||||
import org.apache.hadoop.hdfs.server.namenode.snapshot.FileDiffList;
|
||||
import org.apache.hadoop.hdfs.server.namenode.snapshot.INodeDirectorySnapshottable;
|
||||
import org.apache.hadoop.hdfs.server.namenode.snapshot.INodeDirectoryWithSnapshot;
|
||||
import org.apache.hadoop.hdfs.server.namenode.snapshot.INodeFileWithSnapshot;
|
||||
|
@ -375,7 +375,7 @@ public class FSImageFormat {
|
|||
final long dsQuota = q.get(Quota.DISKSPACE);
|
||||
FSDirectory fsDir = namesystem.dir;
|
||||
if (nsQuota != -1 || dsQuota != -1) {
|
||||
fsDir.rootDir.setQuota(nsQuota, dsQuota);
|
||||
fsDir.rootDir.getDirectoryWithQuotaFeature().setQuota(nsQuota, dsQuota);
|
||||
}
|
||||
fsDir.rootDir.cloneModificationTime(root);
|
||||
fsDir.rootDir.clonePermissionStatus(root);
|
||||
|
@ -729,10 +729,11 @@ public class FSImageFormat {
|
|||
if (counter != null) {
|
||||
counter.increment();
|
||||
}
|
||||
final INodeDirectory dir = nsQuota >= 0 || dsQuota >= 0?
|
||||
new INodeDirectoryWithQuota(inodeId, localName, permissions,
|
||||
modificationTime, nsQuota, dsQuota)
|
||||
: new INodeDirectory(inodeId, localName, permissions, modificationTime);
|
||||
final INodeDirectory dir = new INodeDirectory(inodeId, localName,
|
||||
permissions, modificationTime);
|
||||
if (nsQuota >= 0 || dsQuota >= 0) {
|
||||
dir.addDirectoryWithQuotaFeature(nsQuota, dsQuota);
|
||||
}
|
||||
return snapshottable ? new INodeDirectorySnapshottable(dir)
|
||||
: withSnapshot ? new INodeDirectoryWithSnapshot(dir)
|
||||
: dir;
|
||||
|
@ -972,13 +973,14 @@ public class FSImageFormat {
|
|||
checkNotSaved();
|
||||
|
||||
final FSNamesystem sourceNamesystem = context.getSourceNamesystem();
|
||||
FSDirectory fsDir = sourceNamesystem.dir;
|
||||
final INodeDirectory rootDir = sourceNamesystem.dir.rootDir;
|
||||
final long numINodes = rootDir.getDirectoryWithQuotaFeature()
|
||||
.getSpaceConsumed().get(Quota.NAMESPACE);
|
||||
String sdPath = newFile.getParentFile().getParentFile().getAbsolutePath();
|
||||
Step step = new Step(StepType.INODES, sdPath);
|
||||
StartupProgress prog = NameNode.getStartupProgress();
|
||||
prog.beginStep(Phase.SAVING_CHECKPOINT, step);
|
||||
prog.setTotal(Phase.SAVING_CHECKPOINT, step,
|
||||
fsDir.rootDir.numItemsInTree());
|
||||
prog.setTotal(Phase.SAVING_CHECKPOINT, step, numINodes);
|
||||
Counter counter = prog.getCounter(Phase.SAVING_CHECKPOINT, step);
|
||||
long startTime = now();
|
||||
//
|
||||
|
@ -997,7 +999,7 @@ public class FSImageFormat {
|
|||
// fairness-related deadlock. See the comments on HDFS-2223.
|
||||
out.writeInt(sourceNamesystem.unprotectedGetNamespaceInfo()
|
||||
.getNamespaceID());
|
||||
out.writeLong(fsDir.rootDir.numItemsInTree());
|
||||
out.writeLong(numINodes);
|
||||
out.writeLong(sourceNamesystem.getGenerationStampV1());
|
||||
out.writeLong(sourceNamesystem.getGenerationStampV2());
|
||||
out.writeLong(sourceNamesystem.getGenerationStampAtblockIdSwitch());
|
||||
|
@ -1014,14 +1016,13 @@ public class FSImageFormat {
|
|||
" using " + compression);
|
||||
|
||||
// save the root
|
||||
saveINode2Image(fsDir.rootDir, out, false, referenceMap, counter);
|
||||
saveINode2Image(rootDir, out, false, referenceMap, counter);
|
||||
// save the rest of the nodes
|
||||
saveImage(fsDir.rootDir, out, true, false, counter);
|
||||
saveImage(rootDir, out, true, false, counter);
|
||||
prog.endStep(Phase.SAVING_CHECKPOINT, step);
|
||||
// Now that the step is finished, set counter equal to total to adjust
|
||||
// for possible under-counting due to reference inodes.
|
||||
prog.setCount(Phase.SAVING_CHECKPOINT, step,
|
||||
fsDir.rootDir.numItemsInTree());
|
||||
prog.setCount(Phase.SAVING_CHECKPOINT, step, numINodes);
|
||||
// save files under construction
|
||||
// TODO: for HDFS-5428, since we cannot break the compatibility of
|
||||
// fsimage, we store part of the under-construction files that are only
|
||||
|
|
|
@ -165,6 +165,7 @@ import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
|
|||
import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
|
||||
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
|
||||
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
|
||||
import org.apache.hadoop.hdfs.protocol.CachePoolEntry;
|
||||
import org.apache.hadoop.hdfs.protocol.CachePoolInfo;
|
||||
import org.apache.hadoop.hdfs.protocol.QuotaExceededException;
|
||||
import org.apache.hadoop.hdfs.protocol.RecoveryInProgressException;
|
||||
|
@ -6421,6 +6422,16 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
|
|||
return datanodeStatistics.getCapacityRemainingPercent();
|
||||
}
|
||||
|
||||
@Override // NameNodeMXBean
|
||||
public long getCacheCapacity() {
|
||||
return datanodeStatistics.getCacheCapacity();
|
||||
}
|
||||
|
||||
@Override // NameNodeMXBean
|
||||
public long getCacheUsed() {
|
||||
return datanodeStatistics.getCacheUsed();
|
||||
}
|
||||
|
||||
@Override // NameNodeMXBean
|
||||
public long getTotalBlocks() {
|
||||
return getBlocksTotal();
|
||||
|
@ -6627,7 +6638,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
|
|||
} else if (openForWrite) {
|
||||
EditLogOutputStream elos = jas.getCurrentStream();
|
||||
if (elos != null) {
|
||||
jasMap.put("stream", elos.generateHtmlReport());
|
||||
jasMap.put("stream", elos.generateReport());
|
||||
} else {
|
||||
jasMap.put("stream", "not currently writing");
|
||||
}
|
||||
|
@ -7277,11 +7288,11 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
|
|||
getEditLog().logSync();
|
||||
}
|
||||
|
||||
public BatchedListEntries<CachePoolInfo> listCachePools(String prevKey)
|
||||
public BatchedListEntries<CachePoolEntry> listCachePools(String prevKey)
|
||||
throws IOException {
|
||||
final FSPermissionChecker pc =
|
||||
isPermissionEnabled ? getPermissionChecker() : null;
|
||||
BatchedListEntries<CachePoolInfo> results;
|
||||
BatchedListEntries<CachePoolEntry> results;
|
||||
checkOperation(OperationCategory.READ);
|
||||
boolean success = false;
|
||||
readLock();
|
||||
|
|
|
@@ -26,7 +26,7 @@ import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
import org.apache.hadoop.hdfs.server.namenode.INode.BlocksMapUpdateInfo;

/**
 * I-node for file being written.
 * Feature for under-construction file.
 */
@InterfaceAudience.Private
public class FileUnderConstructionFeature extends INodeFile.Feature {
|
|
@ -35,7 +35,6 @@ import org.apache.hadoop.hdfs.protocol.Block;
|
|||
import org.apache.hadoop.hdfs.protocol.QuotaExceededException;
|
||||
import org.apache.hadoop.hdfs.server.namenode.INodeReference.DstReference;
|
||||
import org.apache.hadoop.hdfs.server.namenode.INodeReference.WithName;
|
||||
import org.apache.hadoop.hdfs.server.namenode.snapshot.FileWithSnapshot;
|
||||
import org.apache.hadoop.hdfs.server.namenode.snapshot.INodeDirectoryWithSnapshot;
|
||||
import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
|
||||
import org.apache.hadoop.hdfs.util.ChunkedArrayList;
|
||||
|
@ -315,7 +314,7 @@ public abstract class INode implements INodeAttributes, Diff.Element<byte[]> {
|
|||
* 1.2.2 Else do nothing with the current INode. Recursively clean its
|
||||
* children.
|
||||
*
|
||||
* 1.3 The current inode is a {@link FileWithSnapshot}.
|
||||
* 1.3 The current inode is a file with snapshot.
|
||||
* Call recordModification(..) to capture the current states.
|
||||
* Mark the INode as deleted.
|
||||
*
|
||||
|
@ -328,7 +327,7 @@ public abstract class INode implements INodeAttributes, Diff.Element<byte[]> {
|
|||
* 2. When deleting a snapshot.
|
||||
* 2.1 To clean {@link INodeFile}: do nothing.
|
||||
* 2.2 To clean {@link INodeDirectory}: recursively clean its children.
|
||||
* 2.3 To clean {@link FileWithSnapshot}: delete the corresponding snapshot in
|
||||
* 2.3 To clean INodeFile with snapshot: delete the corresponding snapshot in
|
||||
* its diff list.
|
||||
* 2.4 To clean {@link INodeDirectoryWithSnapshot}: delete the corresponding
|
||||
* snapshot in its diff list. Recursively clean its children.
|
||||
|
@ -406,6 +405,15 @@ public abstract class INode implements INodeAttributes, Diff.Element<byte[]> {
|
|||
*/
|
||||
public void addSpaceConsumed(long nsDelta, long dsDelta, boolean verify)
|
||||
throws QuotaExceededException {
|
||||
addSpaceConsumed2Parent(nsDelta, dsDelta, verify);
|
||||
}
|
||||
|
||||
/**
|
||||
* Check and add namespace/diskspace consumed to itself and the ancestors.
|
||||
* @throws QuotaExceededException if quota is violated.
|
||||
*/
|
||||
void addSpaceConsumed2Parent(long nsDelta, long dsDelta, boolean verify)
|
||||
throws QuotaExceededException {
|
||||
if (parent != null) {
|
||||
parent.addSpaceConsumed(nsDelta, dsDelta, verify);
|
||||
}
|
||||
|
@@ -744,4 +752,51 @@ public abstract class INode implements INodeAttributes, Diff.Element<byte[]> {
      toDeleteList.clear();
    }
  }

  /** INode feature such as {@link FileUnderConstructionFeature}
   * and {@link DirectoryWithQuotaFeature}.
   */
  interface Feature<F extends Feature<F>> {
    /** @return the next feature. */
    public F getNextFeature();

    /** Set the next feature. */
    public void setNextFeature(F next);

    /** Utility methods such as addFeature and removeFeature. */
    static class Util {
      /**
       * Add a feature to the linked list.
       * @return the new head.
       */
      static <F extends Feature<F>> F addFeature(F feature, F head) {
        feature.setNextFeature(head);
        return feature;
      }

      /**
       * Remove a feature from the linked list.
       * @return the new head.
       */
      static <F extends Feature<F>> F removeFeature(F feature, F head) {
        if (feature == head) {
          final F newHead = head.getNextFeature();
          head.setNextFeature(null);
          return newHead;
        } else if (head != null) {
          F prev = head;
          F curr = head.getNextFeature();
          for (; curr != null && curr != feature;
              prev = curr, curr = curr.getNextFeature())
            ;
          if (curr != null) {
            prev.setNextFeature(curr.getNextFeature());
            curr.setNextFeature(null);
            return head;
          }
        }
        throw new IllegalStateException("Feature " + feature + " not found.");
      }
    }
  }
}
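The Util helpers above are the whole feature mechanism: an inode's features form a singly linked list whose head pointer is swapped on every add or remove. As an illustration only (SimpleFeature is a hypothetical stand-in, not a class in this patch), the same pattern in a self-contained form:

    // Minimal sketch of the feature linked list used above; SimpleFeature is a
    // hypothetical stand-in for DirectoryWithQuotaFeature, FileUnderConstructionFeature, etc.
    class SimpleFeature {
      private SimpleFeature next;
      SimpleFeature getNextFeature() { return next; }
      void setNextFeature(SimpleFeature n) { next = n; }

      /** Prepend a feature and return the new head, as Util.addFeature does. */
      static SimpleFeature add(SimpleFeature feature, SimpleFeature head) {
        feature.setNextFeature(head);
        return feature;
      }

      /** Unlink a feature and return the new head, as Util.removeFeature does. */
      static SimpleFeature remove(SimpleFeature feature, SimpleFeature head) {
        if (feature == head) {
          SimpleFeature newHead = head.getNextFeature();
          head.setNextFeature(null);
          return newHead;
        }
        for (SimpleFeature prev = head,
                 curr = head == null ? null : head.getNextFeature();
             curr != null; prev = curr, curr = curr.getNextFeature()) {
          if (curr == feature) {
            prev.setNextFeature(curr.getNextFeature());
            curr.setNextFeature(null);
            return head;
          }
        }
        throw new IllegalStateException("Feature " + feature + " not found.");
      }
    }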
|
@ -46,6 +46,21 @@ import com.google.common.base.Preconditions;
|
|||
*/
|
||||
public class INodeDirectory extends INodeWithAdditionalFields
|
||||
implements INodeDirectoryAttributes {
|
||||
/** Directory related features such as quota and snapshots. */
|
||||
public static abstract class Feature implements INode.Feature<Feature> {
|
||||
private Feature nextFeature;
|
||||
|
||||
@Override
|
||||
public Feature getNextFeature() {
|
||||
return nextFeature;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void setNextFeature(Feature next) {
|
||||
this.nextFeature = next;
|
||||
}
|
||||
}
|
||||
|
||||
/** Cast INode to INodeDirectory. */
|
||||
public static INodeDirectory valueOf(INode inode, Object path
|
||||
) throws FileNotFoundException, PathIsNotDirectoryException {
|
||||
|
@ -64,6 +79,9 @@ public class INodeDirectory extends INodeWithAdditionalFields
|
|||
|
||||
private List<INode> children = null;
|
||||
|
||||
/** A linked list of {@link Feature}s. */
|
||||
private Feature headFeature = null;
|
||||
|
||||
/** constructor */
|
||||
public INodeDirectory(long id, byte[] name, PermissionStatus permissions,
|
||||
long mtime) {
|
||||
|
@ -76,7 +94,7 @@ public class INodeDirectory extends INodeWithAdditionalFields
|
|||
* @param adopt Indicate whether or not need to set the parent field of child
|
||||
* INodes to the new node
|
||||
*/
|
||||
public INodeDirectory(INodeDirectory other, boolean adopt) {
|
||||
public INodeDirectory(INodeDirectory other, boolean adopt, boolean copyFeatures) {
|
||||
super(other);
|
||||
this.children = other.children;
|
||||
if (adopt && this.children != null) {
|
||||
|
@ -84,6 +102,9 @@ public class INodeDirectory extends INodeWithAdditionalFields
|
|||
child.setParent(this);
|
||||
}
|
||||
}
|
||||
if (copyFeatures) {
|
||||
this.headFeature = other.headFeature;
|
||||
}
|
||||
}
|
||||
|
||||
/** @return true unconditionally. */
|
||||
|
@ -103,6 +124,73 @@ public class INodeDirectory extends INodeWithAdditionalFields
|
|||
return false;
|
||||
}
|
||||
|
||||
void setQuota(long nsQuota, long dsQuota) {
|
||||
DirectoryWithQuotaFeature quota = getDirectoryWithQuotaFeature();
|
||||
if (quota != null) {
|
||||
// already has quota; so set the quota to the new values
|
||||
quota.setQuota(nsQuota, dsQuota);
|
||||
if (!isQuotaSet() && !isRoot()) {
|
||||
removeFeature(quota);
|
||||
}
|
||||
} else {
|
||||
final Quota.Counts c = computeQuotaUsage();
|
||||
quota = addDirectoryWithQuotaFeature(nsQuota, dsQuota);
|
||||
quota.setSpaceConsumed(c.get(Quota.NAMESPACE), c.get(Quota.DISKSPACE));
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public Quota.Counts getQuotaCounts() {
|
||||
final DirectoryWithQuotaFeature q = getDirectoryWithQuotaFeature();
|
||||
return q != null? q.getQuota(): super.getQuotaCounts();
|
||||
}
|
||||
|
||||
@Override
|
||||
public void addSpaceConsumed(long nsDelta, long dsDelta, boolean verify)
|
||||
throws QuotaExceededException {
|
||||
final DirectoryWithQuotaFeature q = getDirectoryWithQuotaFeature();
|
||||
if (q != null) {
|
||||
q.addSpaceConsumed(this, nsDelta, dsDelta, verify);
|
||||
} else {
|
||||
addSpaceConsumed2Parent(nsDelta, dsDelta, verify);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* If the directory contains a {@link DirectoryWithQuotaFeature}, return it;
|
||||
* otherwise, return null.
|
||||
*/
|
||||
public final DirectoryWithQuotaFeature getDirectoryWithQuotaFeature() {
|
||||
for(Feature f = headFeature; f != null; f = f.nextFeature) {
|
||||
if (f instanceof DirectoryWithQuotaFeature) {
|
||||
return (DirectoryWithQuotaFeature)f;
|
||||
}
|
||||
}
|
||||
return null;
|
||||
}
|
||||
|
||||
/** Is this directory with quota? */
|
||||
final boolean isWithQuota() {
|
||||
return getDirectoryWithQuotaFeature() != null;
|
||||
}
|
||||
|
||||
DirectoryWithQuotaFeature addDirectoryWithQuotaFeature(
|
||||
long nsQuota, long dsQuota) {
|
||||
Preconditions.checkState(!isWithQuota(), "Directory is already with quota");
|
||||
final DirectoryWithQuotaFeature quota = new DirectoryWithQuotaFeature(
|
||||
nsQuota, dsQuota);
|
||||
addFeature(quota);
|
||||
return quota;
|
||||
}
|
||||
|
||||
private void addFeature(Feature f) {
|
||||
headFeature = INode.Feature.Util.addFeature(f, headFeature);
|
||||
}
|
||||
|
||||
private void removeFeature(Feature f) {
|
||||
headFeature = INode.Feature.Util.removeFeature(f, headFeature);
|
||||
}
|
||||
|
||||
private int searchChildren(byte[] name) {
|
||||
return children == null? -1: Collections.binarySearch(children, name);
|
||||
}
|
||||
|
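Looking a feature up is then just a linear scan of that list with an instanceof test, which is what getDirectoryWithQuotaFeature() above does for DirectoryWithQuotaFeature. A hedged sketch of a generic lookup helper (hypothetical; a real version would have to sit in the org.apache.hadoop.hdfs.server.namenode package because INode.Feature is package-private):

    // Sketch only: scan the singly linked feature list for the first feature of a
    // given type, the way getDirectoryWithQuotaFeature() scans for its quota feature.
    static <F extends INode.Feature<F>, T extends F> T findFeature(
        Class<T> type, F head) {
      for (F f = head; f != null; f = f.getNextFeature()) {
        if (type.isInstance(f)) {
          return type.cast(f);   // e.g. DirectoryWithQuotaFeature
        }
      }
      return null;               // caller falls back to the parent/default path
    }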
@ -142,27 +230,6 @@ public class INodeDirectory extends INodeWithAdditionalFields
|
|||
return true;
|
||||
}
|
||||
|
||||
/**
|
||||
* Replace itself with {@link INodeDirectoryWithQuota} or
|
||||
* {@link INodeDirectoryWithSnapshot} depending on the latest snapshot.
|
||||
*/
|
||||
INodeDirectoryWithQuota replaceSelf4Quota(final Snapshot latest,
|
||||
final long nsQuota, final long dsQuota, final INodeMap inodeMap)
|
||||
throws QuotaExceededException {
|
||||
Preconditions.checkState(!(this instanceof INodeDirectoryWithQuota),
|
||||
"this is already an INodeDirectoryWithQuota, this=%s", this);
|
||||
|
||||
if (!this.isInLatestSnapshot(latest)) {
|
||||
final INodeDirectoryWithQuota q = new INodeDirectoryWithQuota(
|
||||
this, true, nsQuota, dsQuota);
|
||||
replaceSelf(q, inodeMap);
|
||||
return q;
|
||||
} else {
|
||||
final INodeDirectoryWithSnapshot s = new INodeDirectoryWithSnapshot(this);
|
||||
s.setQuota(nsQuota, dsQuota);
|
||||
return replaceSelf(s, inodeMap).saveSelf2Snapshot(latest, this);
|
||||
}
|
||||
}
|
||||
/** Replace itself with an {@link INodeDirectorySnapshottable}. */
|
||||
public INodeDirectorySnapshottable replaceSelf4INodeDirectorySnapshottable(
|
||||
Snapshot latest, final INodeMap inodeMap) throws QuotaExceededException {
|
||||
|
@ -183,7 +250,7 @@ public class INodeDirectory extends INodeWithAdditionalFields
|
|||
public INodeDirectory replaceSelf4INodeDirectory(final INodeMap inodeMap) {
|
||||
Preconditions.checkState(getClass() != INodeDirectory.class,
|
||||
"the class is already INodeDirectory, this=%s", this);
|
||||
return replaceSelf(new INodeDirectory(this, true), inodeMap);
|
||||
return replaceSelf(new INodeDirectory(this, true, true), inodeMap);
|
||||
}
|
||||
|
||||
/** Replace itself with the given directory. */
|
||||
|
@ -439,6 +506,21 @@ public class INodeDirectory extends INodeWithAdditionalFields
|
|||
@Override
|
||||
public Quota.Counts computeQuotaUsage(Quota.Counts counts, boolean useCache,
|
||||
int lastSnapshotId) {
|
||||
final DirectoryWithQuotaFeature q = getDirectoryWithQuotaFeature();
|
||||
if (q != null) {
|
||||
if (useCache && isQuotaSet()) {
|
||||
q.addNamespaceDiskspace(counts);
|
||||
} else {
|
||||
computeDirectoryQuotaUsage(counts, false, lastSnapshotId);
|
||||
}
|
||||
return counts;
|
||||
} else {
|
||||
return computeDirectoryQuotaUsage(counts, useCache, lastSnapshotId);
|
||||
}
|
||||
}
|
||||
|
||||
Quota.Counts computeDirectoryQuotaUsage(Quota.Counts counts, boolean useCache,
|
||||
int lastSnapshotId) {
|
||||
if (children != null) {
|
||||
for (INode child : children) {
|
||||
child.computeQuotaUsage(counts, useCache, lastSnapshotId);
|
||||
|
@ -456,6 +538,16 @@ public class INodeDirectory extends INodeWithAdditionalFields
|
|||
@Override
|
||||
public ContentSummaryComputationContext computeContentSummary(
|
||||
ContentSummaryComputationContext summary) {
|
||||
final DirectoryWithQuotaFeature q = getDirectoryWithQuotaFeature();
|
||||
if (q != null) {
|
||||
return q.computeContentSummary(this, summary);
|
||||
} else {
|
||||
return computeDirectoryContentSummary(summary);
|
||||
}
|
||||
}
|
||||
|
||||
ContentSummaryComputationContext computeDirectoryContentSummary(
|
||||
ContentSummaryComputationContext summary) {
|
||||
ReadOnlyList<INode> childrenList = getChildrenList(null);
|
||||
// Explicit traversing is done to enable repositioning after relinquishing
|
||||
// and reacquiring locks.
|
||||
|
@ -570,7 +662,7 @@ public class INodeDirectory extends INodeWithAdditionalFields
|
|||
Quota.Counts counts = cleanSubtreeRecursively(snapshot, prior,
|
||||
collectedBlocks, removedINodes, null, countDiffChange);
|
||||
if (isQuotaSet()) {
|
||||
((INodeDirectoryWithQuota) this).addSpaceConsumed2Cache(
|
||||
getDirectoryWithQuotaFeature().addSpaceConsumed2Cache(
|
||||
-counts.get(Quota.NAMESPACE), -counts.get(Quota.DISKSPACE));
|
||||
}
|
||||
return counts;
|
||||
|
@ -606,8 +698,9 @@ public class INodeDirectory extends INodeWithAdditionalFields
|
|||
final Snapshot snapshot) {
|
||||
super.dumpTreeRecursively(out, prefix, snapshot);
|
||||
out.print(", childrenSize=" + getChildrenList(snapshot).size());
|
||||
if (this instanceof INodeDirectoryWithQuota) {
|
||||
out.print(((INodeDirectoryWithQuota)this).quotaString());
|
||||
final DirectoryWithQuotaFeature q = getDirectoryWithQuotaFeature();
|
||||
if (q != null) {
|
||||
out.print(", " + q);
|
||||
}
|
||||
if (this instanceof Snapshot.Root) {
|
||||
out.print(", snapshotId=" + snapshot.getId());
|
||||
|
|
|
@ -29,10 +29,8 @@ import org.apache.hadoop.hdfs.protocol.Block;
|
|||
import org.apache.hadoop.hdfs.protocol.QuotaExceededException;
|
||||
import org.apache.hadoop.hdfs.server.blockmanagement.*;
|
||||
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.BlockUCState;
|
||||
import org.apache.hadoop.hdfs.server.namenode.snapshot.FileWithSnapshot;
|
||||
import org.apache.hadoop.hdfs.server.namenode.snapshot.FileWithSnapshot.FileDiff;
|
||||
import org.apache.hadoop.hdfs.server.namenode.snapshot.FileWithSnapshot.FileDiffList;
|
||||
import org.apache.hadoop.hdfs.server.namenode.snapshot.FileWithSnapshot.Util;
|
||||
import org.apache.hadoop.hdfs.server.namenode.snapshot.FileDiff;
|
||||
import org.apache.hadoop.hdfs.server.namenode.snapshot.FileDiffList;
|
||||
import org.apache.hadoop.hdfs.server.namenode.snapshot.INodeFileWithSnapshot;
|
||||
import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
|
||||
|
||||
|
@ -47,13 +45,15 @@ public class INodeFile extends INodeWithAdditionalFields
|
|||
* A feature contains specific information for a type of INodeFile. E.g.,
|
||||
* we can have separate features for Under-Construction and Snapshot.
|
||||
*/
|
||||
public static abstract class Feature {
|
||||
public static abstract class Feature implements INode.Feature<Feature> {
|
||||
private Feature nextFeature;
|
||||
|
||||
@Override
|
||||
public Feature getNextFeature() {
|
||||
return nextFeature;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void setNextFeature(Feature next) {
|
||||
this.nextFeature = next;
|
||||
}
|
||||
|
@ -157,26 +157,12 @@ public class INodeFile extends INodeWithAdditionalFields
|
|||
return getFileUnderConstructionFeature() != null;
|
||||
}
|
||||
|
||||
void addFeature(Feature f) {
|
||||
f.nextFeature = headFeature;
|
||||
headFeature = f;
|
||||
private void addFeature(Feature f) {
|
||||
headFeature = INode.Feature.Util.addFeature(f, headFeature);
|
||||
}
|
||||
|
||||
void removeFeature(Feature f) {
|
||||
if (f == headFeature) {
|
||||
headFeature = headFeature.nextFeature;
|
||||
return;
|
||||
} else if (headFeature != null) {
|
||||
Feature prev = headFeature;
|
||||
Feature curr = headFeature.nextFeature;
|
||||
for (; curr != null && curr != f; prev = curr, curr = curr.nextFeature)
|
||||
;
|
||||
if (curr != null) {
|
||||
prev.nextFeature = curr.nextFeature;
|
||||
return;
|
||||
}
|
||||
}
|
||||
throw new IllegalStateException("Feature " + f + " not found.");
|
||||
private void removeFeature(Feature f) {
|
||||
headFeature = INode.Feature.Util.removeFeature(f, headFeature);
|
||||
}
|
||||
|
||||
/** @return true unconditionally. */
|
||||
|
@ -194,10 +180,10 @@ public class INodeFile extends INodeWithAdditionalFields
|
|||
/* Start of Under-Construction Feature */
|
||||
|
||||
/** Convert this file to an {@link INodeFileUnderConstruction}. */
|
||||
public INodeFile toUnderConstruction(String clientName, String clientMachine,
|
||||
INodeFile toUnderConstruction(String clientName, String clientMachine,
|
||||
DatanodeDescriptor clientNode) {
|
||||
Preconditions.checkState(!isUnderConstruction(),
|
||||
"file is already an INodeFileUnderConstruction");
|
||||
"file is already under construction");
|
||||
FileUnderConstructionFeature uc = new FileUnderConstructionFeature(
|
||||
clientName, clientMachine, clientNode);
|
||||
addFeature(uc);
|
||||
|
@ -209,6 +195,8 @@ public class INodeFile extends INodeWithAdditionalFields
|
|||
* feature.
|
||||
*/
|
||||
public INodeFile toCompleteFile(long mtime) {
|
||||
Preconditions.checkState(isUnderConstruction(),
|
||||
"file is no longer under construction");
|
||||
FileUnderConstructionFeature uc = getFileUnderConstructionFeature();
|
||||
if (uc != null) {
|
||||
assertAllBlocksComplete();
|
||||
|
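With the under-construction state expressed as FileUnderConstructionFeature, toUnderConstruction() and toCompleteFile() reduce to adding and removing that feature behind Preconditions checks rather than swapping the inode's class. A toy model of that toggle, using hypothetical names and the same Guava Preconditions messages as the patch:

    import com.google.common.base.Preconditions;

    // Toy model of the toUnderConstruction()/toCompleteFile() toggle above: the
    // "under construction" state is simply the presence of a feature object.
    class FileState {
      private Object underConstructionFeature;   // stand-in for FileUnderConstructionFeature

      boolean isUnderConstruction() { return underConstructionFeature != null; }

      void startWriting(String clientName) {
        Preconditions.checkState(!isUnderConstruction(),
            "file is already under construction");
        underConstructionFeature = clientName;   // addFeature(uc) in the real code
      }

      void complete() {
        Preconditions.checkState(isUnderConstruction(),
            "file is no longer under construction");
        underConstructionFeature = null;         // removeFeature(uc) in the real code
      }
    }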
@ -235,10 +223,11 @@ public class INodeFile extends INodeWithAdditionalFields
|
|||
this.blocks[index] = blk;
|
||||
}
|
||||
|
||||
@Override // BlockCollection
|
||||
@Override // BlockCollection, the file should be under construction
|
||||
public BlockInfoUnderConstruction setLastBlock(BlockInfo lastBlock,
|
||||
DatanodeStorageInfo[] locations) throws IOException {
|
||||
Preconditions.checkState(isUnderConstruction());
|
||||
Preconditions.checkState(isUnderConstruction(),
|
||||
"file is no longer under construction");
|
||||
|
||||
if (numBlocks() == 0) {
|
||||
throw new IOException("Failed to set last block: File is empty.");
|
||||
|
@ -256,6 +245,8 @@ public class INodeFile extends INodeWithAdditionalFields
|
|||
* the last one on the list.
|
||||
*/
|
||||
boolean removeLastBlock(Block oldblock) {
|
||||
Preconditions.checkState(isUnderConstruction(),
|
||||
"file is no longer under construction");
|
||||
if (blocks == null || blocks.length == 0) {
|
||||
return false;
|
||||
}
|
||||
|
@ -307,10 +298,8 @@ public class INodeFile extends INodeWithAdditionalFields
|
|||
}
|
||||
|
||||
@Override
|
||||
public final short getBlockReplication() {
|
||||
return this instanceof FileWithSnapshot?
|
||||
Util.getBlockReplication((FileWithSnapshot)this)
|
||||
: getFileReplication(null);
|
||||
public short getBlockReplication() {
|
||||
return getFileReplication(null);
|
||||
}
|
||||
|
||||
/** Set the replication factor of this file. */
|
||||
|
@ -430,8 +419,8 @@ public class INodeFile extends INodeWithAdditionalFields
|
|||
clear();
|
||||
removedINodes.add(this);
|
||||
|
||||
if (this instanceof FileWithSnapshot) {
|
||||
((FileWithSnapshot) this).getDiffs().clear();
|
||||
if (this instanceof INodeFileWithSnapshot) {
|
||||
((INodeFileWithSnapshot) this).getDiffs().clear();
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -446,8 +435,8 @@ public class INodeFile extends INodeWithAdditionalFields
|
|||
boolean useCache, int lastSnapshotId) {
|
||||
long nsDelta = 1;
|
||||
final long dsDelta;
|
||||
if (this instanceof FileWithSnapshot) {
|
||||
FileDiffList fileDiffList = ((FileWithSnapshot) this).getDiffs();
|
||||
if (this instanceof INodeFileWithSnapshot) {
|
||||
FileDiffList fileDiffList = ((INodeFileWithSnapshot) this).getDiffs();
|
||||
Snapshot last = fileDiffList.getLastSnapshot();
|
||||
List<FileDiff> diffs = fileDiffList.asList();
|
||||
|
||||
|
@ -479,8 +468,8 @@ public class INodeFile extends INodeWithAdditionalFields
|
|||
private void computeContentSummary4Snapshot(final Content.Counts counts) {
|
||||
// file length and diskspace only counted for the latest state of the file
|
||||
// i.e. either the current state or the last snapshot
|
||||
if (this instanceof FileWithSnapshot) {
|
||||
final FileWithSnapshot withSnapshot = (FileWithSnapshot)this;
|
||||
if (this instanceof INodeFileWithSnapshot) {
|
||||
final INodeFileWithSnapshot withSnapshot = (INodeFileWithSnapshot) this;
|
||||
final FileDiffList diffs = withSnapshot.getDiffs();
|
||||
final int n = diffs.asList().size();
|
||||
counts.add(Content.FILE, n);
|
||||
|
@ -496,8 +485,8 @@ public class INodeFile extends INodeWithAdditionalFields
|
|||
}
|
||||
|
||||
private void computeContentSummary4Current(final Content.Counts counts) {
|
||||
if (this instanceof FileWithSnapshot
|
||||
&& ((FileWithSnapshot)this).isCurrentFileDeleted()) {
|
||||
if (this instanceof INodeFileWithSnapshot
|
||||
&& ((INodeFileWithSnapshot) this).isCurrentFileDeleted()) {
|
||||
return;
|
||||
}
|
||||
|
||||
|
@ -516,8 +505,9 @@ public class INodeFile extends INodeWithAdditionalFields
|
|||
* otherwise, get the file size from the given snapshot.
|
||||
*/
|
||||
public final long computeFileSize(Snapshot snapshot) {
|
||||
if (snapshot != null && this instanceof FileWithSnapshot) {
|
||||
final FileDiff d = ((FileWithSnapshot)this).getDiffs().getDiff(snapshot);
|
||||
if (snapshot != null && this instanceof INodeFileWithSnapshot) {
|
||||
final FileDiff d = ((INodeFileWithSnapshot) this).getDiffs().getDiff(
|
||||
snapshot);
|
||||
if (d != null) {
|
||||
return d.getFileSize();
|
||||
}
|
||||
|
|
|
@ -26,8 +26,8 @@ import java.util.List;
|
|||
import org.apache.hadoop.fs.permission.FsPermission;
|
||||
import org.apache.hadoop.fs.permission.PermissionStatus;
|
||||
import org.apache.hadoop.hdfs.protocol.QuotaExceededException;
|
||||
import org.apache.hadoop.hdfs.server.namenode.snapshot.FileWithSnapshot;
|
||||
import org.apache.hadoop.hdfs.server.namenode.snapshot.INodeDirectoryWithSnapshot;
|
||||
import org.apache.hadoop.hdfs.server.namenode.snapshot.INodeFileWithSnapshot;
|
||||
import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
|
||||
|
||||
import com.google.common.base.Preconditions;
|
||||
|
@ -102,8 +102,8 @@ public abstract class INodeReference extends INode {
|
|||
}
|
||||
if (wn != null) {
|
||||
INode referred = wc.getReferredINode();
|
||||
if (referred instanceof FileWithSnapshot) {
|
||||
return ((FileWithSnapshot) referred).getDiffs().getPrior(
|
||||
if (referred instanceof INodeFileWithSnapshot) {
|
||||
return ((INodeFileWithSnapshot) referred).getDiffs().getPrior(
|
||||
wn.lastSnapshotId);
|
||||
} else if (referred instanceof INodeDirectoryWithSnapshot) {
|
||||
return ((INodeDirectoryWithSnapshot) referred).getDiffs().getPrior(
|
||||
|
@ -547,8 +547,8 @@ public abstract class INodeReference extends INode {
|
|||
private Snapshot getSelfSnapshot() {
|
||||
INode referred = getReferredINode().asReference().getReferredINode();
|
||||
Snapshot snapshot = null;
|
||||
if (referred instanceof FileWithSnapshot) {
|
||||
snapshot = ((FileWithSnapshot) referred).getDiffs().getPrior(
|
||||
if (referred instanceof INodeFileWithSnapshot) {
|
||||
snapshot = ((INodeFileWithSnapshot) referred).getDiffs().getPrior(
|
||||
lastSnapshotId);
|
||||
} else if (referred instanceof INodeDirectoryWithSnapshot) {
|
||||
snapshot = ((INodeDirectoryWithSnapshot) referred).getDiffs().getPrior(
|
||||
|
@ -637,10 +637,10 @@ public abstract class INodeReference extends INode {
|
|||
Snapshot snapshot = getSelfSnapshot(prior);
|
||||
|
||||
INode referred = getReferredINode().asReference().getReferredINode();
|
||||
if (referred instanceof FileWithSnapshot) {
|
||||
if (referred instanceof INodeFileWithSnapshot) {
|
||||
// if referred is a file, it must be a FileWithSnapshot since we did
|
||||
// recordModification before the rename
|
||||
FileWithSnapshot sfile = (FileWithSnapshot) referred;
|
||||
INodeFileWithSnapshot sfile = (INodeFileWithSnapshot) referred;
|
||||
// make sure we mark the file as deleted
|
||||
sfile.deleteCurrentFile();
|
||||
try {
|
||||
|
@ -671,8 +671,8 @@ public abstract class INodeReference extends INode {
|
|||
WithCount wc = (WithCount) getReferredINode().asReference();
|
||||
INode referred = wc.getReferredINode();
|
||||
Snapshot lastSnapshot = null;
|
||||
if (referred instanceof FileWithSnapshot) {
|
||||
lastSnapshot = ((FileWithSnapshot) referred).getDiffs()
|
||||
if (referred instanceof INodeFileWithSnapshot) {
|
||||
lastSnapshot = ((INodeFileWithSnapshot) referred).getDiffs()
|
||||
.getLastSnapshot();
|
||||
} else if (referred instanceof INodeDirectoryWithSnapshot) {
|
||||
lastSnapshot = ((INodeDirectoryWithSnapshot) referred)
|
||||
|
|
|
@ -41,6 +41,7 @@ import org.apache.hadoop.ha.HealthCheckFailedException;
|
|||
import org.apache.hadoop.ha.ServiceFailedException;
|
||||
import org.apache.hadoop.fs.FileSystem;
|
||||
import org.apache.hadoop.fs.Trash;
|
||||
|
||||
import static org.apache.hadoop.hdfs.DFSConfigKeys.*;
|
||||
import static org.apache.hadoop.util.ExitUtil.terminate;
|
||||
import static org.apache.hadoop.util.ToolRunner.confirmPrompt;
|
||||
|
|
|
@ -21,6 +21,7 @@ import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_ADMIN;
|
|||
|
||||
import java.io.IOException;
|
||||
import java.net.InetSocketAddress;
|
||||
import java.net.URI;
|
||||
import java.util.HashMap;
|
||||
import java.util.Map;
|
||||
|
||||
|
@ -69,25 +70,45 @@ public class NameNodeHttpServer {
|
|||
this.bindAddress = bindAddress;
|
||||
}
|
||||
|
||||
public void start() throws IOException {
|
||||
void start() throws IOException {
|
||||
final String infoHost = bindAddress.getHostName();
|
||||
int infoPort = bindAddress.getPort();
|
||||
httpServer = new HttpServer.Builder().setName("hdfs")
|
||||
.setBindAddress(infoHost).setPort(infoPort)
|
||||
HttpServer.Builder builder = new HttpServer.Builder().setName("hdfs")
|
||||
.addEndpoint(URI.create(("http://" + NetUtils.getHostPortString(bindAddress))))
|
||||
.setFindPort(infoPort == 0).setConf(conf).setACL(
|
||||
new AccessControlList(conf.get(DFS_ADMIN, " ")))
|
||||
.setSecurityEnabled(UserGroupInformation.isSecurityEnabled())
|
||||
.setUsernameConfKey(
|
||||
DFSConfigKeys.DFS_NAMENODE_INTERNAL_SPNEGO_USER_NAME_KEY)
|
||||
.setKeytabConfKey(DFSUtil.getSpnegoKeytabKey(conf,
|
||||
DFSConfigKeys.DFS_NAMENODE_KEYTAB_FILE_KEY)).build();
|
||||
DFSConfigKeys.DFS_NAMENODE_KEYTAB_FILE_KEY));
|
||||
|
||||
boolean certSSL = conf.getBoolean(DFSConfigKeys.DFS_HTTPS_ENABLE_KEY, false);
|
||||
if (certSSL) {
|
||||
httpsAddress = NetUtils.createSocketAddr(conf.get(
|
||||
DFSConfigKeys.DFS_NAMENODE_HTTPS_ADDRESS_KEY,
|
||||
DFSConfigKeys.DFS_NAMENODE_HTTPS_ADDRESS_DEFAULT));
|
||||
|
||||
builder.addEndpoint(URI.create("https://"
|
||||
+ NetUtils.getHostPortString(httpsAddress)));
|
||||
Configuration sslConf = new Configuration(false);
|
||||
sslConf.setBoolean(DFSConfigKeys.DFS_CLIENT_HTTPS_NEED_AUTH_KEY, conf
|
||||
.getBoolean(DFSConfigKeys.DFS_CLIENT_HTTPS_NEED_AUTH_KEY,
|
||||
DFSConfigKeys.DFS_CLIENT_HTTPS_NEED_AUTH_DEFAULT));
|
||||
sslConf.addResource(conf.get(
|
||||
DFSConfigKeys.DFS_SERVER_HTTPS_KEYSTORE_RESOURCE_KEY,
|
||||
DFSConfigKeys.DFS_SERVER_HTTPS_KEYSTORE_RESOURCE_DEFAULT));
|
||||
DFSUtil.loadSslConfToHttpServerBuilder(builder, sslConf);
|
||||
}
|
||||
|
||||
httpServer = builder.build();
|
||||
if (WebHdfsFileSystem.isEnabled(conf, HttpServer.LOG)) {
|
||||
//add SPNEGO authentication filter for webhdfs
|
||||
final String name = "SPNEGO";
|
||||
final String classname = AuthFilter.class.getName();
|
||||
final String pathSpec = WebHdfsFileSystem.PATH_PREFIX + "/*";
|
||||
Map<String, String> params = getAuthFilterParams(conf);
|
||||
httpServer.defineFilter(httpServer.getWebAppContext(), name, classname, params,
|
||||
HttpServer.defineFilter(httpServer.getWebAppContext(), name, classname, params,
|
||||
new String[]{pathSpec});
|
||||
HttpServer.LOG.info("Added filter '" + name + "' (class=" + classname + ")");
|
||||
|
||||
|
@ -97,34 +118,19 @@ public class NameNodeHttpServer {
|
|||
+ ";" + Param.class.getPackage().getName(), pathSpec);
|
||||
}
|
||||
|
||||
boolean certSSL = conf.getBoolean(DFSConfigKeys.DFS_HTTPS_ENABLE_KEY, false);
|
||||
httpServer.setAttribute(NAMENODE_ATTRIBUTE_KEY, nn);
|
||||
httpServer.setAttribute(JspHelper.CURRENT_CONF, conf);
|
||||
setupServlets(httpServer, conf);
|
||||
httpServer.start();
|
||||
httpAddress = httpServer.getConnectorAddress(0);
|
||||
if (certSSL) {
|
||||
boolean needClientAuth = conf.getBoolean("dfs.https.need.client.auth", false);
|
||||
httpsAddress = NetUtils.createSocketAddr(conf.get(
|
||||
DFSConfigKeys.DFS_NAMENODE_HTTPS_ADDRESS_KEY,
|
||||
DFSConfigKeys.DFS_NAMENODE_HTTPS_ADDRESS_DEFAULT));
|
||||
|
||||
Configuration sslConf = new Configuration(false);
|
||||
sslConf.addResource(conf.get(
|
||||
DFSConfigKeys.DFS_SERVER_HTTPS_KEYSTORE_RESOURCE_KEY,
|
||||
DFSConfigKeys.DFS_SERVER_HTTPS_KEYSTORE_RESOURCE_DEFAULT));
|
||||
httpServer.addSslListener(httpsAddress, sslConf, needClientAuth);
|
||||
httpsAddress = httpServer.getConnectorAddress(1);
|
||||
// assume same ssl port for all datanodes
|
||||
InetSocketAddress datanodeSslPort = NetUtils.createSocketAddr(conf.get(
|
||||
DFSConfigKeys.DFS_DATANODE_HTTPS_ADDRESS_KEY, infoHost + ":" + 50475));
|
||||
httpServer.setAttribute(DFSConfigKeys.DFS_DATANODE_HTTPS_PORT_KEY, datanodeSslPort
|
||||
.getPort());
|
||||
}
|
||||
httpServer.setAttribute(NAMENODE_ATTRIBUTE_KEY, nn);
|
||||
httpServer.setAttribute(JspHelper.CURRENT_CONF, conf);
|
||||
setupServlets(httpServer, conf);
|
||||
httpServer.start();
|
||||
httpAddress = new InetSocketAddress(bindAddress.getAddress(),
|
||||
httpServer.getPort());
|
||||
if (certSSL) {
|
||||
httpsAddress = new InetSocketAddress(bindAddress.getAddress(),
|
||||
httpServer.getConnectorPort(1));
|
||||
}
|
||||
}
|
||||
|
||||
private Map<String, String> getAuthFilterParams(Configuration conf)
|
||||
|
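Condensed from the two NameNodeHttpServer hunks above: the server is now assembled once through HttpServer.Builder, the optional HTTPS endpoint is added before build(), and the bound addresses are read back after start(). The fragment below reuses only calls visible in the diff and omits the SPNEGO filter and keytab settings; it is a sketch, not a drop-in replacement for start():

    // Sketch condensed from the diff above (not standalone; relies on the
    // surrounding NameNodeHttpServer fields such as bindAddress and conf).
    HttpServer.Builder builder = new HttpServer.Builder()
        .setName("hdfs")
        .addEndpoint(URI.create("http://" + NetUtils.getHostPortString(bindAddress)))
        .setFindPort(bindAddress.getPort() == 0)
        .setConf(conf)
        .setACL(new AccessControlList(conf.get(DFS_ADMIN, " ")));

    if (conf.getBoolean(DFSConfigKeys.DFS_HTTPS_ENABLE_KEY, false)) {
      InetSocketAddress httpsAddress = NetUtils.createSocketAddr(conf.get(
          DFSConfigKeys.DFS_NAMENODE_HTTPS_ADDRESS_KEY,
          DFSConfigKeys.DFS_NAMENODE_HTTPS_ADDRESS_DEFAULT));
      // HTTPS is now just a second endpoint on the same server, configured before build().
      builder.addEndpoint(URI.create("https://" + NetUtils.getHostPortString(httpsAddress)));
    }

    HttpServer httpServer = builder.build();
    httpServer.start();
    // Connector addresses are only known after start() when an ephemeral port was requested.
    InetSocketAddress httpAddress = httpServer.getConnectorAddress(0);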
|
|
@@ -102,6 +102,16 @@ public interface NameNodeMXBean {
   */
  public float getPercentRemaining();

  /**
   * Returns the amount of cache used by the datanode (in bytes).
   */
  public long getCacheUsed();

  /**
   * Returns the total cache capacity of the datanode (in bytes).
   */
  public long getCacheCapacity();

  /**
   * Get the total space used by the block pools of this namenode
   */
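The new cache counters surface through JMX like the rest of NameNodeMXBean. A sketch of reading them from the platform MBean server; the object name follows the usual Hadoop registration convention ("Hadoop:service=NameNode,name=NameNodeInfo") and is an assumption here, not something this patch defines:

    import java.lang.management.ManagementFactory;
    import javax.management.MBeanServer;
    import javax.management.ObjectName;

    public class CacheStatsProbe {
      public static void main(String[] args) throws Exception {
        MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
        // Assumed bean name; verify against your deployment's JMX registration.
        ObjectName nnInfo = new ObjectName("Hadoop:service=NameNode,name=NameNodeInfo");
        long cacheCapacity = (Long) mbs.getAttribute(nnInfo, "CacheCapacity");
        long cacheUsed = (Long) mbs.getAttribute(nnInfo, "CacheUsed");
        System.out.println("cache used " + cacheUsed + " of " + cacheCapacity + " bytes");
      }
    }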
|
|
@ -64,6 +64,7 @@ import org.apache.hadoop.hdfs.protocol.BlockListAsLongs;
|
|||
import org.apache.hadoop.hdfs.protocol.CacheDirectiveEntry;
|
||||
import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo;
|
||||
import org.apache.hadoop.hdfs.protocol.CachePoolInfo;
|
||||
import org.apache.hadoop.hdfs.protocol.CachePoolEntry;
|
||||
import org.apache.hadoop.hdfs.protocol.CorruptFileBlocks;
|
||||
import org.apache.hadoop.hdfs.protocol.DSQuotaExceededException;
|
||||
import org.apache.hadoop.hdfs.protocol.DatanodeID;
|
||||
|
@@ -1301,26 +1302,26 @@ class NameNodeRpcServer implements NamenodeProtocols {
  }

  private class ServerSideCachePoolIterator
      extends BatchedRemoteIterator<String, CachePoolInfo> {
      extends BatchedRemoteIterator<String, CachePoolEntry> {

    public ServerSideCachePoolIterator(String prevKey) {
      super(prevKey);
    }

    @Override
    public BatchedEntries<CachePoolInfo> makeRequest(String prevKey)
    public BatchedEntries<CachePoolEntry> makeRequest(String prevKey)
        throws IOException {
      return namesystem.listCachePools(prevKey);
    }

    @Override
    public String elementToPrevKey(CachePoolInfo element) {
      return element.getPoolName();
    public String elementToPrevKey(CachePoolEntry entry) {
      return entry.getInfo().getPoolName();
    }
  }

  @Override
  public RemoteIterator<CachePoolInfo> listCachePools(String prevKey)
  public RemoteIterator<CachePoolEntry> listCachePools(String prevKey)
      throws IOException {
    return new ServerSideCachePoolIterator(prevKey);
  }
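The iterator above pages through cache pools by repeatedly asking the namesystem for the batch that follows the last pool name it returned. Stripped of the RPC plumbing, the prevKey paging loop that BatchedRemoteIterator drives looks roughly like this sketch (fetchBatch is a hypothetical stand-in for makeRequest):

    import java.util.List;
    import java.util.function.Function;

    // Sketch of prevKey-based paging: fetch the batch after the last key seen,
    // hand out its elements, and stop when a batch comes back empty.
    class PagedLister<E> {
      interface KeyExtractor<E> { String keyOf(E element); }

      static <E> void listAll(Function<String, List<E>> fetchBatch,  // hypothetical
                              KeyExtractor<E> keys) {
        String prevKey = "";                 // empty string = start from the beginning
        while (true) {
          List<E> batch = fetchBatch.apply(prevKey);
          if (batch.isEmpty()) {
            return;                          // no more entries
          }
          for (E e : batch) {
            System.out.println(e);
            prevKey = keys.keyOf(e);         // e.g. entry.getInfo().getPoolName()
          }
        }
      }
    }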
|
|
@ -36,6 +36,7 @@ import org.apache.commons.logging.Log;
|
|||
import org.apache.commons.logging.LogFactory;
|
||||
import org.apache.hadoop.classification.InterfaceAudience;
|
||||
import org.apache.hadoop.conf.Configuration;
|
||||
import org.apache.hadoop.fs.Path;
|
||||
import org.apache.hadoop.fs.UnresolvedLinkException;
|
||||
import org.apache.hadoop.hdfs.BlockReader;
|
||||
import org.apache.hadoop.hdfs.BlockReaderFactory;
|
||||
|
@ -46,9 +47,11 @@ import org.apache.hadoop.hdfs.net.TcpPeerServer;
|
|||
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
|
||||
import org.apache.hadoop.hdfs.protocol.DirectoryListing;
|
||||
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
|
||||
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
|
||||
import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
|
||||
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
|
||||
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
|
||||
import org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus;
|
||||
import org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy;
|
||||
import org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementStatus;
|
||||
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
|
||||
|
@ -139,6 +142,9 @@ public class NamenodeFsck {
|
|||
|
||||
private final Configuration conf;
|
||||
private final PrintWriter out;
|
||||
private List<String> snapshottableDirs = null;
|
||||
|
||||
private BlockPlacementPolicy bpPolicy;
|
||||
|
||||
/**
|
||||
* Filesystem checker.
|
||||
|
@ -162,6 +168,8 @@ public class NamenodeFsck {
|
|||
this.totalDatanodes = totalDatanodes;
|
||||
this.minReplication = minReplication;
|
||||
this.remoteAddress = remoteAddress;
|
||||
this.bpPolicy = BlockPlacementPolicy.getInstance(conf, null,
|
||||
networktopology);
|
||||
|
||||
for (Iterator<String> it = pmap.keySet().iterator(); it.hasNext();) {
|
||||
String key = it.next();
|
||||
|
@ -178,6 +186,8 @@ public class NamenodeFsck {
|
|||
}
|
||||
else if (key.equals("startblockafter")) {
|
||||
this.currentCookie[0] = pmap.get("startblockafter")[0];
|
||||
} else if (key.equals("includeSnapshots")) {
|
||||
this.snapshottableDirs = new ArrayList<String>();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -194,6 +204,16 @@ public class NamenodeFsck {
|
|||
out.println(msg);
|
||||
namenode.getNamesystem().logFsckEvent(path, remoteAddress);
|
||||
|
||||
if (snapshottableDirs != null) {
|
||||
SnapshottableDirectoryStatus[] snapshotDirs = namenode.getRpcServer()
|
||||
.getSnapshottableDirListing();
|
||||
if (snapshotDirs != null) {
|
||||
for (SnapshottableDirectoryStatus dir : snapshotDirs) {
|
||||
snapshottableDirs.add(dir.getFullPath().toString());
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
final HdfsFileStatus file = namenode.getRpcServer().getFileInfo(path);
|
||||
if (file != null) {
|
||||
|
||||
|
@ -272,6 +292,14 @@ public class NamenodeFsck {
|
|||
boolean isOpen = false;
|
||||
|
||||
if (file.isDir()) {
|
||||
if (snapshottableDirs != null && snapshottableDirs.contains(path)) {
|
||||
String snapshotPath = (path.endsWith(Path.SEPARATOR) ? path : path
|
||||
+ Path.SEPARATOR)
|
||||
+ HdfsConstants.DOT_SNAPSHOT_DIR;
|
||||
HdfsFileStatus snapshotFileInfo = namenode.getRpcServer().getFileInfo(
|
||||
snapshotPath);
|
||||
check(snapshotPath, snapshotFileInfo, res);
|
||||
}
|
||||
byte[] lastReturnedName = HdfsFileStatus.EMPTY_NAME;
|
||||
DirectoryListing thisListing;
|
||||
if (showFiles) {
|
||||
|
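When fsck is asked to include snapshots, it re-runs check() against the directory's ".snapshot" child, built exactly as in the hunk above. As a tiny illustration of that path handling (snapshotDirOf is a hypothetical helper, not part of the patch):

    // Hypothetical helper mirroring the fsck path construction above: append
    // HdfsConstants.DOT_SNAPSHOT_DIR (".snapshot") without doubling the separator.
    static String snapshotDirOf(String path) {
      String base = path.endsWith(Path.SEPARATOR) ? path : path + Path.SEPARATOR;
      return base + HdfsConstants.DOT_SNAPSHOT_DIR;
    }
    // snapshotDirOf("/user/alice")  -> "/user/alice/.snapshot"
    // snapshotDirOf("/user/alice/") -> "/user/alice/.snapshot"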
@ -375,9 +403,8 @@ public class NamenodeFsck {
|
|||
locs.length + " replica(s).");
|
||||
}
|
||||
// verify block placement policy
|
||||
BlockPlacementStatus blockPlacementStatus =
|
||||
BlockPlacementPolicy.getInstance(conf, null, networktopology).
|
||||
verifyBlockPlacement(path, lBlk, targetFileReplication);
|
||||
BlockPlacementStatus blockPlacementStatus = bpPolicy
|
||||
.verifyBlockPlacement(path, lBlk, targetFileReplication);
|
||||
if (!blockPlacementStatus.isPlacementPolicySatisfied()) {
|
||||
res.numMisReplicatedBlocks++;
|
||||
misReplicatedPerFile++;
|
||||
|
|
|
@@ -335,7 +335,7 @@ class NamenodeJspHelper {
      } else if (openForWrite) {
        EditLogOutputStream elos = jas.getCurrentStream();
        if (elos != null) {
          out.println(elos.generateHtmlReport());
          out.println(elos.generateReport());
        } else {
          out.println("not currently writing");
        }
|
|
@ -30,6 +30,7 @@ import java.io.FilenameFilter;
|
|||
import java.io.IOException;
|
||||
import java.net.InetSocketAddress;
|
||||
import java.net.URI;
|
||||
import java.net.URISyntaxException;
|
||||
import java.security.PrivilegedAction;
|
||||
import java.security.PrivilegedExceptionAction;
|
||||
import java.util.Collection;
|
||||
|
@ -256,8 +257,15 @@ public class SecondaryNameNode implements Runnable {
|
|||
|
||||
// initialize the webserver for uploading files.
|
||||
int tmpInfoPort = infoSocAddr.getPort();
|
||||
URI httpEndpoint;
|
||||
try {
|
||||
httpEndpoint = new URI("http://" + NetUtils.getHostPortString(infoSocAddr));
|
||||
} catch (URISyntaxException e) {
|
||||
throw new IOException(e);
|
||||
}
|
||||
|
||||
infoServer = new HttpServer.Builder().setName("secondary")
|
||||
.setBindAddress(infoBindAddress).setPort(tmpInfoPort)
|
||||
.addEndpoint(httpEndpoint)
|
||||
.setFindPort(tmpInfoPort == 0).setConf(conf).setACL(
|
||||
new AccessControlList(conf.get(DFS_ADMIN, " ")))
|
||||
.setSecurityEnabled(UserGroupInformation.isSecurityEnabled())
|
||||
|
@ -275,7 +283,7 @@ public class SecondaryNameNode implements Runnable {
|
|||
LOG.info("Web server init done");
|
||||
|
||||
// The web-server port can be ephemeral... ensure we have the correct info
|
||||
infoPort = infoServer.getPort();
|
||||
infoPort = infoServer.getConnectorAddress(0).getPort();
|
||||
|
||||
conf.set(DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY, infoBindAddress + ":" + infoPort);
|
||||
LOG.info("Secondary Web-server up at: " + infoBindAddress + ":" + infoPort);
|
||||
|
|
|
@ -35,7 +35,8 @@ import org.apache.hadoop.classification.InterfaceAudience;
|
|||
import org.apache.hadoop.conf.Configuration;
|
||||
import org.apache.hadoop.fs.FileUtil;
|
||||
import org.apache.hadoop.http.HttpConfig;
|
||||
import org.apache.hadoop.security.SecurityUtil;
|
||||
import org.apache.hadoop.security.UserGroupInformation;
|
||||
import org.apache.hadoop.security.authentication.client.AuthenticationException;
|
||||
import org.apache.hadoop.util.Time;
|
||||
import org.apache.hadoop.hdfs.DFSConfigKeys;
|
||||
import org.apache.hadoop.hdfs.HdfsConfiguration;
|
||||
|
@ -46,6 +47,7 @@ import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
|
|||
import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeDirType;
|
||||
import org.apache.hadoop.hdfs.server.protocol.RemoteEditLog;
|
||||
import org.apache.hadoop.hdfs.util.DataTransferThrottler;
|
||||
import org.apache.hadoop.hdfs.web.URLConnectionFactory;
|
||||
import org.apache.hadoop.io.MD5Hash;
|
||||
|
||||
import com.google.common.annotations.VisibleForTesting;
|
||||
|
@ -62,6 +64,15 @@ public class TransferFsImage {
|
|||
public final static String MD5_HEADER = "X-MD5-Digest";
|
||||
@VisibleForTesting
|
||||
static int timeout = 0;
|
||||
private static URLConnectionFactory connectionFactory;
|
||||
private static boolean isSpnegoEnabled;
|
||||
|
||||
static {
|
||||
Configuration conf = new Configuration();
|
||||
connectionFactory = URLConnectionFactory
|
||||
.newDefaultURLConnectionFactory(conf);
|
||||
isSpnegoEnabled = UserGroupInformation.isSecurityEnabled();
|
||||
}
|
||||
|
||||
private static final Log LOG = LogFactory.getLog(TransferFsImage.class);
|
||||
|
||||
|
@ -250,8 +261,13 @@ public class TransferFsImage {
|
|||
public static MD5Hash doGetUrl(URL url, List<File> localPaths,
|
||||
Storage dstStorage, boolean getChecksum) throws IOException {
|
||||
long startTime = Time.monotonicNow();
|
||||
HttpURLConnection connection = (HttpURLConnection)
|
||||
SecurityUtil.openSecureHttpConnection(url);
|
||||
HttpURLConnection connection;
|
||||
try {
|
||||
connection = (HttpURLConnection)
|
||||
connectionFactory.openConnection(url, isSpnegoEnabled);
|
||||
} catch (AuthenticationException e) {
|
||||
throw new IOException(e);
|
||||
}
|
||||
|
||||
if (timeout <= 0) {
|
||||
Configuration conf = new HdfsConfiguration();
|
||||
|
|
|
@ -0,0 +1,92 @@
|
|||
/**
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
* regarding copyright ownership. The ASF licenses this file
|
||||
* to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
package org.apache.hadoop.hdfs.server.namenode.snapshot;
|
||||
|
||||
import java.io.DataOutput;
|
||||
import java.io.IOException;
|
||||
import java.util.List;
|
||||
|
||||
import org.apache.hadoop.hdfs.server.namenode.FSImageSerialization;
|
||||
import org.apache.hadoop.hdfs.server.namenode.INode;
|
||||
import org.apache.hadoop.hdfs.server.namenode.INode.BlocksMapUpdateInfo;
|
||||
import org.apache.hadoop.hdfs.server.namenode.INodeFile;
|
||||
import org.apache.hadoop.hdfs.server.namenode.INodeFileAttributes;
|
||||
import org.apache.hadoop.hdfs.server.namenode.Quota;
|
||||
import org.apache.hadoop.hdfs.server.namenode.snapshot.SnapshotFSImageFormat.ReferenceMap;
|
||||
|
||||
/**
|
||||
* The difference of an {@link INodeFile} between two snapshots.
|
||||
*/
|
||||
public class FileDiff extends
|
||||
AbstractINodeDiff<INodeFileWithSnapshot, INodeFileAttributes, FileDiff> {
|
||||
|
||||
/** The file size at snapshot creation time. */
|
||||
private final long fileSize;
|
||||
|
||||
FileDiff(Snapshot snapshot, INodeFile file) {
|
||||
super(snapshot, null, null);
|
||||
fileSize = file.computeFileSize();
|
||||
}
|
||||
|
||||
/** Constructor used by FSImage loading */
|
||||
FileDiff(Snapshot snapshot, INodeFileAttributes snapshotINode,
|
||||
FileDiff posteriorDiff, long fileSize) {
|
||||
super(snapshot, snapshotINode, posteriorDiff);
|
||||
this.fileSize = fileSize;
|
||||
}
|
||||
|
||||
/** @return the file size in the snapshot. */
|
||||
public long getFileSize() {
|
||||
return fileSize;
|
||||
}
|
||||
|
||||
@Override
|
||||
Quota.Counts combinePosteriorAndCollectBlocks(
|
||||
INodeFileWithSnapshot currentINode, FileDiff posterior,
|
||||
BlocksMapUpdateInfo collectedBlocks, final List<INode> removedINodes) {
|
||||
return currentINode.updateQuotaAndCollectBlocks(posterior, collectedBlocks,
|
||||
removedINodes);
|
||||
}
|
||||
|
||||
@Override
|
||||
public String toString() {
|
||||
return super.toString() + " fileSize=" + fileSize + ", rep="
|
||||
+ (snapshotINode == null? "?": snapshotINode.getFileReplication());
|
||||
}
|
||||
|
||||
@Override
|
||||
void write(DataOutput out, ReferenceMap referenceMap) throws IOException {
|
||||
writeSnapshot(out);
|
||||
out.writeLong(fileSize);
|
||||
|
||||
// write snapshotINode
|
||||
if (snapshotINode != null) {
|
||||
out.writeBoolean(true);
|
||||
FSImageSerialization.writeINodeFileAttributes(snapshotINode, out);
|
||||
} else {
|
||||
out.writeBoolean(false);
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
Quota.Counts destroyDiffAndCollectBlocks(INodeFileWithSnapshot currentINode,
|
||||
BlocksMapUpdateInfo collectedBlocks, final List<INode> removedINodes) {
|
||||
return currentINode.updateQuotaAndCollectBlocks(this, collectedBlocks,
|
||||
removedINodes);
|
||||
}
|
||||
}
|
|
@@ -0,0 +1,35 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hdfs.server.namenode.snapshot;

import org.apache.hadoop.hdfs.server.namenode.INodeFileAttributes;

/** A list of FileDiffs for storing snapshot data. */
public class FileDiffList extends
    AbstractINodeDiffList<INodeFileWithSnapshot, INodeFileAttributes, FileDiff> {

  @Override
  FileDiff createDiff(Snapshot snapshot, INodeFileWithSnapshot file) {
    return new FileDiff(snapshot, file);
  }

  @Override
  INodeFileAttributes createSnapshotCopy(INodeFileWithSnapshot currentINode) {
    return new INodeFileAttributes.SnapshotCopy(currentINode);
  }
}
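FileDiff and FileDiffList together give files copy-on-write metadata: each snapshot that saw a different state owns a diff that records, at minimum, the file size at that point. A stripped-down model of the idea, using toy types rather than the HDFS classes:

    import java.util.ArrayList;
    import java.util.List;

    // Toy model of the FileDiff/FileDiffList idea: remember the file length each
    // snapshot saw; reads "in" a snapshot use the recorded length, current reads
    // use the live one.
    class ToyFileDiffs {
      static class Diff {
        final int snapshotId;
        final long fileSize;            // size at snapshot creation, like FileDiff.fileSize
        Diff(int snapshotId, long fileSize) {
          this.snapshotId = snapshotId;
          this.fileSize = fileSize;
        }
      }

      private final List<Diff> diffs = new ArrayList<>();   // ordered oldest -> newest

      void saveSnapshot(int snapshotId, long currentSize) {
        diffs.add(new Diff(snapshotId, currentSize));
      }

      /** Size as seen from a snapshot, or the live size if no diff covers it. */
      long sizeInSnapshot(int snapshotId, long currentSize) {
        for (Diff d : diffs) {
          if (d.snapshotId >= snapshotId) {
            return d.fileSize;   // earliest diff at or after the snapshot has its value
          }
        }
        return currentSize;
      }
    }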
|
@ -1,227 +0,0 @@
|
|||
/**
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
* regarding copyright ownership. The ASF licenses this file
|
||||
* to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
package org.apache.hadoop.hdfs.server.namenode.snapshot;
|
||||
|
||||
import java.io.DataOutput;
|
||||
import java.io.IOException;
|
||||
import java.util.List;
|
||||
|
||||
import org.apache.hadoop.classification.InterfaceAudience;
|
||||
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
|
||||
import org.apache.hadoop.hdfs.server.namenode.FSImageSerialization;
|
||||
import org.apache.hadoop.hdfs.server.namenode.INode;
|
||||
import org.apache.hadoop.hdfs.server.namenode.INode.BlocksMapUpdateInfo;
|
||||
import org.apache.hadoop.hdfs.server.namenode.INodeFile;
|
||||
import org.apache.hadoop.hdfs.server.namenode.INodeFileAttributes;
|
||||
import org.apache.hadoop.hdfs.server.namenode.Quota;
|
||||
import org.apache.hadoop.hdfs.server.namenode.snapshot.SnapshotFSImageFormat.ReferenceMap;
|
||||
|
||||
/**
|
||||
* An interface for {@link INodeFile} to support snapshot.
|
||||
*/
|
||||
@InterfaceAudience.Private
|
||||
public interface FileWithSnapshot {
|
||||
/**
|
||||
* The difference of an {@link INodeFile} between two snapshots.
|
||||
*/
|
||||
public static class FileDiff extends AbstractINodeDiff<INodeFile, INodeFileAttributes, FileDiff> {
|
||||
/** The file size at snapshot creation time. */
|
||||
private final long fileSize;
|
||||
|
||||
private FileDiff(Snapshot snapshot, INodeFile file) {
|
||||
super(snapshot, null, null);
|
||||
fileSize = file.computeFileSize();
|
||||
}
|
||||
|
||||
/** Constructor used by FSImage loading */
|
||||
FileDiff(Snapshot snapshot, INodeFileAttributes snapshotINode,
|
||||
FileDiff posteriorDiff, long fileSize) {
|
||||
super(snapshot, snapshotINode, posteriorDiff);
|
||||
this.fileSize = fileSize;
|
||||
}
|
||||
|
||||
/** @return the file size in the snapshot. */
|
||||
public long getFileSize() {
|
||||
return fileSize;
|
||||
}
|
||||
|
||||
private static Quota.Counts updateQuotaAndCollectBlocks(
|
||||
INodeFile currentINode, FileDiff removed,
|
||||
BlocksMapUpdateInfo collectedBlocks, final List<INode> removedINodes) {
|
||||
FileWithSnapshot sFile = (FileWithSnapshot) currentINode;
|
||||
long oldDiskspace = currentINode.diskspaceConsumed();
|
||||
if (removed.snapshotINode != null) {
|
||||
short replication = removed.snapshotINode.getFileReplication();
|
||||
short currentRepl = currentINode.getBlockReplication();
|
||||
if (currentRepl == 0) {
|
||||
oldDiskspace = currentINode.computeFileSize(true, true) * replication;
|
||||
} else if (replication > currentRepl) {
|
||||
oldDiskspace = oldDiskspace / currentINode.getBlockReplication()
|
||||
* replication;
|
||||
}
|
||||
}
|
||||
|
||||
Util.collectBlocksAndClear(sFile, collectedBlocks, removedINodes);
|
||||
|
||||
long dsDelta = oldDiskspace - currentINode.diskspaceConsumed();
|
||||
return Quota.Counts.newInstance(0, dsDelta);
|
||||
}
|
||||
|
||||
@Override
|
||||
Quota.Counts combinePosteriorAndCollectBlocks(INodeFile currentINode,
|
||||
FileDiff posterior, BlocksMapUpdateInfo collectedBlocks,
|
||||
final List<INode> removedINodes) {
|
||||
return updateQuotaAndCollectBlocks(currentINode, posterior,
|
||||
collectedBlocks, removedINodes);
|
||||
}
|
||||
|
||||
@Override
|
||||
public String toString() {
|
||||
return super.toString() + " fileSize=" + fileSize + ", rep="
|
||||
+ (snapshotINode == null? "?": snapshotINode.getFileReplication());
|
||||
}
|
||||
|
||||
@Override
|
||||
void write(DataOutput out, ReferenceMap referenceMap) throws IOException {
|
||||
writeSnapshot(out);
|
||||
out.writeLong(fileSize);
|
||||
|
||||
// write snapshotINode
|
||||
if (snapshotINode != null) {
|
||||
out.writeBoolean(true);
|
||||
FSImageSerialization.writeINodeFileAttributes(snapshotINode, out);
|
||||
} else {
|
||||
out.writeBoolean(false);
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
Quota.Counts destroyDiffAndCollectBlocks(INodeFile currentINode,
|
||||
BlocksMapUpdateInfo collectedBlocks, final List<INode> removedINodes) {
|
||||
return updateQuotaAndCollectBlocks(currentINode, this,
|
||||
collectedBlocks, removedINodes);
|
||||
}
|
||||
}
|
||||
|
||||
/** A list of FileDiffs for storing snapshot data. */
|
||||
public static class FileDiffList
|
||||
extends AbstractINodeDiffList<INodeFile, INodeFileAttributes, FileDiff> {
|
||||
|
||||
@Override
|
||||
FileDiff createDiff(Snapshot snapshot, INodeFile file) {
|
||||
return new FileDiff(snapshot, file);
|
||||
}
|
||||
|
||||
@Override
|
||||
INodeFileAttributes createSnapshotCopy(INodeFile currentINode) {
|
||||
return new INodeFileAttributes.SnapshotCopy(currentINode);
|
||||
}
|
||||
}
|
||||
|
||||
/** @return the {@link INodeFile} view of this object. */
|
||||
public INodeFile asINodeFile();
|
||||
|
||||
/** @return the file diff list. */
|
||||
public FileDiffList getDiffs();
|
||||
|
||||
/** Is the current file deleted? */
|
||||
public boolean isCurrentFileDeleted();
|
||||
|
||||
/** Delete the file from the current tree */
|
||||
public void deleteCurrentFile();
|
||||
|
||||
/** Utility methods for the classes which implement the interface. */
|
||||
public static class Util {
|
||||
/**
|
||||
* @return block replication, which is the max file replication among
|
||||
* the file and the diff list.
|
||||
*/
|
||||
public static short getBlockReplication(final FileWithSnapshot file) {
|
||||
short max = file.isCurrentFileDeleted()? 0
|
||||
: file.asINodeFile().getFileReplication();
|
||||
for(FileDiff d : file.getDiffs()) {
|
||||
if (d.snapshotINode != null) {
|
||||
final short replication = d.snapshotINode.getFileReplication();
|
||||
if (replication > max) {
|
||||
max = replication;
|
||||
}
|
||||
}
|
||||
}
|
||||
return max;
|
||||
}
|
||||
|
||||
/**
|
||||
* If some blocks at the end of the block list no longer belong to
|
||||
* any inode, collect them and update the block list.
|
||||
*/
|
||||
static void collectBlocksAndClear(final FileWithSnapshot file,
|
||||
final BlocksMapUpdateInfo info, final List<INode> removedINodes) {
|
||||
// check if everything is deleted.
|
||||
if (file.isCurrentFileDeleted()
|
||||
&& file.getDiffs().asList().isEmpty()) {
|
||||
file.asINodeFile().destroyAndCollectBlocks(info, removedINodes);
|
||||
return;
|
||||
}
|
||||
|
||||
// find max file size.
|
||||
final long max;
|
||||
if (file.isCurrentFileDeleted()) {
|
||||
final FileDiff last = file.getDiffs().getLast();
|
||||
max = last == null? 0: last.fileSize;
|
||||
} else {
|
||||
max = file.asINodeFile().computeFileSize();
|
||||
}
|
||||
|
||||
collectBlocksBeyondMax(file, max, info);
|
||||
}
|
||||
|
||||
private static void collectBlocksBeyondMax(final FileWithSnapshot file,
|
||||
final long max, final BlocksMapUpdateInfo collectedBlocks) {
|
||||
final BlockInfo[] oldBlocks = file.asINodeFile().getBlocks();
|
||||
if (oldBlocks != null) {
|
||||
//find the minimum n such that the size of the first n blocks > max
|
||||
int n = 0;
|
||||
for(long size = 0; n < oldBlocks.length && max > size; n++) {
|
||||
size += oldBlocks[n].getNumBytes();
|
||||
}
|
||||
|
||||
// starting from block n, the data is beyond max.
|
||||
if (n < oldBlocks.length) {
|
||||
// resize the array.
|
||||
final BlockInfo[] newBlocks;
|
||||
if (n == 0) {
|
||||
newBlocks = null;
|
||||
} else {
|
||||
newBlocks = new BlockInfo[n];
|
||||
System.arraycopy(oldBlocks, 0, newBlocks, 0, n);
|
||||
}
|
||||
|
||||
// set new blocks
|
||||
file.asINodeFile().setBlocks(newBlocks);
|
||||
|
||||
// collect the blocks beyond max.
|
||||
if (collectedBlocks != null) {
|
||||
for(; n < oldBlocks.length; n++) {
|
||||
collectedBlocks.addDeleteBlock(oldBlocks[n]);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
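The collectBlocksBeyondMax logic from the deleted Util class above keeps the smallest prefix of blocks whose cumulative length covers the largest file size still referenced, and frees the rest. A self-contained sketch of just that computation, using plain block sizes instead of BlockInfo:

    import java.util.Arrays;

    // Sketch of collectBlocksBeyondMax: find the smallest prefix of blocks whose
    // total size reaches max, keep it, and treat the tail as deletable.
    class BlockTruncation {
      static long[] truncateTo(long[] blockSizes, long max) {
        int n = 0;
        for (long size = 0; n < blockSizes.length && max > size; n++) {
          size += blockSizes[n];
        }
        // Blocks [0, n) are retained; blocks [n, length) would be added to the
        // BlocksMapUpdateInfo for deletion in the real code.
        return Arrays.copyOfRange(blockSizes, 0, n);
      }

      public static void main(String[] args) {
        // Block sizes 128, 128, 64 (arbitrary units) with max 200: first two blocks kept.
        long[] kept = truncateTo(new long[] {128, 128, 64}, 200);
        System.out.println(Arrays.toString(kept));   // [128, 128]
      }
    }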
|
@ -432,8 +432,8 @@ public class INodeDirectorySnapshottable extends INodeDirectoryWithSnapshot {
|
|||
parentPath.remove(parentPath.size() - 1);
|
||||
}
|
||||
}
|
||||
} else if (node.isFile() && node.asFile() instanceof FileWithSnapshot) {
|
||||
FileWithSnapshot file = (FileWithSnapshot) node.asFile();
|
||||
} else if (node.isFile() && node.asFile() instanceof INodeFileWithSnapshot) {
|
||||
INodeFileWithSnapshot file = (INodeFileWithSnapshot) node.asFile();
|
||||
Snapshot earlierSnapshot = diffReport.isFromEarlier() ? diffReport.from
|
||||
: diffReport.to;
|
||||
Snapshot laterSnapshot = diffReport.isFromEarlier() ? diffReport.to
|
||||
|
@ -441,7 +441,7 @@ public class INodeDirectorySnapshottable extends INodeDirectoryWithSnapshot {
|
|||
boolean change = file.getDiffs().changedBetweenSnapshots(earlierSnapshot,
|
||||
laterSnapshot);
|
||||
if (change) {
|
||||
diffReport.addFileDiff(file.asINodeFile(), relativePath);
|
||||
diffReport.addFileDiff(file, relativePath);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
@ -37,7 +37,6 @@ import org.apache.hadoop.hdfs.server.namenode.FSImageSerialization;
import org.apache.hadoop.hdfs.server.namenode.INode;
import org.apache.hadoop.hdfs.server.namenode.INodeDirectory;
import org.apache.hadoop.hdfs.server.namenode.INodeDirectoryAttributes;
import org.apache.hadoop.hdfs.server.namenode.INodeDirectoryWithQuota;
import org.apache.hadoop.hdfs.server.namenode.INodeMap;
import org.apache.hadoop.hdfs.server.namenode.INodeReference;
import org.apache.hadoop.hdfs.server.namenode.Quota;

@ -55,7 +54,7 @@ import com.google.common.base.Preconditions;
* storing snapshot data. When there are modifications to the directory, the old
* data is stored in the latest snapshot, if there is any.
*/
public class INodeDirectoryWithSnapshot extends INodeDirectoryWithQuota {
public class INodeDirectoryWithSnapshot extends INodeDirectory {
/**
* The difference between the current state and a previous snapshot
* of the children list of an INodeDirectory.

@ -185,14 +184,10 @@ public class INodeDirectoryWithSnapshot extends INodeDirectoryWithQuota {
INode dnode = deleted.get(d);
if (cnode.compareTo(dnode.getLocalNameBytes()) == 0) {
fullPath[fullPath.length - 1] = cnode.getLocalNameBytes();
if (cnode.isSymlink() && dnode.isSymlink()) {
dList.add(new DiffReportEntry(DiffType.MODIFY, fullPath));
} else {
// must be the case: delete first and then create an inode with the
// same name
cList.add(new DiffReportEntry(DiffType.CREATE, fullPath));
dList.add(new DiffReportEntry(DiffType.DELETE, fullPath));
}
c++;
d++;
} else if (cnode.compareTo(dnode.getLocalNameBytes()) < 0) {

@ -490,7 +485,7 @@ public class INodeDirectoryWithSnapshot extends INodeDirectoryWithQuota {

INodeDirectoryWithSnapshot(INodeDirectory that, boolean adopt,
DirectoryDiffList diffs) {
super(that, adopt, that.getQuotaCounts());
super(that, adopt, true);
this.diffs = diffs != null? diffs: new DirectoryDiffList();
}

@ -775,8 +770,8 @@ public class INodeDirectoryWithSnapshot extends INodeDirectoryWithQuota {
removedINodes, priorDeleted, countDiffChange));

if (isQuotaSet()) {
this.addSpaceConsumed2Cache(-counts.get(Quota.NAMESPACE),
-counts.get(Quota.DISKSPACE));
getDirectoryWithQuotaFeature().addSpaceConsumed2Cache(
-counts.get(Quota.NAMESPACE), -counts.get(Quota.DISKSPACE));
}
return counts;
}

@ -809,10 +804,10 @@ public class INodeDirectoryWithSnapshot extends INodeDirectoryWithQuota {
// For DstReference node, since the node is not in the created list of
// prior, we should treat it as regular file/dir
} else if (topNode.isFile()
&& topNode.asFile() instanceof FileWithSnapshot) {
FileWithSnapshot fs = (FileWithSnapshot) topNode.asFile();
counts.add(fs.getDiffs().deleteSnapshotDiff(post, prior,
topNode.asFile(), collectedBlocks, removedINodes, countDiffChange));
&& topNode.asFile() instanceof INodeFileWithSnapshot) {
INodeFileWithSnapshot fs = (INodeFileWithSnapshot) topNode.asFile();
counts.add(fs.getDiffs().deleteSnapshotDiff(post, prior, fs,
collectedBlocks, removedINodes, countDiffChange));
} else if (topNode.isDirectory()) {
INodeDirectory dir = topNode.asDirectory();
ChildrenDiff priorChildrenDiff = null;
@ -21,6 +21,7 @@ import java.util.List;

import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.hdfs.protocol.QuotaExceededException;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
import org.apache.hadoop.hdfs.server.namenode.INode;
import org.apache.hadoop.hdfs.server.namenode.INodeFile;
import org.apache.hadoop.hdfs.server.namenode.INodeFileAttributes;

@ -31,14 +32,13 @@ import org.apache.hadoop.hdfs.server.namenode.Quota;
* Represent an {@link INodeFile} that is snapshotted.
*/
@InterfaceAudience.Private
public class INodeFileWithSnapshot extends INodeFile
implements FileWithSnapshot {
public class INodeFileWithSnapshot extends INodeFile {
private final FileDiffList diffs;
private boolean isCurrentFileDeleted = false;

public INodeFileWithSnapshot(INodeFile f) {
this(f, f instanceof FileWithSnapshot?
((FileWithSnapshot)f).getDiffs(): null);
this(f, f instanceof INodeFileWithSnapshot ?
((INodeFileWithSnapshot) f).getDiffs() : null);
}

public INodeFileWithSnapshot(INodeFile f, FileDiffList diffs) {

@ -46,12 +46,12 @@ public class INodeFileWithSnapshot extends INodeFile
this.diffs = diffs != null? diffs: new FileDiffList();
}

@Override
/** Is the current file deleted? */
public boolean isCurrentFileDeleted() {
return isCurrentFileDeleted;
}

@Override
/** Delete the file from the current tree */
public void deleteCurrentFile() {
isCurrentFileDeleted = true;
}

@ -70,12 +70,7 @@ public class INodeFileWithSnapshot extends INodeFile
return this;
}

@Override
public INodeFile asINodeFile() {
return this;
}

@Override
/** @return the file diff list. */
public FileDiffList getDiffs() {
return diffs;
}

@ -90,7 +85,7 @@ public class INodeFileWithSnapshot extends INodeFile
recordModification(prior, null);
deleteCurrentFile();
}
Util.collectBlocksAndClear(this, collectedBlocks, removedINodes);
this.collectBlocksAndClear(collectedBlocks, removedINodes);
return Quota.Counts.newInstance();
} else { // delete a snapshot
prior = getDiffs().updatePrior(snapshot, prior);

@ -104,4 +99,100 @@ public class INodeFileWithSnapshot extends INodeFile
return super.toDetailString()
+ (isCurrentFileDeleted()? "(DELETED), ": ", ") + diffs;
}

/**
* @return block replication, which is the max file replication among
* the file and the diff list.
*/
@Override
public short getBlockReplication() {
short max = isCurrentFileDeleted() ? 0 : getFileReplication();
for(FileDiff d : getDiffs()) {
if (d.snapshotINode != null) {
final short replication = d.snapshotINode.getFileReplication();
if (replication > max) {
max = replication;
}
}
}
return max;
}

/**
* If some blocks at the end of the block list no longer belongs to
* any inode, collect them and update the block list.
*/
void collectBlocksAndClear(final BlocksMapUpdateInfo info,
final List<INode> removedINodes) {
// check if everything is deleted.
if (isCurrentFileDeleted() && getDiffs().asList().isEmpty()) {
destroyAndCollectBlocks(info, removedINodes);
return;
}

// find max file size.
final long max;
if (isCurrentFileDeleted()) {
final FileDiff last = getDiffs().getLast();
max = last == null? 0: last.getFileSize();
} else {
max = computeFileSize();
}

collectBlocksBeyondMax(max, info);
}

private void collectBlocksBeyondMax(final long max,
final BlocksMapUpdateInfo collectedBlocks) {
final BlockInfo[] oldBlocks = getBlocks();
if (oldBlocks != null) {
//find the minimum n such that the size of the first n blocks > max
int n = 0;
for(long size = 0; n < oldBlocks.length && max > size; n++) {
size += oldBlocks[n].getNumBytes();
}

// starting from block n, the data is beyond max.
if (n < oldBlocks.length) {
// resize the array.
final BlockInfo[] newBlocks;
if (n == 0) {
newBlocks = null;
} else {
newBlocks = new BlockInfo[n];
System.arraycopy(oldBlocks, 0, newBlocks, 0, n);
}

// set new blocks
setBlocks(newBlocks);

// collect the blocks beyond max.
if (collectedBlocks != null) {
for(; n < oldBlocks.length; n++) {
collectedBlocks.addDeleteBlock(oldBlocks[n]);
}
}
}
}
}

Quota.Counts updateQuotaAndCollectBlocks(FileDiff removed,
BlocksMapUpdateInfo collectedBlocks, final List<INode> removedINodes) {
long oldDiskspace = this.diskspaceConsumed();
if (removed.snapshotINode != null) {
short replication = removed.snapshotINode.getFileReplication();
short currentRepl = getBlockReplication();
if (currentRepl == 0) {
oldDiskspace = computeFileSize(true, true) * replication;
} else if (replication > currentRepl) {
oldDiskspace = oldDiskspace / getBlockReplication()
* replication;
}
}

this.collectBlocksAndClear(collectedBlocks, removedINodes);

long dsDelta = oldDiskspace - diskspaceConsumed();
return Quota.Counts.newInstance(0, dsDelta);
}
}
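The collectBlocksBeyondMax() method added above truncates a snapshotted file's block list to the largest size still referenced by any snapshot: it finds the minimum n such that the first n blocks already cover max bytes, keeps those, and hands every later block over for deletion. The standalone sketch below (plain longs instead of BlockInfo/BlocksMapUpdateInfo, class and method names hypothetical) illustrates just that loop, not the HDFS class itself.

```java
// Standalone sketch of the truncation loop in collectBlocksBeyondMax above.
// Given the byte sizes of a file's blocks and the maximum file size still
// referenced by any snapshot, find the minimum n such that the first n blocks
// already cover max bytes; blocks from index n on can be collected for deletion.
public final class BlockTruncationSketch {
  static int blocksToKeep(long[] blockSizes, long max) {
    int n = 0;
    for (long size = 0; n < blockSizes.length && max > size; n++) {
      size += blockSizes[n];
    }
    return n;
  }

  public static void main(String[] args) {
    long[] sizes = {128, 128, 128};   // hypothetical block sizes in bytes
    int n = blocksToKeep(sizes, 200); // snapshots still reference 200 bytes
    // Keeps blocks [0, n); blocks [n, length) would be added as delete-blocks.
    System.out.println("keep " + n + " block(s), collect " + (sizes.length - n));
  }
}
```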
@ -136,7 +136,7 @@ public class Snapshot implements Comparable<byte[]> {
/** The root directory of the snapshot. */
static public class Root extends INodeDirectory {
Root(INodeDirectory other) {
super(other, false);
super(other, false, false);
}

@Override

@ -36,8 +36,6 @@ import org.apache.hadoop.hdfs.server.namenode.INodeDirectoryAttributes;
import org.apache.hadoop.hdfs.server.namenode.INodeFile;
import org.apache.hadoop.hdfs.server.namenode.INodeFileAttributes;
import org.apache.hadoop.hdfs.server.namenode.INodeReference;
import org.apache.hadoop.hdfs.server.namenode.snapshot.FileWithSnapshot.FileDiff;
import org.apache.hadoop.hdfs.server.namenode.snapshot.FileWithSnapshot.FileDiffList;
import org.apache.hadoop.hdfs.server.namenode.snapshot.INodeDirectoryWithSnapshot.DirectoryDiff;
import org.apache.hadoop.hdfs.server.namenode.snapshot.INodeDirectoryWithSnapshot.DirectoryDiffList;
import org.apache.hadoop.hdfs.tools.snapshot.SnapshotDiff;

@ -99,8 +97,8 @@ public class SnapshotFSImageFormat {

public static void saveFileDiffList(final INodeFile file,
final DataOutput out) throws IOException {
saveINodeDiffs(file instanceof FileWithSnapshot?
((FileWithSnapshot)file).getDiffs(): null, out, null);
saveINodeDiffs(file instanceof INodeFileWithSnapshot?
((INodeFileWithSnapshot) file).getDiffs(): null, out, null);
}

public static FileDiffList loadFileDiffList(DataInput in,
@ -29,11 +29,13 @@ import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.RemoteIterator;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.CacheDirectiveEntry;
import org.apache.hadoop.hdfs.protocol.CacheDirectiveStats;
import org.apache.hadoop.hdfs.protocol.CachePoolInfo;
import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo;
import org.apache.hadoop.hdfs.protocol.CacheDirectiveStats;
import org.apache.hadoop.hdfs.protocol.CachePoolEntry;
import org.apache.hadoop.hdfs.protocol.CachePoolInfo;
import org.apache.hadoop.hdfs.server.namenode.CachePool;
import org.apache.hadoop.hdfs.tools.TableListing.Justification;
import org.apache.hadoop.ipc.RemoteException;

@ -131,7 +133,8 @@ public class CacheAdmin extends Configured implements Tool {
@Override
public String getShortUsage() {
return "[" + getName() +
" -path <path> -replication <replication> -pool <pool-name>]\n";
" -path <path> -pool <pool-name> " +
"[-replication <replication>] [-ttl <time-to-live>]]\n";
}

@Override

@ -139,11 +142,15 @@ public class CacheAdmin extends Configured implements Tool {
TableListing listing = getOptionDescriptionListing();
listing.addRow("<path>", "A path to cache. The path can be " +
"a directory or a file.");
listing.addRow("<replication>", "The cache replication factor to use. " +
"Defaults to 1.");
listing.addRow("<pool-name>", "The pool to which the directive will be " +
"added. You must have write permission on the cache pool "
+ "in order to add new directives.");
listing.addRow("<replication>", "The cache replication factor to use. " +
"Defaults to 1.");
listing.addRow("<time-to-live>", "How long the directive is " +
"valid. Can be specified in minutes, hours, and days via e.g. " +
"30m, 4h, 2d. Valid units are [smhd]." +
" If unspecified, the directive never expires.");
return getShortUsage() + "\n" +
"Add a new cache directive.\n\n" +
listing.toString();

@ -151,33 +158,48 @@ public class CacheAdmin extends Configured implements Tool {

@Override
public int run(Configuration conf, List<String> args) throws IOException {
CacheDirectiveInfo.Builder builder = new CacheDirectiveInfo.Builder();

String path = StringUtils.popOptionWithArgument("-path", args);
if (path == null) {
System.err.println("You must specify a path with -path.");
return 1;
}
short replication = 1;
String replicationString =
StringUtils.popOptionWithArgument("-replication", args);
if (replicationString != null) {
replication = Short.parseShort(replicationString);
}
builder.setPath(new Path(path));

String poolName = StringUtils.popOptionWithArgument("-pool", args);
if (poolName == null) {
System.err.println("You must specify a pool name with -pool.");
return 1;
}
builder.setPool(poolName);

String replicationString =
StringUtils.popOptionWithArgument("-replication", args);
if (replicationString != null) {
Short replication = Short.parseShort(replicationString);
builder.setReplication(replication);
}

String ttlString = StringUtils.popOptionWithArgument("-ttl", args);
if (ttlString != null) {
try {
long ttl = DFSUtil.parseRelativeTime(ttlString);
builder.setExpiration(CacheDirectiveInfo.Expiration.newRelative(ttl));
} catch (IOException e) {
System.err.println(
"Error while parsing ttl value: " + e.getMessage());
return 1;
}
}

if (!args.isEmpty()) {
System.err.println("Can't understand argument: " + args.get(0));
return 1;
}

DistributedFileSystem dfs = getDFS(conf);
CacheDirectiveInfo directive = new CacheDirectiveInfo.Builder().
setPath(new Path(path)).
setReplication(replication).
setPool(poolName).
build();
CacheDirectiveInfo directive = builder.build();
try {
long id = dfs.addCacheDirective(directive);
System.out.println("Added cache directive " + id);

@ -260,7 +282,7 @@ public class CacheAdmin extends Configured implements Tool {
public String getShortUsage() {
return "[" + getName() +
" -id <id> [-path <path>] [-replication <replication>] " +
"[-pool <pool-name>] ]\n";
"[-pool <pool-name>] [-ttl <time-to-live>]]\n";
}

@Override

@ -274,6 +296,10 @@ public class CacheAdmin extends Configured implements Tool {
listing.addRow("<pool-name>", "The pool to which the directive will be " +
"added. You must have write permission on the cache pool "
+ "in order to move a directive into it. (optional)");
listing.addRow("<time-to-live>", "How long the directive is " +
"valid. Can be specified in minutes, hours, and days via e.g. " +
"30m, 4h, 2d. Valid units are [smhd]." +
" If unspecified, the directive never expires.");
return getShortUsage() + "\n" +
"Modify a cache directive.\n\n" +
listing.toString();

@ -307,6 +333,19 @@ public class CacheAdmin extends Configured implements Tool {
builder.setPool(poolName);
modified = true;
}
String ttlString = StringUtils.popOptionWithArgument("-ttl", args);
if (ttlString != null) {
long ttl;
try {
ttl = DFSUtil.parseRelativeTime(ttlString);
} catch (IOException e) {
System.err.println(
"Error while parsing ttl value: " + e.getMessage());
return 1;
}
builder.setExpiration(CacheDirectiveInfo.Expiration.newRelative(ttl));
modified = true;
}
if (!args.isEmpty()) {
System.err.println("Can't understand argument: " + args.get(0));
System.err.println("Usage is " + getShortUsage());

@ -434,7 +473,8 @@ public class CacheAdmin extends Configured implements Tool {
TableListing.Builder tableBuilder = new TableListing.Builder().
addField("ID", Justification.RIGHT).
addField("POOL", Justification.LEFT).
addField("REPLICATION", Justification.RIGHT).
addField("REPL", Justification.RIGHT).
addField("EXPIRY", Justification.LEFT).
addField("PATH", Justification.LEFT);
if (printStats) {
tableBuilder.addField("NEEDED", Justification.RIGHT).

@ -455,6 +495,14 @@ public class CacheAdmin extends Configured implements Tool {
row.add("" + directive.getId());
row.add(directive.getPool());
row.add("" + directive.getReplication());
String expiry;
if (directive.getExpiration().getMillis() ==
CacheDirectiveInfo.Expiration.EXPIRY_NEVER) {
expiry = "never";
} else {
expiry = directive.getExpiration().toString();
}
row.add(expiry);
row.add(directive.getPath().toUri().getPath());
if (printStats) {
row.add("" + stats.getBytesNeeded());

@ -755,9 +803,10 @@ public class CacheAdmin extends Configured implements Tool {
build();
int numResults = 0;
try {
RemoteIterator<CachePoolInfo> iter = dfs.listCachePools();
RemoteIterator<CachePoolEntry> iter = dfs.listCachePools();
while (iter.hasNext()) {
CachePoolInfo info = iter.next();
CachePoolEntry entry = iter.next();
CachePoolInfo info = entry.getInfo();
String[] row = new String[5];
if (name == null || info.getPoolName().equals(name)) {
row[0] = info.getPoolName();

@ -822,14 +871,15 @@ public class CacheAdmin extends Configured implements Tool {
return 0;
}
String commandName = args.get(0);
Command command = determineCommand(commandName);
// prepend a dash to match against the command names
Command command = determineCommand("-"+commandName);
if (command == null) {
System.err.print("Sorry, I don't know the command '" +
commandName + "'.\n");
System.err.print("Valid command names are:\n");
System.err.print("Valid help command names are:\n");
String separator = "";
for (Command c : COMMANDS) {
System.err.print(separator + c.getName());
System.err.print(separator + c.getName().substring(1));
separator = ", ";
}
System.err.print("\n");
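The new -ttl option accepts relative times such as 30m, 4h or 2d (units [smhd], per the help text above) and hands them to DFSUtil.parseRelativeTime(), whose implementation is not part of this diff. The sketch below is an illustrative, standalone parser with the semantics described by the help text and exercised by testRelativeTimeConversion() further down; class and method names are hypothetical and the error strings only mirror the substrings the tests check for.

```java
// Illustrative sketch only -- DFSUtil.parseRelativeTime itself is not shown in
// this diff. Parses a relative time such as "30m", "4h" or "2d" (units [smhd])
// into milliseconds.
import java.io.IOException;

public final class RelativeTimeSketch {
  static long parseRelativeTime(String s) throws IOException {
    if (s.length() < 2) {
      throw new IOException("Unable to parse relative time value " + s + ": too short");
    }
    String number = s.substring(0, s.length() - 1);
    long value;
    try {
      value = Long.parseLong(number);
    } catch (NumberFormatException e) {
      throw new IOException(number + " is not a number");
    }
    long multiplier;
    switch (Character.toLowerCase(s.charAt(s.length() - 1))) {
      case 's': multiplier = 1000L; break;
      case 'm': multiplier = 60L * 1000; break;
      case 'h': multiplier = 60L * 60 * 1000; break;
      case 'd': multiplier = 24L * 60 * 60 * 1000; break;
      default:  throw new IOException("unknown time unit in " + s);
    }
    return value * multiplier;
  }

  public static void main(String[] args) throws IOException {
    System.out.println(parseRelativeTime("4h")); // 14400000 ms
  }
}
```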
@ -36,9 +36,10 @@ import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.HAUtil;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.server.namenode.NamenodeFsck;
import org.apache.hadoop.hdfs.web.URLConnectionFactory;
import org.apache.hadoop.http.HttpConfig;
import org.apache.hadoop.security.SecurityUtil;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.authentication.client.AuthenticationException;
import org.apache.hadoop.util.StringUtils;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;

@ -82,18 +83,28 @@ public class DFSck extends Configured implements Tool {
+ "\t-delete\tdelete corrupted files\n"
+ "\t-files\tprint out files being checked\n"
+ "\t-openforwrite\tprint out files opened for write\n"
+ "\t-includeSnapshots\tinclude snapshot data if the given path"
+ " indicates a snapshottable directory or there are "
+ "snapshottable directories under it\n"
+ "\t-list-corruptfileblocks\tprint out list of missing "
+ "blocks and files they belong to\n"
+ "\t-blocks\tprint out block report\n"
+ "\t-locations\tprint out locations for every block\n"
+ "\t-racks\tprint out network topology for data-node locations\n"
+ "\t\tBy default fsck ignores files opened for write, "
+ "\t-racks\tprint out network topology for data-node locations\n\n"
+ "Please Note:\n"
+ "\t1. By default fsck ignores files opened for write, "
+ "use -openforwrite to report such files. They are usually "
+ " tagged CORRUPT or HEALTHY depending on their block "
+ "allocation status";
+ "allocation status\n"
+ "\t2. Option -includeSnapshots should not be used for comparing stats,"
+ " should be used only for HEALTH check, as this may contain duplicates"
+ " if the same file present in both original fs tree "
+ "and inside snapshots.";

private final UserGroupInformation ugi;
private final PrintStream out;
private final URLConnectionFactory connectionFactory;
private final boolean isSpnegoEnabled;

/**
* Filesystem checker.

@ -107,6 +118,9 @@ public class DFSck extends Configured implements Tool {
super(conf);
this.ugi = UserGroupInformation.getCurrentUser();
this.out = out;
this.connectionFactory = URLConnectionFactory
.newDefaultURLConnectionFactory(conf);
this.isSpnegoEnabled = UserGroupInformation.isSecurityEnabled();
}

/**

@ -158,7 +172,12 @@ public class DFSck extends Configured implements Tool {
url.append("&startblockafter=").append(String.valueOf(cookie));
}
URL path = new URL(url.toString());
URLConnection connection = SecurityUtil.openSecureHttpConnection(path);
URLConnection connection;
try {
connection = connectionFactory.openConnection(path, isSpnegoEnabled);
} catch (AuthenticationException e) {
throw new IOException(e);
}
InputStream stream = connection.getInputStream();
BufferedReader input = new BufferedReader(new InputStreamReader(
stream, "UTF-8"));

@ -255,6 +274,8 @@ public class DFSck extends Configured implements Tool {
else if (args[idx].equals("-list-corruptfileblocks")) {
url.append("&listcorruptfileblocks=1");
doListCorruptFileBlocks = true;
} else if (args[idx].equals("-includeSnapshots")) {
url.append("&includeSnapshots=1");
} else if (!args[idx].startsWith("-")) {
if (null == dir) {
dir = args[idx];

@ -278,7 +299,12 @@ public class DFSck extends Configured implements Tool {
return listCorruptFileBlocks(dir, url.toString());
}
URL path = new URL(url.toString());
URLConnection connection = SecurityUtil.openSecureHttpConnection(path);
URLConnection connection;
try {
connection = connectionFactory.openConnection(path, isSpnegoEnabled);
} catch (AuthenticationException e) {
throw new IOException(e);
}
InputStream stream = connection.getInputStream();
BufferedReader input = new BufferedReader(new InputStreamReader(
stream, "UTF-8"));
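For reference, a hedged example of driving the checker with a couple of the options documented in the usage string above. It assumes the DFSck(Configuration, PrintStream) constructor shape visible in the hunk above; the path and option set are purely illustrative.

```java
// Hedged usage sketch: run fsck programmatically with the options documented
// above, equivalent to: hdfs fsck / -includeSnapshots -files
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.tools.DFSck;
import org.apache.hadoop.util.ToolRunner;

public final class FsckExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = new HdfsConfiguration();
    // Assumes the DFSck(conf, out) constructor seen in the hunk above.
    int rc = ToolRunner.run(conf,
        new DFSck(conf, System.out),
        new String[] {"/", "-includeSnapshots", "-files"});
    System.exit(rc);
  }
}
```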
@ -145,7 +145,7 @@ public class DelegationTokenFetcher {
// default to using the local file system
FileSystem local = FileSystem.getLocal(conf);
final Path tokenFile = new Path(local.getWorkingDirectory(), remaining[0]);
final URLConnectionFactory connectionFactory = URLConnectionFactory.DEFAULT_CONNECTION_FACTORY;
final URLConnectionFactory connectionFactory = URLConnectionFactory.DEFAULT_SYSTEM_CONNECTION_FACTORY;

// Login the current user
UserGroupInformation.getCurrentUser().doAs(

@ -176,10 +176,9 @@ public class HftpFileSystem extends FileSystem
* Initialize connectionFactory and tokenAspect. This function is intended to
* be overridden by HsFtpFileSystem.
*/
protected void initConnectionFactoryAndTokenAspect(Configuration conf)
protected void initTokenAspect(Configuration conf)
throws IOException {
tokenAspect = new TokenAspect<HftpFileSystem>(this, TOKEN_KIND);
connectionFactory = URLConnectionFactory.DEFAULT_CONNECTION_FACTORY;
}

@Override

@ -187,6 +186,8 @@ public class HftpFileSystem extends FileSystem
throws IOException {
super.initialize(name, conf);
setConf(conf);
this.connectionFactory = URLConnectionFactory
.newDefaultURLConnectionFactory(conf);
this.ugi = UserGroupInformation.getCurrentUser();
this.nnUri = getNamenodeUri(name);

@ -197,7 +198,7 @@ public class HftpFileSystem extends FileSystem
throw new IllegalArgumentException(e);
}

initConnectionFactoryAndTokenAspect(conf);
initTokenAspect(conf);
if (UserGroupInformation.isSecurityEnabled()) {
tokenAspect.initDelegationToken(ugi);
}

@ -338,7 +339,7 @@ public class HftpFileSystem extends FileSystem
}

static class RangeHeaderUrlOpener extends ByteRangeInputStream.URLOpener {
URLConnectionFactory connectionFactory = URLConnectionFactory.DEFAULT_CONNECTION_FACTORY;
URLConnectionFactory connectionFactory = URLConnectionFactory.DEFAULT_SYSTEM_CONNECTION_FACTORY;

RangeHeaderUrlOpener(final URL url) {
super(url);

@ -19,7 +19,6 @@
package org.apache.hadoop.hdfs.web;

import java.io.IOException;
import java.security.GeneralSecurityException;

import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;

@ -61,18 +60,8 @@ public class HsftpFileSystem extends HftpFileSystem {
}

@Override
protected void initConnectionFactoryAndTokenAspect(Configuration conf) throws IOException {
protected void initTokenAspect(Configuration conf) throws IOException {
tokenAspect = new TokenAspect<HftpFileSystem>(this, TOKEN_KIND);

connectionFactory = new URLConnectionFactory(
URLConnectionFactory.DEFAULT_SOCKET_TIMEOUT);
try {
connectionFactory.setConnConfigurator(URLConnectionFactory
.newSslConnConfigurator(URLConnectionFactory.DEFAULT_SOCKET_TIMEOUT,
conf));
} catch (GeneralSecurityException e) {
throw new IOException(e);
}
}

@Override

@ -17,10 +17,6 @@
*/
package org.apache.hadoop.hdfs.web;

import java.io.IOException;
import java.security.GeneralSecurityException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.io.Text;

@ -44,20 +40,6 @@ public class SWebHdfsFileSystem extends WebHdfsFileSystem {
tokenAspect = new TokenAspect<WebHdfsFileSystem>(this, TOKEN_KIND);
}

@Override
protected void initializeConnectionFactory(Configuration conf)
throws IOException {
connectionFactory = new URLConnectionFactory(
URLConnectionFactory.DEFAULT_SOCKET_TIMEOUT);
try {
connectionFactory.setConnConfigurator(URLConnectionFactory
.newSslConnConfigurator(URLConnectionFactory.DEFAULT_SOCKET_TIMEOUT,
conf));
} catch (GeneralSecurityException e) {
throw new IOException(e);
}
}

@Override
protected int getDefaultPort() {
return getConf().getInt(DFSConfigKeys.DFS_NAMENODE_HTTPS_PORT_KEY,
@ -39,6 +39,8 @@ import org.apache.hadoop.security.authentication.client.AuthenticationException;
import org.apache.hadoop.security.authentication.client.ConnectionConfigurator;
import org.apache.hadoop.security.ssl.SSLFactory;

import com.google.common.annotations.VisibleForTesting;

/**
* Utilities for handling URLs
*/

@ -54,26 +56,50 @@ public class URLConnectionFactory {
* Timeout for socket connects and reads
*/
public final static int DEFAULT_SOCKET_TIMEOUT = 1 * 60 * 1000; // 1 minute
private final ConnectionConfigurator connConfigurator;

public static final URLConnectionFactory DEFAULT_CONNECTION_FACTORY = new URLConnectionFactory(
DEFAULT_SOCKET_TIMEOUT);

private int socketTimeout;

/** Configure connections for AuthenticatedURL */
private ConnectionConfigurator connConfigurator = new ConnectionConfigurator() {
private static final ConnectionConfigurator DEFAULT_TIMEOUT_CONN_CONFIGURATOR = new ConnectionConfigurator() {
@Override
public HttpURLConnection configure(HttpURLConnection conn)
throws IOException {
URLConnectionFactory.setTimeouts(conn, socketTimeout);
URLConnectionFactory.setTimeouts(conn, DEFAULT_SOCKET_TIMEOUT);
return conn;
}
};

/**
* The URLConnectionFactory that sets the default timeout and it only trusts
* Java's SSL certificates.
*/
public static final URLConnectionFactory DEFAULT_SYSTEM_CONNECTION_FACTORY = new URLConnectionFactory(
DEFAULT_TIMEOUT_CONN_CONFIGURATOR);

/**
* Construct a new URLConnectionFactory based on the configuration. It will
* try to load SSL certificates when it is specified.
*/
public static URLConnectionFactory newDefaultURLConnectionFactory(Configuration conf) {
ConnectionConfigurator conn = null;
try {
conn = newSslConnConfigurator(DEFAULT_SOCKET_TIMEOUT, conf);
} catch (Exception e) {
LOG.debug(
"Cannot load customized ssl related configuration. Fallback to system-generic settings.",
e);
conn = DEFAULT_TIMEOUT_CONN_CONFIGURATOR;
}
return new URLConnectionFactory(conn);
}

@VisibleForTesting
URLConnectionFactory(ConnectionConfigurator connConfigurator) {
this.connConfigurator = connConfigurator;
}

/**
* Create a new ConnectionConfigurator for SSL connections
*/
static ConnectionConfigurator newSslConnConfigurator(final int timeout,
private static ConnectionConfigurator newSslConnConfigurator(final int timeout,
Configuration conf) throws IOException, GeneralSecurityException {
final SSLFactory factory;
final SSLSocketFactory sf;

@ -99,10 +125,6 @@ public class URLConnectionFactory {
};
}

public URLConnectionFactory(int socketTimeout) {
this.socketTimeout = socketTimeout;
}

/**
* Opens a url with read and connect timeouts
*

@ -153,14 +175,6 @@ public class URLConnectionFactory {
}
}

public ConnectionConfigurator getConnConfigurator() {
return connConfigurator;
}

public void setConnConfigurator(ConnectionConfigurator connConfigurator) {
this.connConfigurator = connConfigurator;
}

/**
* Sets timeout parameters on the given URLConnection.
*

@ -169,7 +183,7 @@ public class URLConnectionFactory {
* @param socketTimeout
* the connection and read timeout of the connection.
*/
static void setTimeouts(URLConnection connection, int socketTimeout) {
private static void setTimeouts(URLConnection connection, int socketTimeout) {
connection.setConnectTimeout(socketTimeout);
connection.setReadTimeout(socketTimeout);
}
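Taken together, the URLConnectionFactory changes above mean callers build a factory from the configuration (SSL settings when loadable, otherwise the default-timeout configurator) and open connections through it instead of going through SecurityUtil. A hedged usage sketch based only on the methods shown in this patch; the target URL is made up:

```java
// Hedged usage sketch: create a factory from the configuration and open a
// connection the way DFSck does above. The URL is illustrative only.
import java.net.URL;
import java.net.URLConnection;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.web.URLConnectionFactory;
import org.apache.hadoop.security.UserGroupInformation;

public final class ConnectionFactoryExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    URLConnectionFactory factory =
        URLConnectionFactory.newDefaultURLConnectionFactory(conf);
    boolean spnego = UserGroupInformation.isSecurityEnabled();
    URL url = new URL("http://namenode.example.com:50070/fsck?ugi=hdfs&path=%2F");
    // openConnection(URL, boolean) may throw AuthenticationException when
    // SPNEGO negotiation fails; DFSck above wraps that in an IOException.
    URLConnection conn = factory.openConnection(url, spnego);
    System.out.println("content type: " + conn.getContentType());
  }
}
```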
@ -112,7 +112,7 @@ public class WebHdfsFileSystem extends FileSystem
public static final String PATH_PREFIX = "/" + SCHEME + "/v" + VERSION;

/** Default connection factory may be overridden in tests to use smaller timeout values */
URLConnectionFactory connectionFactory = URLConnectionFactory.DEFAULT_CONNECTION_FACTORY;
protected URLConnectionFactory connectionFactory;

/** Delegation token kind */
public static final Text TOKEN_KIND = new Text("WEBHDFS delegation");

@ -152,22 +152,15 @@ public class WebHdfsFileSystem extends FileSystem
tokenAspect = new TokenAspect<WebHdfsFileSystem>(this, TOKEN_KIND);
}

/**
* Initialize connectionFactory. This function is intended to
* be overridden by SWebHdfsFileSystem.
*/
protected void initializeConnectionFactory(Configuration conf)
throws IOException {
connectionFactory = URLConnectionFactory.DEFAULT_CONNECTION_FACTORY;
}

@Override
public synchronized void initialize(URI uri, Configuration conf
) throws IOException {
super.initialize(uri, conf);
setConf(conf);
connectionFactory = URLConnectionFactory
.newDefaultURLConnectionFactory(conf);
initializeTokenAspect();
initializeConnectionFactory(conf);

ugi = UserGroupInformation.getCurrentUser();
@ -369,12 +369,19 @@ message CacheDirectiveInfoProto {
optional string path = 2;
optional uint32 replication = 3;
optional string pool = 4;
optional CacheDirectiveInfoExpirationProto expiration = 5;
}

message CacheDirectiveInfoExpirationProto {
required int64 millis = 1;
required bool isRelative = 2;
}

message CacheDirectiveStatsProto {
required int64 bytesNeeded = 1;
required int64 bytesCached = 2;
required int64 filesAffected = 3;
required bool hasExpired = 4;
}

message AddCacheDirectiveRequestProto {

@ -422,6 +429,12 @@ message CachePoolInfoProto {
optional int32 weight = 5;
}

message CachePoolStatsProto {
required int64 bytesNeeded = 1;
required int64 bytesCached = 2;
required int64 filesAffected = 3;
}

message AddCachePoolRequestProto {
required CachePoolInfoProto info = 1;
}

@ -448,12 +461,13 @@ message ListCachePoolsRequestProto {
}

message ListCachePoolsResponseProto {
repeated ListCachePoolsResponseElementProto elements = 1;
repeated CachePoolEntryProto entries = 1;
required bool hasMore = 2;
}

message ListCachePoolsResponseElementProto {
message CachePoolEntryProto {
required CachePoolInfoProto info = 1;
required CachePoolStatsProto stats = 2;
}

message GetFileLinkInfoRequestProto {
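CacheDirectiveInfoExpirationProto carries a millis value plus an isRelative flag, and CacheAdmin above populates it via CacheDirectiveInfo.Expiration.newRelative(ttl). Assuming "relative" simply means "millis from now", the mapping to an absolute deadline would look roughly like the sketch below; this illustrates the wire format, it is not the HDFS implementation.

```java
// Hedged sketch of the expiration semantics implied by
// CacheDirectiveInfoExpirationProto { millis, isRelative }: a relative value is
// treated here as an offset from "now", an absolute value as an epoch timestamp.
public final class ExpirationSketch {
  static long absoluteExpiryMs(long millis, boolean isRelative, long nowMs) {
    return isRelative ? nowMs + millis : millis;
  }

  public static void main(String[] args) {
    long now = System.currentTimeMillis();
    // e.g. a directive added with -ttl 4h (4 * 60 * 60 * 1000 ms, relative)
    long expiry = absoluteExpiryMs(4L * 60 * 60 * 1000, true, now);
    System.out.println("expires in " + (expiry - now) + " ms"); // 14400000
  }
}
```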
@ -53,11 +53,12 @@ HDFS NFS Gateway
* If the client mounts the export with access time update allowed, make sure the following
property is not disabled in the configuration file. Only NameNode needs to restart after
this property is changed. On some Unix systems, the user can disable access time update
by mounting the export with "noatime".
by mounting the export with "noatime". If the export is mounted with "noatime", the user
doesn't need to change the following property and thus no need to restart namenode.

----
<property>
<name>dfs.access.time.precision</name>
<name>dfs.namenode.accesstime.precision</name>
<value>3600000</value>
<description>The access time for HDFS file is precise upto this value.
The default value is 1 hour. Setting a value of 0 disables
@ -191,21 +191,25 @@ public class TestDFSRollback {
// Create a previous snapshot for the blockpool
UpgradeUtilities.createBlockPoolStorageDirs(dataNodeDirs, "previous",
UpgradeUtilities.getCurrentBlockPoolID(cluster));
// Older LayoutVersion to make it rollback
// Put newer layout version in current.
storageInfo = new StorageInfo(
UpgradeUtilities.getCurrentLayoutVersion()+1,
UpgradeUtilities.getCurrentLayoutVersion()-1,
UpgradeUtilities.getCurrentNamespaceID(cluster),
UpgradeUtilities.getCurrentClusterID(cluster),
UpgradeUtilities.getCurrentFsscTime(cluster));
// Create old VERSION file for each data dir

// Overwrite VERSION file in the current directory of
// volume directories and block pool slice directories
// with a layout version from future.
File[] dataCurrentDirs = new File[dataNodeDirs.length];
for (int i=0; i<dataNodeDirs.length; i++) {
Path bpPrevPath = new Path(dataNodeDirs[i] + "/current/"
+ UpgradeUtilities.getCurrentBlockPoolID(cluster));
UpgradeUtilities.createBlockPoolVersionFile(
new File(bpPrevPath.toString()),
dataCurrentDirs[i] = new File((new Path(dataNodeDirs[i]
+ "/current")).toString());
}
UpgradeUtilities.createDataNodeVersionFile(
dataCurrentDirs,
storageInfo,
UpgradeUtilities.getCurrentBlockPoolID(cluster));
}

cluster.startDataNodes(conf, 1, false, StartupOption.ROLLBACK, null);
assertTrue(cluster.isDataNodeUp());
@ -31,6 +31,7 @@ import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTP_A
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMESERVICES;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMESERVICE_ID;
import static org.apache.hadoop.test.GenericTestUtils.assertExceptionContains;
import static org.hamcrest.CoreMatchers.not;
import static org.junit.Assert.assertArrayEquals;
import static org.junit.Assert.assertEquals;

@ -62,6 +63,7 @@ import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.hadoop.util.Shell;
import org.junit.Assume;
import org.junit.Before;

@ -724,4 +726,43 @@ public class TestDFSUtil {
DFSConfigKeys.DFS_WEB_AUTHENTICATION_KERBEROS_KEYTAB_KEY,
DFSUtil.getSpnegoKeytabKey(conf, defaultKey));
}

@Test(timeout=1000)
public void testDurationToString() throws Exception {
assertEquals("000:00:00:00", DFSUtil.durationToString(0));
try {
DFSUtil.durationToString(-199);
} catch (IllegalArgumentException e) {
GenericTestUtils.assertExceptionContains("Invalid negative duration", e);
}
assertEquals("001:01:01:01",
DFSUtil.durationToString(((24*60*60)+(60*60)+(60)+1)*1000));
assertEquals("000:23:59:59",
DFSUtil.durationToString(((23*60*60)+(59*60)+(59))*1000));
}

@Test(timeout=5000)
public void testRelativeTimeConversion() throws Exception {
try {
DFSUtil.parseRelativeTime("1");
} catch (IOException e) {
assertExceptionContains("too short", e);
}
try {
DFSUtil.parseRelativeTime("1z");
} catch (IOException e) {
assertExceptionContains("unknown time unit", e);
}
try {
DFSUtil.parseRelativeTime("yyz");
} catch (IOException e) {
assertExceptionContains("is not a number", e);
}
assertEquals(61*1000, DFSUtil.parseRelativeTime("61s"));
assertEquals(61*60*1000, DFSUtil.parseRelativeTime("61m"));
assertEquals(0, DFSUtil.parseRelativeTime("0s"));
assertEquals(25*60*60*1000, DFSUtil.parseRelativeTime("25h"));
assertEquals(4*24*60*60*1000, DFSUtil.parseRelativeTime("4d"));
assertEquals(999*24*60*60*1000, DFSUtil.parseRelativeTime("999d"));
}
}
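The testDurationToString() cases above pin down a DDD:HH:MM:SS rendering (one day plus one hour, one minute and one second becomes "001:01:01:01"). Below is a hedged sketch of a formatter with that layout; it is an illustration of the expected format, not DFSUtil's own code.

```java
// Hedged sketch of the DDD:HH:MM:SS layout expected by testDurationToString
// above (e.g. 90061000 ms -> "001:01:01:01"). Illustration only.
public final class DurationFormatSketch {
  static String durationToString(long durationMs) {
    if (durationMs < 0) {
      throw new IllegalArgumentException("Invalid negative duration: " + durationMs);
    }
    long seconds = durationMs / 1000;
    long days = seconds / (24 * 60 * 60);
    long hours = (seconds / (60 * 60)) % 24;
    long minutes = (seconds / 60) % 60;
    long secs = seconds % 60;
    return String.format("%03d:%02d:%02d:%02d", days, hours, minutes, secs);
  }

  public static void main(String[] args) {
    System.out.println(durationToString(0));                                 // 000:00:00:00
    System.out.println(durationToString(((24*60*60)+(60*60)+60+1) * 1000L)); // 001:01:01:01
    System.out.println(durationToString(((23*60*60)+(59*60)+59) * 1000L));   // 000:23:59:59
  }
}
```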