Merge branch 'trunk' into HDFS-7240

Anu Engineer 2016-04-13 23:13:11 -07:00
commit ae6c0e839b
122 changed files with 4427 additions and 1333 deletions


@ -40,85 +40,26 @@ import java.text.SimpleDateFormat;
import java.util.*; import java.util.*;
/** /**
* <p>The {@link AuthenticationFilter} enables protecting web application * The {@link AuthenticationFilter} enables protecting web application
* resources with different (pluggable) * resources with different (pluggable)
* authentication mechanisms and signer secret providers. * authentication mechanisms and signer secret providers.
* </p>
* <p> * <p>
* Out of the box it provides 2 authentication mechanisms: Pseudo and Kerberos SPNEGO.
* </p>
* Additional authentication mechanisms are supported via the {@link AuthenticationHandler} interface. * Additional authentication mechanisms are supported via the {@link AuthenticationHandler} interface.
* <p> * <p>
* This filter delegates to the configured authentication handler for authentication and once it obtains an * This filter delegates to the configured authentication handler for authentication and once it obtains an
* {@link AuthenticationToken} from it, sets a signed HTTP cookie with the token. For client requests * {@link AuthenticationToken} from it, sets a signed HTTP cookie with the token. For client requests
* that provide the signed HTTP cookie, it verifies the validity of the cookie, extracts the user information * that provide the signed HTTP cookie, it verifies the validity of the cookie, extracts the user information
* and lets the request proceed to the target resource. * and lets the request proceed to the target resource.
* </p>
* The supported configuration properties are:
* <ul>
* <li>config.prefix: indicates the prefix to be used by all other configuration properties, the default value
* is no prefix. See below for details on how/why this prefix is used.</li>
* <li>[#PREFIX#.]type: simple|kerberos|#CLASS#, 'simple' is short for the
* {@link PseudoAuthenticationHandler}, 'kerberos' is short for {@link KerberosAuthenticationHandler}, otherwise
* the full class name of the {@link AuthenticationHandler} must be specified.</li>
* <li>[#PREFIX#.]signature.secret.file: when signer.secret.provider is set to
* "file" or not specified, this is the location of file including the secret
* used to sign the HTTP cookie.</li>
* <li>[#PREFIX#.]token.validity: time -in seconds- that the generated token is
* valid before a new authentication is triggered, default value is
* <code>3600</code> seconds. This is also used for the rollover interval for
* the "random" and "zookeeper" SignerSecretProviders.</li>
* <li>[#PREFIX#.]cookie.domain: domain to use for the HTTP cookie that stores the authentication token.</li>
* <li>[#PREFIX#.]cookie.path: path to use for the HTTP cookie that stores the authentication token.</li>
* </ul>
* <p> * <p>
* The rest of the configuration properties are specific to the {@link AuthenticationHandler} implementation and the * The rest of the configuration properties are specific to the {@link AuthenticationHandler} implementation and the
* {@link AuthenticationFilter} will take all the properties that start with the prefix #PREFIX#, it will remove * {@link AuthenticationFilter} will take all the properties that start with the prefix #PREFIX#, it will remove
* the prefix from it and it will pass them to the the authentication handler for initialization. Properties that do * the prefix from it and it will pass them to the the authentication handler for initialization. Properties that do
* not start with the prefix will not be passed to the authentication handler initialization. * not start with the prefix will not be passed to the authentication handler initialization.
* </p>
* <p> * <p>
* Out of the box it provides 3 signer secret provider implementations: * Details of the configurations are listed on <a href="../../../../../../../Configuration.html">Configuration Page</a>
* "file", "random" and "zookeeper"
* </p>
* Additional signer secret providers are supported via the
* {@link SignerSecretProvider} class.
* <p>
* For the HTTP cookies mentioned above, the SignerSecretProvider is used to
* determine the secret to use for signing the cookies. Different
* implementations can have different behaviors. The "file" implementation
* loads the secret from a specified file. The "random" implementation uses a
* randomly generated secret that rolls over at the interval specified by the
* [#PREFIX#.]token.validity mentioned above. The "zookeeper" implementation
* is like the "random" one, except that it synchronizes the random secret
* and rollovers between multiple servers; it's meant for HA services.
* </p>
* The relevant configuration properties are:
* <ul>
* <li>signer.secret.provider: indicates the name of the SignerSecretProvider
* class to use. Possible values are: "file", "random", "zookeeper", or a
* classname. If not specified, the "file" implementation will be used with
* [#PREFIX#.]signature.secret.file; and if that's not specified, the "random"
* implementation will be used.</li>
* <li>[#PREFIX#.]signature.secret.file: When the "file" implementation is
* specified, this content of this file is used as the secret.</li>
* <li>[#PREFIX#.]token.validity: When the "random" or "zookeeper"
* implementations are specified, this value is used as the rollover
* interval.</li>
* </ul>
* <p> * <p>
* The "zookeeper" implementation has additional configuration properties that * The "zookeeper" implementation has additional configuration properties that
* must be specified; see {@link ZKSignerSecretProvider} for details. * must be specified; see {@link ZKSignerSecretProvider} for details.
* </p>
* For subclasses of AuthenticationFilter that want additional control over the
* SignerSecretProvider, they can use the following attribute set in the
* ServletContext:
* <ul>
* <li>signer.secret.provider.object: A SignerSecretProvider implementation can
* be passed here that will be used instead of the signer.secret.provider
* configuration property. Note that the class should already be
* initialized.</li>
* </ul>
*/ */
@InterfaceAudience.Private @InterfaceAudience.Private


@ -57,34 +57,7 @@ import org.slf4j.LoggerFactory;
* {@link org.apache.hadoop.security.authentication.server.AuthenticationFilter} * {@link org.apache.hadoop.security.authentication.server.AuthenticationFilter}
* for more details. * for more details.
* <p> * <p>
* The supported configuration properties are: * Details of the configurations are listed on <a href="../../../../../../../Configuration.html">Configuration Page</a>
* <ul>
* <li>signer.secret.provider.zookeeper.connection.string: indicates the
* ZooKeeper connection string to connect with.</li>
* <li>signer.secret.provider.zookeeper.path: indicates the ZooKeeper path
* to use for storing and retrieving the secrets. All ZKSignerSecretProviders
* that need to coordinate should point to the same path.</li>
* <li>signer.secret.provider.zookeeper.auth.type: indicates the auth type to
* use. Supported values are "none" and "sasl". The default value is "none"
* </li>
* <li>signer.secret.provider.zookeeper.kerberos.keytab: set this to the path
* with the Kerberos keytab file. This is only required if using Kerberos.</li>
* <li>signer.secret.provider.zookeeper.kerberos.principal: set this to the
* Kerberos principal to use. This only required if using Kerberos.</li>
* <li>signer.secret.provider.zookeeper.disconnect.on.close: when set to "true",
* ZKSignerSecretProvider will close the ZooKeeper connection on shutdown. The
* default is "true". Only set this to "false" if a custom Curator client is
* being provided and the disconnection is being handled elsewhere.</li>
* </ul>
*
* The following attribute in the ServletContext can also be set if desired:
* <ul>
* <li>signer.secret.provider.zookeeper.curator.client: A CuratorFramework
* client object can be passed here. If given, the "zookeeper" implementation
* will use this Curator client instead of creating its own, which is useful if
* you already have a Curator client or want more control over its
* configuration.</li>
* </ul>
*/ */
@InterfaceStability.Unstable @InterfaceStability.Unstable
@InterfaceAudience.Private @InterfaceAudience.Private


@ -34,12 +34,11 @@ Hadoop Auth uses SLF4J-API for logging. Auth Maven POM dependencies define the S
* `[PREFIX.]type`: the authentication type keyword (`simple` or \ * `[PREFIX.]type`: the authentication type keyword (`simple` or \
`kerberos`) or a Authentication handler implementation. `kerberos`) or a Authentication handler implementation.
* `[PREFIX.]signature.secret`: When `signer.secret.provider` is set to * `[PREFIX.]signature.secret.file`: When `signer.secret.provider` is set to
`string` or not specified, this is the value for the secret used to sign `file`, this is the location of file including the secret used to sign the HTTP cookie.
the HTTP cookie.
* `[PREFIX.]token.validity`: The validity -in seconds- of the generated * `[PREFIX.]token.validity`: The validity -in seconds- of the generated
authentication token. The default value is `3600` seconds. This is also authentication token. The default value is `36000` seconds. This is also
used for the rollover interval when `signer.secret.provider` is set to used for the rollover interval when `signer.secret.provider` is set to
`random` or `zookeeper`. `random` or `zookeeper`.
@ -50,10 +49,11 @@ Hadoop Auth uses SLF4J-API for logging. Auth Maven POM dependencies define the S
authentication token. authentication token.
* `signer.secret.provider`: indicates the name of the SignerSecretProvider * `signer.secret.provider`: indicates the name of the SignerSecretProvider
class to use. Possible values are: `string`, `random`, class to use. Possible values are: `file`, `random`,
`zookeeper`, or a classname. If not specified, the `string` `zookeeper`, or a classname. If not specified, the `file`
implementation will be used; and failing that, the `random` implementation will be used; and failing that, the `random`
implementation will be used. implementation will be used. If "file" is to be used, one needs to specify
`signature.secret.file` and point to the secret file.
### Kerberos Configuration ### Kerberos Configuration
@ -232,24 +232,25 @@ The SignerSecretProvider is used to provide more advanced behaviors for the secr
These are the relevant configuration properties: These are the relevant configuration properties:
* `signer.secret.provider`: indicates the name of the * `signer.secret.provider`: indicates the name of the
SignerSecretProvider class to use. Possible values are: "string", SignerSecretProvider class to use. Possible values are: "file",
"random", "zookeeper", or a classname. If not specified, the "string" "random", "zookeeper", or a classname. If not specified, the "file"
implementation will be used; and failing that, the "random" implementation implementation will be used; and failing that, the "random" implementation
will be used. will be used. If "file" is to be used, one needs to specify `signature.secret.file`
and point to the secret file.
* `[PREFIX.]signature.secret`: When `signer.secret.provider` is set * `[PREFIX.]signature.secret.file`: When `signer.secret.provider` is set
to `string` or not specified, this is the value for the secret used to to `file` or not specified, this is the value for the secret used to
sign the HTTP cookie. sign the HTTP cookie.
* `[PREFIX.]token.validity`: The validity -in seconds- of the generated * `[PREFIX.]token.validity`: The validity -in seconds- of the generated
authentication token. The default value is `3600` seconds. This is authentication token. The default value is `36000` seconds. This is
also used for the rollover interval when `signer.secret.provider` is also used for the rollover interval when `signer.secret.provider` is
set to `random` or `zookeeper`. set to `random` or `zookeeper`.
The following configuration properties are specific to the `zookeeper` implementation: The following configuration properties are specific to the `zookeeper` implementation:
* `signer.secret.provider.zookeeper.connection.string`: Indicates the * `signer.secret.provider.zookeeper.connection.string`: Indicates the
ZooKeeper connection string to connect with. ZooKeeper connection string to connect with. The default value is `localhost:2181`
* `signer.secret.provider.zookeeper.path`: Indicates the ZooKeeper path * `signer.secret.provider.zookeeper.path`: Indicates the ZooKeeper path
to use for storing and retrieving the secrets. All servers to use for storing and retrieving the secrets. All servers
@ -266,6 +267,17 @@ The following configuration properties are specific to the `zookeeper` implement
* `signer.secret.provider.zookeeper.kerberos.principal`: Set this to the * `signer.secret.provider.zookeeper.kerberos.principal`: Set this to the
Kerberos principal to use. This only required if using Kerberos. Kerberos principal to use. This only required if using Kerberos.
* `signer.secret.provider.zookeeper.disconnect.on.shutdown`: Whether to close the
ZooKeeper connection when the provider is shutdown. The default value is `true`.
Only set this to `false` if a custom Curator client is being provided and
the disconnection is being handled elsewhere.
The following attribute in the ServletContext can also be set if desired:
* `signer.secret.provider.zookeeper.curator.client`: A CuratorFramework client
object can be passed here. If given, the "zookeeper" implementation will use
this Curator client instead of creating its own, which is useful if you already
have a Curator client or want more control over its configuration.
**Example**: **Example**:
```xml ```xml
@ -276,11 +288,11 @@ The following configuration properties are specific to the `zookeeper` implement
<!-- AuthenticationHandler configs not shown --> <!-- AuthenticationHandler configs not shown -->
<init-param> <init-param>
<param-name>signer.secret.provider</param-name> <param-name>signer.secret.provider</param-name>
<param-value>string</param-value> <param-value>file</param-value>
</init-param> </init-param>
<init-param> <init-param>
<param-name>signature.secret</param-name> <param-name>signature.secret.file</param-name>
<param-value>my_secret</param-value> <param-value>/myapp/secret_file</param-value>
</init-param> </init-param>
</filter> </filter>
@ -334,10 +346,6 @@ The following configuration properties are specific to the `zookeeper` implement
<param-name>signer.secret.provider.zookeeper.path</param-name> <param-name>signer.secret.provider.zookeeper.path</param-name>
<param-value>/myapp/secrets</param-value> <param-value>/myapp/secrets</param-value>
</init-param> </init-param>
<init-param>
<param-name>signer.secret.provider.zookeeper.use.kerberos.acls</param-name>
<param-value>true</param-value>
</init-param>
<init-param> <init-param>
<param-name>signer.secret.provider.zookeeper.kerberos.keytab</param-name> <param-name>signer.secret.provider.zookeeper.kerberos.keytab</param-name>
<param-value>/tmp/auth.keytab</param-value> <param-value>/tmp/auth.keytab</param-value>


@ -17,6 +17,7 @@
*/ */
package org.apache.hadoop.crypto; package org.apache.hadoop.crypto;
import java.io.EOFException;
import java.io.FileDescriptor; import java.io.FileDescriptor;
import java.io.FileInputStream; import java.io.FileInputStream;
import java.io.FilterInputStream; import java.io.FilterInputStream;
@ -34,6 +35,7 @@ import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.fs.ByteBufferReadable; import org.apache.hadoop.fs.ByteBufferReadable;
import org.apache.hadoop.fs.CanSetDropBehind; import org.apache.hadoop.fs.CanSetDropBehind;
import org.apache.hadoop.fs.CanSetReadahead; import org.apache.hadoop.fs.CanSetReadahead;
import org.apache.hadoop.fs.FSExceptionMessages;
import org.apache.hadoop.fs.HasEnhancedByteBufferAccess; import org.apache.hadoop.fs.HasEnhancedByteBufferAccess;
import org.apache.hadoop.fs.HasFileDescriptor; import org.apache.hadoop.fs.HasFileDescriptor;
import org.apache.hadoop.fs.PositionedReadable; import org.apache.hadoop.fs.PositionedReadable;
@ -395,7 +397,9 @@ public class CryptoInputStream extends FilterInputStream implements
/** Seek to a position. */ /** Seek to a position. */
@Override @Override
public void seek(long pos) throws IOException { public void seek(long pos) throws IOException {
Preconditions.checkArgument(pos >= 0, "Cannot seek to negative offset."); if (pos < 0) {
throw new EOFException(FSExceptionMessages.NEGATIVE_SEEK);
}
checkStream(); checkStream();
try { try {
/* /*


@ -0,0 +1,168 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.Closeable;
import java.io.File;
import java.io.IOException;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicLong;
/**
* Interface for a class that can estimate how much space
* is used in a directory.
* <p>
* The implementor is free to cache the space used. As such, there
* are methods to update the cached value with any known changes.
*/
@InterfaceAudience.LimitedPrivate({"HDFS", "MapReduce"})
@InterfaceStability.Evolving
public abstract class CachingGetSpaceUsed implements Closeable, GetSpaceUsed {
static final Logger LOG = LoggerFactory.getLogger(CachingGetSpaceUsed.class);
protected final AtomicLong used = new AtomicLong();
private final AtomicBoolean running = new AtomicBoolean(true);
private final long refreshInterval;
private final String dirPath;
private Thread refreshUsed;
/**
* This is the constructor used by the builder.
* All overriding classes should implement this.
*/
public CachingGetSpaceUsed(CachingGetSpaceUsed.Builder builder)
throws IOException {
this(builder.getPath(), builder.getInterval(), builder.getInitialUsed());
}
/**
* Keeps track of disk usage.
*
* @param path the path to check disk usage in
* @param interval refresh the disk usage at this interval
* @param initialUsed use this value until next refresh
* @throws IOException if we fail to refresh the disk usage
*/
CachingGetSpaceUsed(File path,
long interval,
long initialUsed) throws IOException {
dirPath = path.getCanonicalPath();
refreshInterval = interval;
used.set(initialUsed);
}
void init() {
if (used.get() < 0) {
used.set(0);
refresh();
}
if (refreshInterval > 0) {
refreshUsed = new Thread(new RefreshThread(this),
"refreshUsed-" + dirPath);
refreshUsed.setDaemon(true);
refreshUsed.start();
} else {
running.set(false);
refreshUsed = null;
}
}
protected abstract void refresh();
/**
* @return an estimate of space used in the directory path.
*/
@Override public long getUsed() throws IOException {
return Math.max(used.get(), 0);
}
/**
* @return The directory path being monitored.
*/
public String getDirPath() {
return dirPath;
}
/**
* Increment the cached value of used space.
*/
public void incDfsUsed(long value) {
used.addAndGet(value);
}
/**
* Is the background thread running.
*/
boolean running() {
return running.get();
}
/**
* How long in between runs of the background refresh.
*/
long getRefreshInterval() {
return refreshInterval;
}
/**
* Reset the current used data amount. This should be called
* when the cached value is re-computed.
*
* @param usedValue new value that should be the disk usage.
*/
protected void setUsed(long usedValue) {
this.used.set(usedValue);
}
@Override
public void close() throws IOException {
running.set(false);
if (refreshUsed != null) {
refreshUsed.interrupt();
}
}
private static final class RefreshThread implements Runnable {
final CachingGetSpaceUsed spaceUsed;
RefreshThread(CachingGetSpaceUsed spaceUsed) {
this.spaceUsed = spaceUsed;
}
@Override
public void run() {
while (spaceUsed.running()) {
try {
Thread.sleep(spaceUsed.getRefreshInterval());
// update the used variable
spaceUsed.refresh();
} catch (InterruptedException e) {
LOG.warn("Thread Interrupted waiting to refresh disk information", e);
Thread.currentThread().interrupt();
}
}
}
}
}
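A subclass of CachingGetSpaceUsed only has to supply refresh(). As a rough sketch (not part of this patch; the class name and the plain-java.io directory walk are invented for illustration), an implementation that avoids shelling out could look like this, assuming the Builder introduced in GetSpaceUsed further down:

```java
import java.io.File;
import java.io.IOException;
import org.apache.hadoop.fs.CachingGetSpaceUsed;

/** Hypothetical implementation that walks the directory tree in Java. */
public class JavaWalkGetSpaceUsed extends CachingGetSpaceUsed {

  // GetSpaceUsed.Builder locates implementations through this constructor.
  public JavaWalkGetSpaceUsed(CachingGetSpaceUsed.Builder builder)
      throws IOException {
    super(builder);
  }

  @Override
  protected void refresh() {
    // Recompute the directory size and publish it to the cached value.
    setUsed(sizeOf(new File(getDirPath())));
  }

  private static long sizeOf(File f) {
    if (f.isFile()) {
      return f.length();
    }
    long total = 0;
    File[] children = f.listFiles();
    if (children != null) {
      for (File child : children) {
        total += sizeOf(child);
      }
    }
    return total;
  }
}
```

Such a class could then be selected through the Builder's setKlass() method or the fs.getspaceused.classname key defined in GetSpaceUsed later in this patch.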


@ -182,20 +182,18 @@ public abstract class ChecksumFileSystem extends FilterFileSystem {
public int read(long position, byte[] b, int off, int len) public int read(long position, byte[] b, int off, int len)
throws IOException { throws IOException {
// parameter check // parameter check
if ((off | len | (off + len) | (b.length - (off + len))) < 0) { validatePositionedReadArgs(position, b, off, len);
throw new IndexOutOfBoundsException(); if (len == 0) {
} else if (len == 0) {
return 0; return 0;
} }
if( position<0 ) {
throw new IllegalArgumentException(
"Parameter position can not to be negative");
}
ChecksumFSInputChecker checker = new ChecksumFSInputChecker(fs, file); int nread;
checker.seek(position); try (ChecksumFSInputChecker checker =
int nread = checker.read(b, off, len); new ChecksumFSInputChecker(fs, file)) {
checker.close(); checker.seek(position);
nread = checker.read(b, off, len);
checker.close();
}
return nread; return nread;
} }


@ -169,20 +169,18 @@ public abstract class ChecksumFs extends FilterFs {
public int read(long position, byte[] b, int off, int len) public int read(long position, byte[] b, int off, int len)
throws IOException, UnresolvedLinkException { throws IOException, UnresolvedLinkException {
// parameter check // parameter check
if ((off | len | (off + len) | (b.length - (off + len))) < 0) { validatePositionedReadArgs(position, b, off, len);
throw new IndexOutOfBoundsException(); if (len == 0) {
} else if (len == 0) {
return 0; return 0;
} }
if (position<0) {
throw new IllegalArgumentException(
"Parameter position can not to be negative");
}
ChecksumFSInputChecker checker = new ChecksumFSInputChecker(fs, file); int nread;
checker.seek(position); try (ChecksumFSInputChecker checker =
int nread = checker.read(b, off, len); new ChecksumFSInputChecker(fs, file)) {
checker.close(); checker.seek(position);
nread = checker.read(b, off, len);
checker.close();
}
return nread; return nread;
} }


@ -17,227 +17,73 @@
*/ */
package org.apache.hadoop.fs; package org.apache.hadoop.fs;
import com.google.common.annotations.VisibleForTesting;
import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.util.Shell; import org.apache.hadoop.util.Shell;
import java.io.BufferedReader; import java.io.BufferedReader;
import java.io.File; import java.io.File;
import java.io.IOException; import java.io.IOException;
import java.util.concurrent.atomic.AtomicLong;
/** Filesystem disk space usage statistics. Uses the unix 'du' program*/ /** Filesystem disk space usage statistics. Uses the unix 'du' program */
@InterfaceAudience.LimitedPrivate({"HDFS", "MapReduce"}) @InterfaceAudience.LimitedPrivate({"HDFS", "MapReduce"})
@InterfaceStability.Evolving @InterfaceStability.Evolving
public class DU extends Shell { public class DU extends CachingGetSpaceUsed {
private String dirPath; private DUShell duShell;
private AtomicLong used = new AtomicLong(); @VisibleForTesting
private volatile boolean shouldRun = true; public DU(File path, long interval, long initialUsed) throws IOException {
private Thread refreshUsed; super(path, interval, initialUsed);
private IOException duException = null;
private long refreshInterval;
/**
* Keeps track of disk usage.
* @param path the path to check disk usage in
* @param interval refresh the disk usage at this interval
* @throws IOException if we fail to refresh the disk usage
*/
public DU(File path, long interval) throws IOException {
this(path, interval, -1L);
} }
/** public DU(CachingGetSpaceUsed.Builder builder) throws IOException {
* Keeps track of disk usage. this(builder.getPath(), builder.getInterval(), builder.getInitialUsed());
* @param path the path to check disk usage in }
* @param interval refresh the disk usage at this interval
* @param initialUsed use this value until next refresh
* @throws IOException if we fail to refresh the disk usage
*/
public DU(File path, long interval, long initialUsed) throws IOException {
super(0);
//we set the Shell interval to 0 so it will always run our command @Override
//and use this one to set the thread sleep interval protected synchronized void refresh() {
this.refreshInterval = interval; if (duShell == null) {
this.dirPath = path.getCanonicalPath(); duShell = new DUShell();
}
//populate the used variable if the initial value is not specified. try {
if (initialUsed < 0) { duShell.startRefresh();
run(); } catch (IOException ioe) {
} else { LOG.warn("Could not get disk usage information", ioe);
this.used.set(initialUsed);
} }
} }
/** private final class DUShell extends Shell {
* Keeps track of disk usage. void startRefresh() throws IOException {
* @param path the path to check disk usage in run();
* @param conf configuration object }
* @throws IOException if we fail to refresh the disk usage @Override
*/ public String toString() {
public DU(File path, Configuration conf) throws IOException { return
this(path, conf, -1L); "du -sk " + getDirPath() + "\n" + used.get() + "\t" + getDirPath();
} }
/**
* Keeps track of disk usage.
* @param path the path to check disk usage in
* @param conf configuration object
* @param initialUsed use it until the next refresh.
* @throws IOException if we fail to refresh the disk usage
*/
public DU(File path, Configuration conf, long initialUsed)
throws IOException {
this(path, conf.getLong(CommonConfigurationKeys.FS_DU_INTERVAL_KEY,
CommonConfigurationKeys.FS_DU_INTERVAL_DEFAULT), initialUsed);
}
/**
* This thread refreshes the "used" variable.
*
* Future improvements could be to not permanently
* run this thread, instead run when getUsed is called.
**/
class DURefreshThread implements Runnable {
@Override @Override
public void run() { protected String[] getExecString() {
return new String[]{"du", "-sk", getDirPath()};
}
while(shouldRun) { @Override
protected void parseExecResult(BufferedReader lines) throws IOException {
try { String line = lines.readLine();
Thread.sleep(refreshInterval); if (line == null) {
throw new IOException("Expecting a line not the end of stream");
try {
//update the used variable
DU.this.run();
} catch (IOException e) {
synchronized (DU.this) {
//save the latest exception so we can return it in getUsed()
duException = e;
}
LOG.warn("Could not get disk usage information", e);
}
} catch (InterruptedException e) {
}
} }
} String[] tokens = line.split("\t");
} if (tokens.length == 0) {
throw new IOException("Illegal du output");
/**
* Decrease how much disk space we use.
* @param value decrease by this value
*/
public void decDfsUsed(long value) {
used.addAndGet(-value);
}
/**
* Increase how much disk space we use.
* @param value increase by this value
*/
public void incDfsUsed(long value) {
used.addAndGet(value);
}
/**
* @return disk space used
* @throws IOException if the shell command fails
*/
public long getUsed() throws IOException {
//if the updating thread isn't started, update on demand
if(refreshUsed == null) {
run();
} else {
synchronized (DU.this) {
//if an exception was thrown in the last run, rethrow
if(duException != null) {
IOException tmp = duException;
duException = null;
throw tmp;
}
} }
setUsed(Long.parseLong(tokens[0]) * 1024);
} }
return Math.max(used.longValue(), 0L);
} }
/**
* @return the path of which we're keeping track of disk usage
*/
public String getDirPath() {
return dirPath;
}
/**
* Override to hook in DUHelper class. Maybe this can be used more
* generally as well on Unix/Linux based systems
*/
@Override
protected void run() throws IOException {
if (WINDOWS) {
used.set(DUHelper.getFolderUsage(dirPath));
return;
}
super.run();
}
/**
* Start the disk usage checking thread.
*/
public void start() {
//only start the thread if the interval is sane
if(refreshInterval > 0) {
refreshUsed = new Thread(new DURefreshThread(),
"refreshUsed-"+dirPath);
refreshUsed.setDaemon(true);
refreshUsed.start();
}
}
/**
* Shut down the refreshing thread.
*/
public void shutdown() {
this.shouldRun = false;
if(this.refreshUsed != null) {
this.refreshUsed.interrupt();
}
}
@Override
public String toString() {
return
"du -sk " + dirPath +"\n" +
used + "\t" + dirPath;
}
@Override
protected String[] getExecString() {
return new String[] {"du", "-sk", dirPath};
}
@Override
protected void parseExecResult(BufferedReader lines) throws IOException {
String line = lines.readLine();
if (line == null) {
throw new IOException("Expecting a line not the end of stream");
}
String[] tokens = line.split("\t");
if(tokens.length == 0) {
throw new IOException("Illegal du output");
}
this.used.set(Long.parseLong(tokens[0])*1024);
}
public static void main(String[] args) throws Exception { public static void main(String[] args) throws Exception {
String path = "."; String path = ".";
@ -245,6 +91,10 @@ public class DU extends Shell {
path = args[0]; path = args[0];
} }
System.out.println(new DU(new File(path), new Configuration()).toString()); GetSpaceUsed du = new GetSpaceUsed.Builder().setPath(new File(path))
.setConf(new Configuration())
.build();
String duResult = du.toString();
System.out.println(duResult);
} }
} }


@ -18,18 +18,21 @@
*/ */
package org.apache.hadoop.fs; package org.apache.hadoop.fs;
import java.io.*; import java.io.DataInputStream;
import java.io.FileDescriptor;
import java.io.FileInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.nio.ByteBuffer; import java.nio.ByteBuffer;
import java.util.EnumSet; import java.util.EnumSet;
import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.io.ByteBufferPool; import org.apache.hadoop.io.ByteBufferPool;
import org.apache.hadoop.fs.ByteBufferUtil;
import org.apache.hadoop.util.IdentityHashStore; import org.apache.hadoop.util.IdentityHashStore;
/** Utility that wraps a {@link FSInputStream} in a {@link DataInputStream} /** Utility that wraps a {@link FSInputStream} in a {@link DataInputStream}
* and buffers input through a {@link BufferedInputStream}. */ * and buffers input through a {@link java.io.BufferedInputStream}. */
@InterfaceAudience.Public @InterfaceAudience.Public
@InterfaceStability.Stable @InterfaceStability.Stable
public class FSDataInputStream extends DataInputStream public class FSDataInputStream extends DataInputStream
@ -97,6 +100,7 @@ public class FSDataInputStream extends DataInputStream
* @param buffer buffer into which data is read * @param buffer buffer into which data is read
* @param offset offset into the buffer in which data is written * @param offset offset into the buffer in which data is written
* @param length the number of bytes to read * @param length the number of bytes to read
* @throws IOException IO problems
* @throws EOFException If the end of stream is reached while reading. * @throws EOFException If the end of stream is reached while reading.
* If an exception is thrown an undetermined number * If an exception is thrown an undetermined number
* of bytes in the buffer may have been written. * of bytes in the buffer may have been written.


@ -40,4 +40,10 @@ public class FSExceptionMessages {
*/ */
public static final String CANNOT_SEEK_PAST_EOF = public static final String CANNOT_SEEK_PAST_EOF =
"Attempted to seek or read past the end of the file"; "Attempted to seek or read past the end of the file";
public static final String EOF_IN_READ_FULLY =
"End of file reached before reading fully.";
public static final String TOO_MANY_BYTES_FOR_DEST_BUFFER
= "Requested more bytes than destination buffer size";
} }


@ -17,22 +17,28 @@
*/ */
package org.apache.hadoop.fs; package org.apache.hadoop.fs;
import java.io.*; import java.io.EOFException;
import java.nio.ByteBuffer; import java.io.IOException;
import java.io.InputStream;
import com.google.common.base.Preconditions;
import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.fs.ZeroCopyUnavailableException; import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**************************************************************** /****************************************************************
* FSInputStream is a generic old InputStream with a little bit * FSInputStream is a generic old InputStream with a little bit
* of RAF-style seek ability. * of RAF-style seek ability.
* *
*****************************************************************/ *****************************************************************/
@InterfaceAudience.LimitedPrivate({"HDFS"}) @InterfaceAudience.Public
@InterfaceStability.Unstable @InterfaceStability.Evolving
public abstract class FSInputStream extends InputStream public abstract class FSInputStream extends InputStream
implements Seekable, PositionedReadable { implements Seekable, PositionedReadable {
private static final Logger LOG =
LoggerFactory.getLogger(FSInputStream.class);
/** /**
* Seek to the given offset from the start of the file. * Seek to the given offset from the start of the file.
* The next read() will be from that location. Can't * The next read() will be from that location. Can't
@ -57,12 +63,21 @@ public abstract class FSInputStream extends InputStream
@Override @Override
public int read(long position, byte[] buffer, int offset, int length) public int read(long position, byte[] buffer, int offset, int length)
throws IOException { throws IOException {
validatePositionedReadArgs(position, buffer, offset, length);
if (length == 0) {
return 0;
}
synchronized (this) { synchronized (this) {
long oldPos = getPos(); long oldPos = getPos();
int nread = -1; int nread = -1;
try { try {
seek(position); seek(position);
nread = read(buffer, offset, length); nread = read(buffer, offset, length);
} catch (EOFException e) {
// end of file; this can be raised by some filesystems
// (often: object stores); it is swallowed here.
LOG.debug("Downgrading EOFException raised trying to" +
" read {} bytes at offset {}", length, offset, e);
} finally { } finally {
seek(oldPos); seek(oldPos);
} }
@ -70,14 +85,42 @@ public abstract class FSInputStream extends InputStream
} }
} }
/**
* Validation code, available for use in subclasses.
* @param position position: if negative an EOF exception is raised
* @param buffer destination buffer
* @param offset offset within the buffer
* @param length length of bytes to read
* @throws EOFException if the position is negative
* @throws IndexOutOfBoundsException if there isn't space for the amount of
* data requested.
* @throws IllegalArgumentException other arguments are invalid.
*/
protected void validatePositionedReadArgs(long position,
byte[] buffer, int offset, int length) throws EOFException {
Preconditions.checkArgument(length >= 0, "length is negative");
if (position < 0) {
throw new EOFException("position is negative");
}
Preconditions.checkArgument(buffer != null, "Null buffer");
if (buffer.length - offset < length) {
throw new IndexOutOfBoundsException(
FSExceptionMessages.TOO_MANY_BYTES_FOR_DEST_BUFFER);
}
}
@Override @Override
public void readFully(long position, byte[] buffer, int offset, int length) public void readFully(long position, byte[] buffer, int offset, int length)
throws IOException { throws IOException {
validatePositionedReadArgs(position, buffer, offset, length);
int nread = 0; int nread = 0;
while (nread < length) { while (nread < length) {
int nbytes = read(position+nread, buffer, offset+nread, length-nread); int nbytes = read(position + nread,
buffer,
offset + nread,
length - nread);
if (nbytes < 0) { if (nbytes < 0) {
throw new EOFException("End of file reached before reading fully."); throw new EOFException(FSExceptionMessages.EOF_IN_READ_FULLY);
} }
nread += nbytes; nread += nbytes;
} }


@ -0,0 +1,147 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.util.Shell;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.File;
import java.io.IOException;
import java.lang.reflect.Constructor;
import java.lang.reflect.InvocationTargetException;
public interface GetSpaceUsed {
long getUsed() throws IOException;
/**
* The builder class
*/
final class Builder {
static final Logger LOG = LoggerFactory.getLogger(Builder.class);
static final String CLASSNAME_KEY = "fs.getspaceused.classname";
private Configuration conf;
private Class<? extends GetSpaceUsed> klass = null;
private File path = null;
private Long interval = null;
private Long initialUsed = null;
public Configuration getConf() {
return conf;
}
public Builder setConf(Configuration conf) {
this.conf = conf;
return this;
}
public long getInterval() {
if (interval != null) {
return interval;
}
long result = CommonConfigurationKeys.FS_DU_INTERVAL_DEFAULT;
if (conf == null) {
return result;
}
return conf.getLong(CommonConfigurationKeys.FS_DU_INTERVAL_KEY, result);
}
public Builder setInterval(long interval) {
this.interval = interval;
return this;
}
public Class<? extends GetSpaceUsed> getKlass() {
if (klass != null) {
return klass;
}
Class<? extends GetSpaceUsed> result = null;
if (Shell.WINDOWS) {
result = WindowsGetSpaceUsed.class;
} else {
result = DU.class;
}
if (conf == null) {
return result;
}
return conf.getClass(CLASSNAME_KEY, result, GetSpaceUsed.class);
}
public Builder setKlass(Class<? extends GetSpaceUsed> klass) {
this.klass = klass;
return this;
}
public File getPath() {
return path;
}
public Builder setPath(File path) {
this.path = path;
return this;
}
public long getInitialUsed() {
if (initialUsed == null) {
return -1;
}
return initialUsed;
}
public Builder setInitialUsed(long initialUsed) {
this.initialUsed = initialUsed;
return this;
}
public GetSpaceUsed build() throws IOException {
GetSpaceUsed getSpaceUsed = null;
try {
Constructor<? extends GetSpaceUsed> cons =
getKlass().getConstructor(Builder.class);
getSpaceUsed = cons.newInstance(this);
} catch (InstantiationException e) {
LOG.warn("Error trying to create an instance of " + getKlass(), e);
} catch (IllegalAccessException e) {
LOG.warn("Error trying to create " + getKlass(), e);
} catch (InvocationTargetException e) {
LOG.warn("Error trying to create " + getKlass(), e);
} catch (NoSuchMethodException e) {
LOG.warn("Doesn't look like the class " + getKlass() +
" have the needed constructor", e);
}
// If there were any exceptions then du will be null.
// Construct our best guess fallback.
if (getSpaceUsed == null) {
if (Shell.WINDOWS) {
getSpaceUsed = new WindowsGetSpaceUsed(this);
} else {
getSpaceUsed = new DU(this);
}
}
// Call init after classes constructors have finished.
if (getSpaceUsed instanceof CachingGetSpaceUsed) {
((CachingGetSpaceUsed) getSpaceUsed).init();
}
return getSpaceUsed;
}
}
}
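A rough usage sketch of the builder above (illustrative only; the path and refresh interval are arbitrary):

```java
import java.io.File;
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CachingGetSpaceUsed;
import org.apache.hadoop.fs.GetSpaceUsed;

public class SpaceUsedExample {
  public static void main(String[] args) throws IOException {
    GetSpaceUsed spaceUsed = new GetSpaceUsed.Builder()
        .setPath(new File("/tmp"))     // directory to measure (arbitrary)
        .setInterval(60000L)           // background refresh every minute
        .setConf(new Configuration())  // may supply fs.getspaceused.classname
        .build();

    System.out.println("bytes used: " + spaceUsed.getUsed());

    // Caching implementations own a daemon refresh thread; close when done.
    if (spaceUsed instanceof CachingGetSpaceUsed) {
      ((CachingGetSpaceUsed) spaceUsed).close();
    }
  }
}
```

Per the build() method shown above, construction falls back to DU (or WindowsGetSpaceUsed on Windows) if reflection fails, and init() is invoked on any CachingGetSpaceUsed it returns, so the caller does not start the refresh thread explicitly.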


@ -26,6 +26,7 @@ import org.apache.hadoop.io.Text;
import org.apache.hadoop.util.LineReader; import org.apache.hadoop.util.LineReader;
import org.apache.hadoop.util.Progressable; import org.apache.hadoop.util.Progressable;
import java.io.EOFException;
import java.io.FileNotFoundException; import java.io.FileNotFoundException;
import java.io.IOException; import java.io.IOException;
import java.io.UnsupportedEncodingException; import java.io.UnsupportedEncodingException;
@ -1053,17 +1054,16 @@ public class HarFileSystem extends FileSystem {
@Override @Override
public void readFully(long pos, byte[] b, int offset, int length) public void readFully(long pos, byte[] b, int offset, int length)
throws IOException { throws IOException {
validatePositionedReadArgs(pos, b, offset, length);
if (length == 0) {
return;
}
if (start + length + pos > end) { if (start + length + pos > end) {
throw new IOException("Not enough bytes to read."); throw new EOFException("Not enough bytes to read.");
} }
underLyingStream.readFully(pos + start, b, offset, length); underLyingStream.readFully(pos + start, b, offset, length);
} }
@Override
public void readFully(long pos, byte[] b) throws IOException {
readFully(pos, b, 0, b.length);
}
@Override @Override
public void setReadahead(Long readahead) throws IOException { public void setReadahead(Long readahead) throws IOException {
underLyingStream.setReadahead(readahead); underLyingStream.setReadahead(readahead);


@ -22,30 +22,67 @@ import java.io.*;
import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.classification.InterfaceStability;
/** Stream that permits positional reading. */ /**
* Stream that permits positional reading.
*
* Implementations are required to implement thread-safe operations; this may
* be supported by concurrent access to the data, or by using a synchronization
* mechanism to serialize access.
*
* Not all implementations meet this requirement. Those that do not cannot
* be used as a backing store for some applications, such as Apache HBase.
*
* Independent of whether or not they are thread safe, some implementations
* may make the intermediate state of the system, specifically the position
* obtained in {@code Seekable.getPos()} visible.
*/
@InterfaceAudience.Public @InterfaceAudience.Public
@InterfaceStability.Evolving @InterfaceStability.Evolving
public interface PositionedReadable { public interface PositionedReadable {
/** /**
* Read upto the specified number of bytes, from a given * Read up to the specified number of bytes, from a given
* position within a file, and return the number of bytes read. This does not * position within a file, and return the number of bytes read. This does not
* change the current offset of a file, and is thread-safe. * change the current offset of a file, and is thread-safe.
*
* <i>Warning: Not all filesystems satisfy the thread-safety requirement.</i>
* @param position position within file
* @param buffer destination buffer
* @param offset offset in the buffer
* @param length number of bytes to read
* @return actual number of bytes read; -1 means "none"
* @throws IOException IO problems.
*/ */
public int read(long position, byte[] buffer, int offset, int length) int read(long position, byte[] buffer, int offset, int length)
throws IOException; throws IOException;
/** /**
* Read the specified number of bytes, from a given * Read the specified number of bytes, from a given
* position within a file. This does not * position within a file. This does not
* change the current offset of a file, and is thread-safe. * change the current offset of a file, and is thread-safe.
*
* <i>Warning: Not all filesystems satisfy the thread-safety requirement.</i>
* @param position position within file
* @param buffer destination buffer
* @param offset offset in the buffer
* @param length number of bytes to read
* @throws IOException IO problems.
* @throws EOFException the end of the data was reached before
* the read operation completed
*/ */
public void readFully(long position, byte[] buffer, int offset, int length) void readFully(long position, byte[] buffer, int offset, int length)
throws IOException; throws IOException;
/** /**
* Read number of bytes equal to the length of the buffer, from a given * Read number of bytes equal to the length of the buffer, from a given
* position within a file. This does not * position within a file. This does not
* change the current offset of a file, and is thread-safe. * change the current offset of a file, and is thread-safe.
*
* <i>Warning: Not all filesystems satisfy the thread-safety requirement.</i>
* @param position position within file
* @param buffer destination buffer
* @throws IOException IO problems.
* @throws EOFException the end of the data was reached before
* the read operation completed
*/ */
public void readFully(long position, byte[] buffer) throws IOException; void readFully(long position, byte[] buffer) throws IOException;
} }
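The thread-safety wording is the substantive part of the new javadoc: positional reads are intended to be callable concurrently against a single open stream, at least on filesystems that honour the contract (the warning above notes that not all do). A hypothetical illustration, with an invented path:

```java
import java.io.IOException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ParallelPread {
  public static void main(String[] args) throws Exception {
    FileSystem fs = FileSystem.get(new Configuration());
    FSDataInputStream in = fs.open(new Path("/tmp/example.dat")); // invented path
    ExecutorService pool = Executors.newFixedThreadPool(4);
    for (int i = 0; i < 4; i++) {
      final long offset = i * 4096L;
      pool.submit(() -> {
        byte[] block = new byte[4096];
        try {
          // Positioned read: does not move the stream's own read position.
          in.readFully(offset, block);
        } catch (IOException e) {
          e.printStackTrace();
        }
      });
    }
    pool.shutdown();
    pool.awaitTermination(1, TimeUnit.MINUTES);
    in.close();
    fs.close();
  }
}
```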


@ -160,6 +160,8 @@ public class RawLocalFileSystem extends FileSystem {
@Override @Override
public int read(byte[] b, int off, int len) throws IOException { public int read(byte[] b, int off, int len) throws IOException {
// parameter check
validatePositionedReadArgs(position, b, off, len);
try { try {
int value = fis.read(b, off, len); int value = fis.read(b, off, len);
if (value > 0) { if (value > 0) {
@ -175,6 +177,12 @@ public class RawLocalFileSystem extends FileSystem {
@Override @Override
public int read(long position, byte[] b, int off, int len) public int read(long position, byte[] b, int off, int len)
throws IOException { throws IOException {
// parameter check
validatePositionedReadArgs(position, b, off, len);
if (len == 0) {
return 0;
}
ByteBuffer bb = ByteBuffer.wrap(b, off, len); ByteBuffer bb = ByteBuffer.wrap(b, off, len);
try { try {
int value = fis.getChannel().read(bb, position); int value = fis.getChannel().read(bb, position);


@ -0,0 +1,46 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import java.io.IOException;
/**
* Class to tell the size of a path on Windows.
* Rather than shelling out, on Windows this uses DUHelper.getFolderUsage.
*/
@InterfaceAudience.LimitedPrivate({"HDFS", "MapReduce"})
@InterfaceStability.Evolving
public class WindowsGetSpaceUsed extends CachingGetSpaceUsed {
WindowsGetSpaceUsed(CachingGetSpaceUsed.Builder builder) throws IOException {
super(builder.getPath(), builder.getInterval(), builder.getInitialUsed());
}
/**
* Override to hook in DUHelper class.
*/
@Override
protected void refresh() {
used.set(DUHelper.getFolderUsage(getDirPath()));
}
}


@ -55,10 +55,7 @@ import org.apache.hadoop.conf.ConfServlet;
import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeys; import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.security.AuthenticationFilterInitializer; import org.apache.hadoop.security.AuthenticationFilterInitializer;
import org.apache.hadoop.security.authentication.util.FileSignerSecretProvider;
import org.apache.hadoop.security.authentication.util.RandomSignerSecretProvider;
import org.apache.hadoop.security.authentication.util.SignerSecretProvider; import org.apache.hadoop.security.authentication.util.SignerSecretProvider;
import org.apache.hadoop.security.authentication.util.ZKSignerSecretProvider;
import org.apache.hadoop.security.ssl.SslSocketConnectorSecure; import org.apache.hadoop.security.ssl.SslSocketConnectorSecure;
import org.apache.hadoop.jmx.JMXJsonServlet; import org.apache.hadoop.jmx.JMXJsonServlet;
import org.apache.hadoop.log.LogLevel; import org.apache.hadoop.log.LogLevel;
@ -98,8 +95,6 @@ import com.google.common.base.Preconditions;
import com.google.common.collect.Lists; import com.google.common.collect.Lists;
import com.sun.jersey.spi.container.servlet.ServletContainer; import com.sun.jersey.spi.container.servlet.ServletContainer;
import static org.apache.hadoop.security.authentication.server
.AuthenticationFilter.*;
/** /**
* Create a Jetty embedded server to answer http requests. The primary goal is * Create a Jetty embedded server to answer http requests. The primary goal is
* to serve up status information for the server. There are three contexts: * to serve up status information for the server. There are three contexts:
@ -1124,9 +1119,11 @@ public final class HttpServer2 implements FilterContainer {
/** /**
* A Servlet input filter that quotes all HTML active characters in the * A Servlet input filter that quotes all HTML active characters in the
* parameter names and values. The goal is to quote the characters to make * parameter names and values. The goal is to quote the characters to make
* all of the servlets resistant to cross-site scripting attacks. * all of the servlets resistant to cross-site scripting attacks. It also
* sets X-FRAME-OPTIONS in the header to mitigate clickjacking attacks.
*/ */
public static class QuotingInputFilter implements Filter { public static class QuotingInputFilter implements Filter {
private static final XFrameOption X_FRAME_OPTION = XFrameOption.SAMEORIGIN;
private FilterConfig config; private FilterConfig config;
public static class RequestQuoter extends HttpServletRequestWrapper { public static class RequestQuoter extends HttpServletRequestWrapper {
@ -1246,6 +1243,7 @@ public final class HttpServer2 implements FilterContainer {
} else if (mime.startsWith("application/xml")) { } else if (mime.startsWith("application/xml")) {
httpResponse.setContentType("text/xml; charset=utf-8"); httpResponse.setContentType("text/xml; charset=utf-8");
} }
httpResponse.addHeader("X-FRAME-OPTIONS", X_FRAME_OPTION.toString());
chain.doFilter(quoted, httpResponse); chain.doFilter(quoted, httpResponse);
} }
@ -1262,4 +1260,23 @@ public final class HttpServer2 implements FilterContainer {
} }
} }
/**
* The X-FRAME-OPTIONS header in the HTTP response to mitigate clickjacking
* attacks.
*/
public enum XFrameOption {
DENY("DENY") , SAMEORIGIN ("SAMEORIGIN"), ALLOWFROM ("ALLOW-FROM");
XFrameOption(String name) {
this.name = name;
}
private final String name;
@Override
public String toString() {
return this.name;
}
}
} }


@ -181,20 +181,22 @@ public abstract class AbstractMapWritable implements Writable, Configurable {
public void readFields(DataInput in) throws IOException { public void readFields(DataInput in) throws IOException {
// Get the number of "unknown" classes // Get the number of "unknown" classes
newClasses = in.readByte(); newClasses = in.readByte();
// Then read in the class names and add them to our tables // Use the classloader of the current thread to load classes instead of the
// system-classloader so as to support both client-only and inside-a-MR-job
// use-cases. The context-loader by default eventually falls back to the
// system one, so there should be no cases where changing this is an issue.
ClassLoader classLoader = Thread.currentThread().getContextClassLoader();
// Then read in the class names and add them to our tables
for (int i = 0; i < newClasses; i++) { for (int i = 0; i < newClasses; i++) {
byte id = in.readByte(); byte id = in.readByte();
String className = in.readUTF(); String className = in.readUTF();
try { try {
addToMap(Class.forName(className), id); addToMap(classLoader.loadClass(className), id);
} catch (ClassNotFoundException e) { } catch (ClassNotFoundException e) {
throw new IOException("can't find class: " + className + " because "+ throw new IOException(e);
e.getMessage());
} }
} }
} }


@ -18,15 +18,15 @@
package org.apache.hadoop.metrics2.lib; package org.apache.hadoop.metrics2.lib;
import java.util.Map;
import java.util.LinkedHashMap;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.metrics2.MetricsInfo; import org.apache.hadoop.metrics2.MetricsInfo;
import org.apache.hadoop.metrics2.MetricsTag; import org.apache.hadoop.metrics2.MetricsTag;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.LinkedHashMap;
import java.util.Map;
/** /**
* Helpers to create interned metrics info * Helpers to create interned metrics info
@ -34,7 +34,7 @@ import org.apache.hadoop.metrics2.MetricsTag;
@InterfaceAudience.Public @InterfaceAudience.Public
@InterfaceStability.Evolving @InterfaceStability.Evolving
public class Interns { public class Interns {
private static final Log LOG = LogFactory.getLog(Interns.class); private static final Logger LOG = LoggerFactory.getLogger(Interns.class);
// A simple intern cache with two keys // A simple intern cache with two keys
// (to avoid creating new (combined) key objects for lookup) // (to avoid creating new (combined) key objects for lookup)
@ -47,7 +47,7 @@ public class Interns {
protected boolean removeEldestEntry(Map.Entry<K1, Map<K2, V>> e) { protected boolean removeEldestEntry(Map.Entry<K1, Map<K2, V>> e) {
boolean overflow = expireKey1At(size()); boolean overflow = expireKey1At(size());
if (overflow && !gotOverflow) { if (overflow && !gotOverflow) {
LOG.warn("Metrics intern cache overflow at "+ size() +" for "+ e); LOG.info("Metrics intern cache overflow at {} for {}", size(), e);
gotOverflow = true; gotOverflow = true;
} }
return overflow; return overflow;
@ -67,7 +67,7 @@ public class Interns {
@Override protected boolean removeEldestEntry(Map.Entry<K2, V> e) { @Override protected boolean removeEldestEntry(Map.Entry<K2, V> e) {
boolean overflow = expireKey2At(size()); boolean overflow = expireKey2At(size());
if (overflow && !gotOverflow) { if (overflow && !gotOverflow) {
LOG.warn("Metrics intern cache overflow at "+ size() +" for "+ e); LOG.info("Metrics intern cache overflow at {} for {}", size(), e);
gotOverflow = true; gotOverflow = true;
} }
return overflow; return overflow;


@ -81,7 +81,7 @@ public class ShutdownHookManager {
LOG.error("ShutdownHookManger shutdown forcefully."); LOG.error("ShutdownHookManger shutdown forcefully.");
EXECUTOR.shutdownNow(); EXECUTOR.shutdownNow();
} }
LOG.info("ShutdownHookManger complete shutdown."); LOG.debug("ShutdownHookManger complete shutdown.");
} catch (InterruptedException ex) { } catch (InterruptedException ex) {
LOG.error("ShutdownHookManger interrupted while waiting for " + LOG.error("ShutdownHookManger interrupted while waiting for " +
"termination.", ex); "termination.", ex);


@ -51,6 +51,7 @@ The following table lists the configuration property names that are deprecated i
| dfs.secondary.http.address | dfs.namenode.secondary.http-address | | dfs.secondary.http.address | dfs.namenode.secondary.http-address |
| dfs.socket.timeout | dfs.client.socket-timeout | | dfs.socket.timeout | dfs.client.socket-timeout |
| dfs.umaskmode | fs.permissions.umask-mode | | dfs.umaskmode | fs.permissions.umask-mode |
| dfs.web.ugi | hadoop.http.staticuser.user |
| dfs.write.packet.size | dfs.client-write-packet-size | | dfs.write.packet.size | dfs.client-write-packet-size |
| fs.checkpoint.dir | dfs.namenode.checkpoint.dir | | fs.checkpoint.dir | dfs.namenode.checkpoint.dir |
| fs.checkpoint.edits.dir | dfs.namenode.checkpoint.edits.dir | | fs.checkpoint.edits.dir | dfs.namenode.checkpoint.edits.dir |

View File

@ -120,7 +120,8 @@ Return the data at the current position.
### <a name="InputStream.read.buffer[]"></a> `InputStream.read(buffer[], offset, length)` ### <a name="InputStream.read.buffer[]"></a> `InputStream.read(buffer[], offset, length)`
Read `length` bytes of data into the destination buffer, starting at offset Read `length` bytes of data into the destination buffer, starting at offset
`offset` `offset`. The source of the data is the current position of the stream,
as implicitly set in `pos`
#### Preconditions #### Preconditions
@ -129,6 +130,7 @@ Read `length` bytes of data into the destination buffer, starting at offset
length >= 0 length >= 0
offset < len(buffer) offset < len(buffer)
length <= len(buffer) - offset length <= len(buffer) - offset
pos >= 0 else raise EOFException, IOException
Exceptions that may be raised on precondition failure are Exceptions that may be raised on precondition failure are
@ -136,20 +138,39 @@ Exceptions that may be raised on precondition failure are
ArrayIndexOutOfBoundsException ArrayIndexOutOfBoundsException
RuntimeException RuntimeException
Not all filesystems check the `isOpen` state.
#### Postconditions #### Postconditions
if length == 0 : if length == 0 :
result = 0 result = 0
elseif pos > len(data): else if pos > len(data):
result -1 result = -1
else else
let l = min(length, len(data)-pos) : let l = min(length, len(data)-pos) :
buffer' = buffer where forall i in [0..l-1]: buffer' = buffer where forall i in [0..l-1]:
buffer'[o+i] = data[pos+i] buffer'[o+i] = data[pos+i]
FSDIS' = (pos+l, data, true) FSDIS' = (pos+l, data, true)
result = l result = l
The `java.io` API states that if the amount of data to be read (i.e. `length`)
is greater than zero, then the call must block until the amount of data available
is greater than zero, that is, until there is some data. The call is not required
to return when the buffer is full, or indeed to block until there is no data left in
the stream.
That is, rather than `l` being simply defined as `min(length, len(data)-pos)`,
it strictly is an integer in the range `1..min(length, len(data)-pos)`.
While the caller may expect as much of the buffer as possible to be filled
in, it is within the specification for an implementation to always return
a smaller number, perhaps only ever 1 byte.
What is critical is that unless the destination buffer size is 0, the call
must block until at least one byte is returned. Thus, for any data source
of length greater than zero, repeated invocations of this `read()` operation
will eventually read all the data.
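A caller that needs a full buffer must therefore loop. The sketch below is illustrative only, assuming a generic `java.io.InputStream`; the class and method names are invented for this example and are not part of the specification:

```java
import java.io.EOFException;
import java.io.IOException;
import java.io.InputStream;

public final class ShortReadExample {
  private ShortReadExample() { }

  /**
   * Fill the buffer completely, tolerating short reads.
   * Each read() may return as little as one byte; a return of -1
   * means the stream was exhausted before the buffer was full.
   */
  public static void readIntoBuffer(InputStream in, byte[] buffer)
      throws IOException {
    int filled = 0;
    while (filled < buffer.length) {
      int n = in.read(buffer, filled, buffer.length - filled);
      if (n < 0) {
        throw new EOFException("Stream ended after " + filled + " bytes");
      }
      filled += n;   // n is anywhere in 1..(buffer.length - filled)
    }
  }
}
```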
### <a name="Seekable.seek"></a>`Seekable.seek(s)` ### <a name="Seekable.seek"></a>`Seekable.seek(s)`
@ -279,6 +300,9 @@ on the underlying stream:
read(dest3, ... len3) -> dest3[0..len3 - 1] = read(dest3, ... len3) -> dest3[0..len3 - 1] =
[data(FS, path, pos3), data(FS, path, pos3 + 1) ... data(FS, path, pos3 + len3 - 1] [data(FS, path, pos3), data(FS, path, pos3 + 1) ... data(FS, path, pos3 + len3 - 1]
Note that implementations are not required to be atomic; the intermediate state
of the operation (the change in the value of `getPos()`) may be visible.
#### Implementation preconditions #### Implementation preconditions
Not all `FSDataInputStream` implementations support these operations. Those that do Not all `FSDataInputStream` implementations support these operations. Those that do
@ -287,7 +311,7 @@ interface.
supported(FSDIS, Seekable.seek) else raise [UnsupportedOperationException, IOException] supported(FSDIS, Seekable.seek) else raise [UnsupportedOperationException, IOException]
This could be considered obvious: if a stream is not Seekable, a client This could be considered obvious: if a stream is not `Seekable`, a client
cannot seek to a location. It is also a side effect of the cannot seek to a location. It is also a side effect of the
base class implementation, which uses `Seekable.seek()`. base class implementation, which uses `Seekable.seek()`.
@ -304,14 +328,14 @@ For any operations that fail, the contents of the destination
`buffer` are undefined. Implementations may overwrite part `buffer` are undefined. Implementations may overwrite part
or all of the buffer before reporting a failure. or all of the buffer before reporting a failure.
### `int PositionedReadable.read(position, buffer, offset, length)` ### `int PositionedReadable.read(position, buffer, offset, length)`
Read as much data as possible into the buffer space allocated for it.
#### Preconditions #### Preconditions
position > 0 else raise [IllegalArgumentException, RuntimeException] position >= 0 else raise [EOFException, IOException, IllegalArgumentException, RuntimeException]
len(buffer) + offset < len(data) else raise [IndexOutOfBoundException, RuntimeException] len(buffer) - offset >= length else raise [IndexOutOfBoundException, RuntimeException]
length >= 0 length >= 0
offset >= 0 offset >= 0
@ -324,23 +348,36 @@ of data available from the specified position:
buffer'[offset..(offset+available-1)] = data[position..position+available -1] buffer'[offset..(offset+available-1)] = data[position..position+available -1]
result = available result = available
1. A return value of -1 means that the stream had no more available data.
1. An invocation with `length==0` implicitly does not read any data;
implementations may short-cut the operation and omit any IO. In such instances,
checks for the stream being at the end of the file may be omitted.
1. If an IO exception occurs during the read operation(s),
the final state of `buffer` is undefined.
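As an illustration of these rules, here is a hedged sketch (the class, method and constants are assumptions made for this example, not Hadoop APIs beyond `FSDataInputStream`) that reads a byte range with repeated positioned `read()` calls, treating `-1` as the end of the data:

```java
import java.io.IOException;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public final class PositionedReadExample {
  private PositionedReadExample() { }

  /**
   * Read up to buf.length bytes starting at pos.
   * @return the number of bytes actually read, possibly 0 at/after EOF.
   */
  public static int readRange(FileSystem fs, Path path, long pos, byte[] buf)
      throws IOException {
    try (FSDataInputStream in = fs.open(path)) {
      int total = 0;
      while (total < buf.length) {
        // each call may return fewer bytes than requested, or -1 when there
        // is no data at this position
        int n = in.read(pos + total, buf, total, buf.length - total);
        if (n < 0) {
          break;
        }
        total += n;
      }
      // positioned reads leave the stream's own position (getPos()) unchanged
      return total;
    }
  }
}
```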
### `void PositionedReadable.readFully(position, buffer, offset, length)` ### `void PositionedReadable.readFully(position, buffer, offset, length)`
Read exactly `length` bytes of data into the buffer, failing if there is not
enough data available.
#### Preconditions #### Preconditions
position > 0 else raise [IllegalArgumentException, RuntimeException] position >= 0 else raise [EOFException, IOException, IllegalArgumentException, RuntimeException]
length >= 0 length >= 0
offset >= 0 offset >= 0
len(buffer) - offset >= length else raise [IndexOutOfBoundException, RuntimeException]
(position + length) <= len(data) else raise [EOFException, IOException] (position + length) <= len(data) else raise [EOFException, IOException]
len(buffer) + offset < len(data)
If an IO exception occurs during the read operation(s),
the final state of `buffer` is undefined.
If there is not enough data in the input stream to satisfy the request,
the final state of `buffer` is undefined.
#### Postconditions #### Postconditions
The amount of data read is the less of the length or the amount The buffer from offset `offset` is filled with the data starting at `position`
of data available from the specified position:
let available = min(length, len(data)-position)
buffer'[offset..(offset+length-1)] = data[position..(position + length -1)] buffer'[offset..(offset+length-1)] = data[position..(position + length -1)]
### `PositionedReadable.readFully(position, buffer)` ### `PositionedReadable.readFully(position, buffer)`
@ -349,6 +386,9 @@ The semantics of this are exactly equivalent to
readFully(position, buffer, 0, len(buffer)) readFully(position, buffer, 0, len(buffer))
That is, the buffer is filled entirely with the contents of the input source
from position `position`.
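A short, hypothetical usage sketch (the header length and helper are assumptions for illustration): reading a fixed-length header with `readFully` either fills the buffer completely or fails, typically with an `EOFException` when the file is too short:

```java
import java.io.IOException;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public final class ReadFullyExample {
  // hypothetical fixed header length, purely for illustration
  private static final int HEADER_LEN = 16;

  private ReadFullyExample() { }

  /**
   * Read the first HEADER_LEN bytes of a file.
   * readFully either fills the whole buffer or throws; an EOFException
   * is expected if the file holds fewer than HEADER_LEN bytes.
   */
  public static byte[] readHeader(FileSystem fs, Path path) throws IOException {
    byte[] header = new byte[HEADER_LEN];
    try (FSDataInputStream in = fs.open(path)) {
      // equivalent to in.readFully(0, header, 0, header.length)
      in.readFully(0, header);
    }
    return header;
  }
}
```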
## Consistency ## Consistency

View File

@ -17,6 +17,7 @@
*/ */
package org.apache.hadoop.crypto; package org.apache.hadoop.crypto;
import java.io.EOFException;
import java.io.IOException; import java.io.IOException;
import java.io.InputStream; import java.io.InputStream;
import java.io.OutputStream; import java.io.OutputStream;
@ -29,6 +30,7 @@ import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory; import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.fs.ByteBufferReadable; import org.apache.hadoop.fs.ByteBufferReadable;
import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FSExceptionMessages;
import org.apache.hadoop.fs.HasEnhancedByteBufferAccess; import org.apache.hadoop.fs.HasEnhancedByteBufferAccess;
import org.apache.hadoop.fs.PositionedReadable; import org.apache.hadoop.fs.PositionedReadable;
import org.apache.hadoop.fs.ReadOption; import org.apache.hadoop.fs.ReadOption;
@ -339,7 +341,7 @@ public abstract class CryptoStreamsTestBase {
try { try {
((PositionedReadable) in).readFully(pos, result); ((PositionedReadable) in).readFully(pos, result);
Assert.fail("Read fully exceeds maximum length should fail."); Assert.fail("Read fully exceeds maximum length should fail.");
} catch (IOException e) { } catch (EOFException e) {
} }
} }
@ -365,9 +367,9 @@ public abstract class CryptoStreamsTestBase {
try { try {
seekCheck(in, -3); seekCheck(in, -3);
Assert.fail("Seek to negative offset should fail."); Assert.fail("Seek to negative offset should fail.");
} catch (IllegalArgumentException e) { } catch (EOFException e) {
GenericTestUtils.assertExceptionContains("Cannot seek to negative " + GenericTestUtils.assertExceptionContains(
"offset", e); FSExceptionMessages.NEGATIVE_SEEK, e);
} }
Assert.assertEquals(pos, ((Seekable) in).getPos()); Assert.assertEquals(pos, ((Seekable) in).getPos());

View File

@ -273,6 +273,7 @@ public abstract class FileContextMainOperationsBaseTest {
} }
} }
@Test
public void testListStatusThrowsExceptionForNonExistentFile() public void testListStatusThrowsExceptionForNonExistentFile()
throws Exception { throws Exception {
try { try {

View File

@ -79,27 +79,28 @@ public class TestDU extends TestCase {
Thread.sleep(5000); // let the metadata updater catch up Thread.sleep(5000); // let the metadata updater catch up
DU du = new DU(file, 10000); DU du = new DU(file, 10000, -1);
du.start(); du.init();
long duSize = du.getUsed(); long duSize = du.getUsed();
du.shutdown(); du.close();
assertTrue("Invalid on-disk size", assertTrue("Invalid on-disk size",
duSize >= writtenSize && duSize >= writtenSize &&
writtenSize <= (duSize + slack)); writtenSize <= (duSize + slack));
//test with 0 interval, will not launch thread //test with 0 interval, will not launch thread
du = new DU(file, 0); du = new DU(file, 0, -1);
du.start(); du.init();
duSize = du.getUsed(); duSize = du.getUsed();
du.shutdown(); du.close();
assertTrue("Invalid on-disk size", assertTrue("Invalid on-disk size",
duSize >= writtenSize && duSize >= writtenSize &&
writtenSize <= (duSize + slack)); writtenSize <= (duSize + slack));
//test without launching thread //test without launching thread
du = new DU(file, 10000); du = new DU(file, 10000, -1);
du.init();
duSize = du.getUsed(); duSize = du.getUsed();
assertTrue("Invalid on-disk size", assertTrue("Invalid on-disk size",
@ -111,8 +112,8 @@ public class TestDU extends TestCase {
assertTrue(file.createNewFile()); assertTrue(file.createNewFile());
Configuration conf = new Configuration(); Configuration conf = new Configuration();
conf.setLong(CommonConfigurationKeys.FS_DU_INTERVAL_KEY, 10000L); conf.setLong(CommonConfigurationKeys.FS_DU_INTERVAL_KEY, 10000L);
DU du = new DU(file, conf); DU du = new DU(file, 10000L, -1);
du.decDfsUsed(Long.MAX_VALUE); du.incDfsUsed(-Long.MAX_VALUE);
long duSize = du.getUsed(); long duSize = du.getUsed();
assertTrue(String.valueOf(duSize), duSize >= 0L); assertTrue(String.valueOf(duSize), duSize >= 0L);
} }
@ -121,7 +122,7 @@ public class TestDU extends TestCase {
File file = new File(DU_DIR, "dataX"); File file = new File(DU_DIR, "dataX");
createFile(file, 8192); createFile(file, 8192);
DU du = new DU(file, 3000, 1024); DU du = new DU(file, 3000, 1024);
du.start(); du.init();
assertTrue("Initial usage setting not honored", du.getUsed() == 1024); assertTrue("Initial usage setting not honored", du.getUsed() == 1024);
// wait until the first du runs. // wait until the first du runs.
@ -131,4 +132,7 @@ public class TestDU extends TestCase {
assertTrue("Usage didn't get updated", du.getUsed() == 8192); assertTrue("Usage didn't get updated", du.getUsed() == 8192);
} }
} }

View File

@ -0,0 +1,133 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs;
import org.apache.hadoop.conf.Configuration;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
import java.io.File;
import java.io.IOException;
import static org.junit.Assert.*;
public class TestGetSpaceUsed {
final static private File DIR = new File(
System.getProperty("test.build.data", "/tmp"), "TestGetSpaceUsed");
@Before
public void setUp() {
FileUtil.fullyDelete(DIR);
assertTrue(DIR.mkdirs());
}
@After
public void tearDown() throws IOException {
FileUtil.fullyDelete(DIR);
}
/**
* Test that the builder can create the class specified in the configuration.
*/
@Test
public void testBuilderConf() throws Exception {
File file = new File(DIR, "testBuilderConf");
assertTrue(file.createNewFile());
Configuration conf = new Configuration();
conf.set("fs.getspaceused.classname", DummyDU.class.getName());
CachingGetSpaceUsed instance =
(CachingGetSpaceUsed) new CachingGetSpaceUsed.Builder()
.setPath(file)
.setInterval(0)
.setConf(conf)
.build();
assertNotNull(instance);
assertTrue(instance instanceof DummyDU);
assertFalse(instance.running());
instance.close();
}
@Test
public void testBuildInitial() throws Exception {
File file = new File(DIR, "testBuildInitial");
assertTrue(file.createNewFile());
CachingGetSpaceUsed instance =
(CachingGetSpaceUsed) new CachingGetSpaceUsed.Builder()
.setPath(file)
.setInitialUsed(90210)
.setKlass(DummyDU.class)
.build();
assertEquals(90210, instance.getUsed());
instance.close();
}
@Test
public void testBuildInterval() throws Exception {
File file = new File(DIR, "testBuildInitial");
assertTrue(file.createNewFile());
CachingGetSpaceUsed instance =
(CachingGetSpaceUsed) new CachingGetSpaceUsed.Builder()
.setPath(file)
.setInitialUsed(90210)
.setInterval(50060)
.setKlass(DummyDU.class)
.build();
assertEquals(50060, instance.getRefreshInterval());
instance.close();
}
@Test
public void testBuildNonCaching() throws Exception {
File file = new File(DIR, "testBuildNonCaching");
assertTrue(file.createNewFile());
GetSpaceUsed instance = new CachingGetSpaceUsed.Builder()
.setPath(file)
.setInitialUsed(90210)
.setInterval(50060)
.setKlass(DummyGetSpaceUsed.class)
.build();
assertEquals(300, instance.getUsed());
assertTrue(instance instanceof DummyGetSpaceUsed);
}
private static class DummyDU extends CachingGetSpaceUsed {
public DummyDU(Builder builder) throws IOException {
// Push to the base class.
// Most times that's all that will need to be done.
super(builder);
}
@Override
protected void refresh() {
// This is a test so don't du anything.
}
}
private static class DummyGetSpaceUsed implements GetSpaceUsed {
public DummyGetSpaceUsed(GetSpaceUsed.Builder builder) {
}
@Override public long getUsed() throws IOException {
return 300;
}
}
}

View File

@ -52,11 +52,8 @@ public abstract class AbstractContractAppendTest extends AbstractFSContractTestB
public void testAppendToEmptyFile() throws Throwable { public void testAppendToEmptyFile() throws Throwable {
touch(getFileSystem(), target); touch(getFileSystem(), target);
byte[] dataset = dataset(256, 'a', 'z'); byte[] dataset = dataset(256, 'a', 'z');
FSDataOutputStream outputStream = getFileSystem().append(target); try (FSDataOutputStream outputStream = getFileSystem().append(target)) {
try {
outputStream.write(dataset); outputStream.write(dataset);
} finally {
outputStream.close();
} }
byte[] bytes = ContractTestUtils.readDataset(getFileSystem(), target, byte[] bytes = ContractTestUtils.readDataset(getFileSystem(), target,
dataset.length); dataset.length);

View File

@ -53,7 +53,7 @@ public abstract class AbstractContractConcatTest extends AbstractFSContractTestB
target = new Path(testPath, "target"); target = new Path(testPath, "target");
byte[] block = dataset(TEST_FILE_LEN, 0, 255); byte[] block = dataset(TEST_FILE_LEN, 0, 255);
createFile(getFileSystem(), srcFile, false, block); createFile(getFileSystem(), srcFile, true, block);
touch(getFileSystem(), zeroByteFile); touch(getFileSystem(), zeroByteFile);
} }

View File

@ -123,7 +123,7 @@ public abstract class AbstractContractCreateTest extends
} catch (AssertionError failure) { } catch (AssertionError failure) {
if (isSupported(IS_BLOBSTORE)) { if (isSupported(IS_BLOBSTORE)) {
// file/directory hack surfaces here // file/directory hack surfaces here
throw new AssumptionViolatedException(failure.toString()).initCause(failure); throw new AssumptionViolatedException(failure.toString(), failure);
} }
// else: rethrow // else: rethrow
throw failure; throw failure;
@ -163,13 +163,11 @@ public abstract class AbstractContractCreateTest extends
public void testCreatedFileIsImmediatelyVisible() throws Throwable { public void testCreatedFileIsImmediatelyVisible() throws Throwable {
describe("verify that a newly created file exists as soon as open returns"); describe("verify that a newly created file exists as soon as open returns");
Path path = path("testCreatedFileIsImmediatelyVisible"); Path path = path("testCreatedFileIsImmediatelyVisible");
FSDataOutputStream out = null; try(FSDataOutputStream out = getFileSystem().create(path,
try {
out = getFileSystem().create(path,
false, false,
4096, 4096,
(short) 1, (short) 1,
1024); 1024)) {
if (!getFileSystem().exists(path)) { if (!getFileSystem().exists(path)) {
if (isSupported(IS_BLOBSTORE)) { if (isSupported(IS_BLOBSTORE)) {
@ -180,8 +178,6 @@ public abstract class AbstractContractCreateTest extends
assertPathExists("expected path to be visible before anything written", assertPathExists("expected path to be visible before anything written",
path); path);
} }
} finally {
IOUtils.closeStream(out);
} }
} }
} }

View File

@ -47,7 +47,7 @@ public abstract class AbstractContractDeleteTest extends
@Test @Test
public void testDeleteNonexistentPathRecursive() throws Throwable { public void testDeleteNonexistentPathRecursive() throws Throwable {
Path path = path("testDeleteNonexistentPathRecursive"); Path path = path("testDeleteNonexistentPathRecursive");
ContractTestUtils.assertPathDoesNotExist(getFileSystem(), "leftover", path); assertPathDoesNotExist("leftover", path);
ContractTestUtils.rejectRootOperation(path); ContractTestUtils.rejectRootOperation(path);
assertFalse("Returned true attempting to delete" assertFalse("Returned true attempting to delete"
+ " a nonexistent path " + path, + " a nonexistent path " + path,
@ -58,7 +58,7 @@ public abstract class AbstractContractDeleteTest extends
@Test @Test
public void testDeleteNonexistentPathNonRecursive() throws Throwable { public void testDeleteNonexistentPathNonRecursive() throws Throwable {
Path path = path("testDeleteNonexistentPathNonRecursive"); Path path = path("testDeleteNonexistentPathNonRecursive");
ContractTestUtils.assertPathDoesNotExist(getFileSystem(), "leftover", path); assertPathDoesNotExist("leftover", path);
ContractTestUtils.rejectRootOperation(path); ContractTestUtils.rejectRootOperation(path);
assertFalse("Returned true attempting to recursively delete" assertFalse("Returned true attempting to recursively delete"
+ " a nonexistent path " + path, + " a nonexistent path " + path,
@ -81,7 +81,7 @@ public abstract class AbstractContractDeleteTest extends
//expected //expected
handleExpectedException(expected); handleExpectedException(expected);
} }
ContractTestUtils.assertIsDirectory(getFileSystem(), path); assertIsDirectory(path);
} }
@Test @Test
@ -92,7 +92,7 @@ public abstract class AbstractContractDeleteTest extends
ContractTestUtils.writeTextFile(getFileSystem(), file, "goodbye, world", ContractTestUtils.writeTextFile(getFileSystem(), file, "goodbye, world",
true); true);
assertDeleted(path, true); assertDeleted(path, true);
ContractTestUtils.assertPathDoesNotExist(getFileSystem(), "not deleted", file); assertPathDoesNotExist("not deleted", file);
} }
@Test @Test
@ -100,12 +100,11 @@ public abstract class AbstractContractDeleteTest extends
mkdirs(path("testDeleteDeepEmptyDir/d1/d2/d3/d4")); mkdirs(path("testDeleteDeepEmptyDir/d1/d2/d3/d4"));
assertDeleted(path("testDeleteDeepEmptyDir/d1/d2/d3"), true); assertDeleted(path("testDeleteDeepEmptyDir/d1/d2/d3"), true);
FileSystem fs = getFileSystem(); assertPathDoesNotExist(
ContractTestUtils.assertPathDoesNotExist(fs,
"not deleted", path("testDeleteDeepEmptyDir/d1/d2/d3/d4")); "not deleted", path("testDeleteDeepEmptyDir/d1/d2/d3/d4"));
ContractTestUtils.assertPathDoesNotExist(fs, assertPathDoesNotExist(
"not deleted", path("testDeleteDeepEmptyDir/d1/d2/d3")); "not deleted", path("testDeleteDeepEmptyDir/d1/d2/d3"));
ContractTestUtils.assertPathExists(fs, "parent dir is deleted", assertPathExists( "parent dir is deleted",
path("testDeleteDeepEmptyDir/d1/d2")); path("testDeleteDeepEmptyDir/d1/d2"));
} }
@ -117,8 +116,7 @@ public abstract class AbstractContractDeleteTest extends
Path file = new Path(path, "childfile"); Path file = new Path(path, "childfile");
ContractTestUtils.writeTextFile(getFileSystem(), file, ContractTestUtils.writeTextFile(getFileSystem(), file,
"single file to be deleted.", true); "single file to be deleted.", true);
ContractTestUtils.assertPathExists(getFileSystem(), assertPathExists("single file not created", file);
"single file not created", file);
assertDeleted(file, false); assertDeleted(file, false);
} }
} }

View File

@ -67,12 +67,9 @@ public abstract class AbstractContractMkdirTest extends AbstractFSContractTestBa
boolean made = fs.mkdirs(path); boolean made = fs.mkdirs(path);
fail("mkdirs did not fail over a file but returned " + made fail("mkdirs did not fail over a file but returned " + made
+ "; " + ls(path)); + "; " + ls(path));
} catch (ParentNotDirectoryException e) { } catch (ParentNotDirectoryException | FileAlreadyExistsException e) {
//parent is a directory //parent is a directory
handleExpectedException(e); handleExpectedException(e);
} catch (FileAlreadyExistsException e) {
//also allowed as an exception (HDFS)
handleExpectedException(e);;
} catch (IOException e) { } catch (IOException e) {
//here the FS says "no create" //here the FS says "no create"
handleRelaxedException("mkdirs", "FileAlreadyExistsException", e); handleRelaxedException("mkdirs", "FileAlreadyExistsException", e);
@ -97,11 +94,9 @@ public abstract class AbstractContractMkdirTest extends AbstractFSContractTestBa
boolean made = fs.mkdirs(child); boolean made = fs.mkdirs(child);
fail("mkdirs did not fail over a file but returned " + made fail("mkdirs did not fail over a file but returned " + made
+ "; " + ls(path)); + "; " + ls(path));
} catch (ParentNotDirectoryException e) { } catch (ParentNotDirectoryException | FileAlreadyExistsException e) {
//parent is a directory //parent is a directory
handleExpectedException(e); handleExpectedException(e);
} catch (FileAlreadyExistsException e) {
handleExpectedException(e);
} catch (IOException e) { } catch (IOException e) {
handleRelaxedException("mkdirs", "ParentNotDirectoryException", e); handleRelaxedException("mkdirs", "ParentNotDirectoryException", e);
} }

View File

@ -125,10 +125,10 @@ public abstract class AbstractContractOpenTest extends AbstractFSContractTestBas
createFile(getFileSystem(), path, false, block); createFile(getFileSystem(), path, false, block);
//open first //open first
FSDataInputStream instream1 = getFileSystem().open(path); FSDataInputStream instream1 = getFileSystem().open(path);
int c = instream1.read();
assertEquals(0,c);
FSDataInputStream instream2 = null; FSDataInputStream instream2 = null;
try { try {
int c = instream1.read();
assertEquals(0,c);
instream2 = getFileSystem().open(path); instream2 = getFileSystem().open(path);
assertEquals("first read of instream 2", 0, instream2.read()); assertEquals("first read of instream 2", 0, instream2.read());
assertEquals("second read of instream 1", 1, instream1.read()); assertEquals("second read of instream 1", 1, instream1.read());

View File

@ -26,8 +26,7 @@ import org.junit.Test;
import java.io.FileNotFoundException; import java.io.FileNotFoundException;
import java.io.IOException; import java.io.IOException;
import static org.apache.hadoop.fs.contract.ContractTestUtils.dataset; import static org.apache.hadoop.fs.contract.ContractTestUtils.*;
import static org.apache.hadoop.fs.contract.ContractTestUtils.writeDataset;
/** /**
* Test creating files, overwrite options &c * Test creating files, overwrite options &c
@ -46,9 +45,9 @@ public abstract class AbstractContractRenameTest extends
boolean rename = rename(renameSrc, renameTarget); boolean rename = rename(renameSrc, renameTarget);
assertTrue("rename("+renameSrc+", "+ renameTarget+") returned false", assertTrue("rename("+renameSrc+", "+ renameTarget+") returned false",
rename); rename);
ContractTestUtils.assertListStatusFinds(getFileSystem(), assertListStatusFinds(getFileSystem(),
renameTarget.getParent(), renameTarget); renameTarget.getParent(), renameTarget);
ContractTestUtils.verifyFileContents(getFileSystem(), renameTarget, data); verifyFileContents(getFileSystem(), renameTarget, data);
} }
@Test @Test
@ -129,7 +128,7 @@ public abstract class AbstractContractRenameTest extends
} }
// verify that the destination file is as expected based on the expected // verify that the destination file is as expected based on the expected
// outcome // outcome
ContractTestUtils.verifyFileContents(getFileSystem(), destFile, verifyFileContents(getFileSystem(), destFile,
destUnchanged? destData: srcData); destUnchanged? destData: srcData);
} }
@ -154,7 +153,7 @@ public abstract class AbstractContractRenameTest extends
Path renamedSrc = new Path(destDir, sourceSubdir); Path renamedSrc = new Path(destDir, sourceSubdir);
assertIsFile(destFilePath); assertIsFile(destFilePath);
assertIsDirectory(renamedSrc); assertIsDirectory(renamedSrc);
ContractTestUtils.verifyFileContents(fs, destFilePath, destDateset); verifyFileContents(fs, destFilePath, destDateset);
assertTrue("rename returned false though the contents were copied", rename); assertTrue("rename returned false though the contents were copied", rename);
} }
@ -172,10 +171,10 @@ public abstract class AbstractContractRenameTest extends
boolean rename = rename(renameSrc, renameTarget); boolean rename = rename(renameSrc, renameTarget);
if (renameCreatesDestDirs) { if (renameCreatesDestDirs) {
assertTrue(rename); assertTrue(rename);
ContractTestUtils.verifyFileContents(getFileSystem(), renameTarget, data); verifyFileContents(getFileSystem(), renameTarget, data);
} else { } else {
assertFalse(rename); assertFalse(rename);
ContractTestUtils.verifyFileContents(getFileSystem(), renameSrc, data); verifyFileContents(getFileSystem(), renameSrc, data);
} }
} catch (FileNotFoundException e) { } catch (FileNotFoundException e) {
// allowed unless that rename flag is set // allowed unless that rename flag is set
@ -191,36 +190,36 @@ public abstract class AbstractContractRenameTest extends
final Path finalDir = new Path(renameTestDir, "dest"); final Path finalDir = new Path(renameTestDir, "dest");
FileSystem fs = getFileSystem(); FileSystem fs = getFileSystem();
boolean renameRemoveEmptyDest = isSupported(RENAME_REMOVE_DEST_IF_EMPTY_DIR); boolean renameRemoveEmptyDest = isSupported(RENAME_REMOVE_DEST_IF_EMPTY_DIR);
ContractTestUtils.rm(fs, renameTestDir, true, false); rm(fs, renameTestDir, true, false);
fs.mkdirs(srcDir); fs.mkdirs(srcDir);
fs.mkdirs(finalDir); fs.mkdirs(finalDir);
ContractTestUtils.writeTextFile(fs, new Path(srcDir, "source.txt"), writeTextFile(fs, new Path(srcDir, "source.txt"),
"this is the file in src dir", false); "this is the file in src dir", false);
ContractTestUtils.writeTextFile(fs, new Path(srcSubDir, "subfile.txt"), writeTextFile(fs, new Path(srcSubDir, "subfile.txt"),
"this is the file in src/sub dir", false); "this is the file in src/sub dir", false);
ContractTestUtils.assertPathExists(fs, "not created in src dir", assertPathExists("not created in src dir",
new Path(srcDir, "source.txt")); new Path(srcDir, "source.txt"));
ContractTestUtils.assertPathExists(fs, "not created in src/sub dir", assertPathExists("not created in src/sub dir",
new Path(srcSubDir, "subfile.txt")); new Path(srcSubDir, "subfile.txt"));
fs.rename(srcDir, finalDir); fs.rename(srcDir, finalDir);
// Accept both POSIX rename behavior and CLI rename behavior // Accept both POSIX rename behavior and CLI rename behavior
if (renameRemoveEmptyDest) { if (renameRemoveEmptyDest) {
// POSIX rename behavior // POSIX rename behavior
ContractTestUtils.assertPathExists(fs, "not renamed into dest dir", assertPathExists("not renamed into dest dir",
new Path(finalDir, "source.txt")); new Path(finalDir, "source.txt"));
ContractTestUtils.assertPathExists(fs, "not renamed into dest/sub dir", assertPathExists("not renamed into dest/sub dir",
new Path(finalDir, "sub/subfile.txt")); new Path(finalDir, "sub/subfile.txt"));
} else { } else {
// CLI rename behavior // CLI rename behavior
ContractTestUtils.assertPathExists(fs, "not renamed into dest dir", assertPathExists("not renamed into dest dir",
new Path(finalDir, "src1/source.txt")); new Path(finalDir, "src1/source.txt"));
ContractTestUtils.assertPathExists(fs, "not renamed into dest/sub dir", assertPathExists("not renamed into dest/sub dir",
new Path(finalDir, "src1/sub/subfile.txt")); new Path(finalDir, "src1/sub/subfile.txt"));
} }
ContractTestUtils.assertPathDoesNotExist(fs, "not deleted", assertPathDoesNotExist("not deleted",
new Path(srcDir, "source.txt")); new Path(srcDir, "source.txt"));
} }
} }

View File

@ -51,7 +51,7 @@ public abstract class AbstractContractRootDirectoryTest extends AbstractFSContra
Path dir = new Path("/testmkdirdepth1"); Path dir = new Path("/testmkdirdepth1");
assertPathDoesNotExist("directory already exists", dir); assertPathDoesNotExist("directory already exists", dir);
fs.mkdirs(dir); fs.mkdirs(dir);
ContractTestUtils.assertIsDirectory(getFileSystem(), dir); assertIsDirectory(dir);
assertPathExists("directory already exists", dir); assertPathExists("directory already exists", dir);
assertDeleted(dir, true); assertDeleted(dir, true);
} }
@ -61,10 +61,10 @@ public abstract class AbstractContractRootDirectoryTest extends AbstractFSContra
//extra sanity checks here to avoid support calls about complete loss of data //extra sanity checks here to avoid support calls about complete loss of data
skipIfUnsupported(TEST_ROOT_TESTS_ENABLED); skipIfUnsupported(TEST_ROOT_TESTS_ENABLED);
Path root = new Path("/"); Path root = new Path("/");
ContractTestUtils.assertIsDirectory(getFileSystem(), root); assertIsDirectory(root);
boolean deleted = getFileSystem().delete(root, true); boolean deleted = getFileSystem().delete(root, true);
LOG.info("rm / of empty dir result is {}", deleted); LOG.info("rm / of empty dir result is {}", deleted);
ContractTestUtils.assertIsDirectory(getFileSystem(), root); assertIsDirectory(root);
} }
@Test @Test
@ -75,7 +75,7 @@ public abstract class AbstractContractRootDirectoryTest extends AbstractFSContra
String touchfile = "/testRmNonEmptyRootDirNonRecursive"; String touchfile = "/testRmNonEmptyRootDirNonRecursive";
Path file = new Path(touchfile); Path file = new Path(touchfile);
ContractTestUtils.touch(getFileSystem(), file); ContractTestUtils.touch(getFileSystem(), file);
ContractTestUtils.assertIsDirectory(getFileSystem(), root); assertIsDirectory(root);
try { try {
boolean deleted = getFileSystem().delete(root, false); boolean deleted = getFileSystem().delete(root, false);
fail("non recursive delete should have raised an exception," + fail("non recursive delete should have raised an exception," +
@ -86,7 +86,7 @@ public abstract class AbstractContractRootDirectoryTest extends AbstractFSContra
} finally { } finally {
getFileSystem().delete(file, false); getFileSystem().delete(file, false);
} }
ContractTestUtils.assertIsDirectory(getFileSystem(), root); assertIsDirectory(root);
} }
@Test @Test
@ -94,11 +94,11 @@ public abstract class AbstractContractRootDirectoryTest extends AbstractFSContra
//extra sanity checks here to avoid support calls about complete loss of data //extra sanity checks here to avoid support calls about complete loss of data
skipIfUnsupported(TEST_ROOT_TESTS_ENABLED); skipIfUnsupported(TEST_ROOT_TESTS_ENABLED);
Path root = new Path("/"); Path root = new Path("/");
ContractTestUtils.assertIsDirectory(getFileSystem(), root); assertIsDirectory(root);
Path file = new Path("/testRmRootRecursive"); Path file = new Path("/testRmRootRecursive");
ContractTestUtils.touch(getFileSystem(), file); ContractTestUtils.touch(getFileSystem(), file);
boolean deleted = getFileSystem().delete(root, true); boolean deleted = getFileSystem().delete(root, true);
ContractTestUtils.assertIsDirectory(getFileSystem(), root); assertIsDirectory(root);
LOG.info("rm -rf / result is {}", deleted); LOG.info("rm -rf / result is {}", deleted);
if (deleted) { if (deleted) {
assertPathDoesNotExist("expected file to be deleted", file); assertPathDoesNotExist("expected file to be deleted", file);

View File

@ -21,6 +21,7 @@ package org.apache.hadoop.fs.contract;
import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeysPublic; import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
import org.apache.hadoop.fs.FSDataInputStream; import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IOUtils; import org.apache.hadoop.io.IOUtils;
import org.junit.Test; import org.junit.Test;
@ -31,9 +32,9 @@ import java.io.EOFException;
import java.io.IOException; import java.io.IOException;
import java.util.Random; import java.util.Random;
import static org.apache.hadoop.fs.contract.ContractTestUtils.cleanup;
import static org.apache.hadoop.fs.contract.ContractTestUtils.createFile; import static org.apache.hadoop.fs.contract.ContractTestUtils.createFile;
import static org.apache.hadoop.fs.contract.ContractTestUtils.dataset; import static org.apache.hadoop.fs.contract.ContractTestUtils.dataset;
import static org.apache.hadoop.fs.contract.ContractTestUtils.skip;
import static org.apache.hadoop.fs.contract.ContractTestUtils.touch; import static org.apache.hadoop.fs.contract.ContractTestUtils.touch;
import static org.apache.hadoop.fs.contract.ContractTestUtils.verifyRead; import static org.apache.hadoop.fs.contract.ContractTestUtils.verifyRead;
@ -46,7 +47,6 @@ public abstract class AbstractContractSeekTest extends AbstractFSContractTestBas
public static final int DEFAULT_RANDOM_SEEK_COUNT = 100; public static final int DEFAULT_RANDOM_SEEK_COUNT = 100;
private Path testPath;
private Path smallSeekFile; private Path smallSeekFile;
private Path zeroByteFile; private Path zeroByteFile;
private FSDataInputStream instream; private FSDataInputStream instream;
@ -56,13 +56,13 @@ public abstract class AbstractContractSeekTest extends AbstractFSContractTestBas
super.setup(); super.setup();
skipIfUnsupported(SUPPORTS_SEEK); skipIfUnsupported(SUPPORTS_SEEK);
//delete the test directory //delete the test directory
testPath = getContract().getTestPath();
smallSeekFile = path("seekfile.txt"); smallSeekFile = path("seekfile.txt");
zeroByteFile = path("zero.txt"); zeroByteFile = path("zero.txt");
byte[] block = dataset(TEST_FILE_LEN, 0, 255); byte[] block = dataset(TEST_FILE_LEN, 0, 255);
//this file now has a simple rule: offset => value //this file now has a simple rule: offset => value
createFile(getFileSystem(), smallSeekFile, false, block); FileSystem fs = getFileSystem();
touch(getFileSystem(), zeroByteFile); createFile(fs, smallSeekFile, true, block);
touch(fs, zeroByteFile);
} }
@Override @Override
@ -79,6 +79,21 @@ public abstract class AbstractContractSeekTest extends AbstractFSContractTestBas
super.teardown(); super.teardown();
} }
/**
* Skip a test case if the FS doesn't support positioned readable.
* This should hold automatically if the FS supports seek, even
* if it doesn't support seeking past the EOF.
* And, because this test suite requires seek to be supported, the
* feature is automatically assumed to be true unless stated otherwise.
*/
protected void assumeSupportsPositionedReadable() throws IOException {
// default is true: any filesystem that supports seek is assumed to support positioned reads,
if (!getContract().isSupported(SUPPORTS_POSITIONED_READABLE, true)) {
skip("Skipping as unsupported feature: "
+ SUPPORTS_POSITIONED_READABLE);
}
}
@Test @Test
public void testSeekZeroByteFile() throws Throwable { public void testSeekZeroByteFile() throws Throwable {
describe("seek and read a 0 byte file"); describe("seek and read a 0 byte file");
@ -282,6 +297,7 @@ public abstract class AbstractContractSeekTest extends AbstractFSContractTestBas
public void testPositionedBulkReadDoesntChangePosition() throws Throwable { public void testPositionedBulkReadDoesntChangePosition() throws Throwable {
describe( describe(
"verify that a positioned read does not change the getPos() value"); "verify that a positioned read does not change the getPos() value");
assumeSupportsPositionedReadable();
Path testSeekFile = path("bigseekfile.txt"); Path testSeekFile = path("bigseekfile.txt");
byte[] block = dataset(65536, 0, 255); byte[] block = dataset(65536, 0, 255);
createFile(getFileSystem(), testSeekFile, false, block); createFile(getFileSystem(), testSeekFile, false, block);
@ -290,8 +306,9 @@ public abstract class AbstractContractSeekTest extends AbstractFSContractTestBas
assertTrue(-1 != instream.read()); assertTrue(-1 != instream.read());
assertEquals(40000, instream.getPos()); assertEquals(40000, instream.getPos());
byte[] readBuffer = new byte[256]; int v = 256;
instream.read(128, readBuffer, 0, readBuffer.length); byte[] readBuffer = new byte[v];
assertEquals(v, instream.read(128, readBuffer, 0, v));
//have gone back //have gone back
assertEquals(40000, instream.getPos()); assertEquals(40000, instream.getPos());
//content is the same too //content is the same too
@ -317,12 +334,11 @@ public abstract class AbstractContractSeekTest extends AbstractFSContractTestBas
Path randomSeekFile = path("testrandomseeks.bin"); Path randomSeekFile = path("testrandomseeks.bin");
createFile(getFileSystem(), randomSeekFile, false, buf); createFile(getFileSystem(), randomSeekFile, false, buf);
Random r = new Random(); Random r = new Random();
FSDataInputStream stm = getFileSystem().open(randomSeekFile);
// Record the sequence of seeks and reads which trigger a failure. // Record the sequence of seeks and reads which trigger a failure.
int[] seeks = new int[10]; int[] seeks = new int[10];
int[] reads = new int[10]; int[] reads = new int[10];
try { try (FSDataInputStream stm = getFileSystem().open(randomSeekFile)) {
for (int i = 0; i < limit; i++) { for (int i = 0; i < limit; i++) {
int seekOff = r.nextInt(buf.length); int seekOff = r.nextInt(buf.length);
int toRead = r.nextInt(Math.min(buf.length - seekOff, 32000)); int toRead = r.nextInt(Math.min(buf.length - seekOff, 32000));
@ -336,13 +352,232 @@ public abstract class AbstractContractSeekTest extends AbstractFSContractTestBas
sb.append("Sequence of actions:\n"); sb.append("Sequence of actions:\n");
for (int j = 0; j < seeks.length; j++) { for (int j = 0; j < seeks.length; j++) {
sb.append("seek @ ").append(seeks[j]).append(" ") sb.append("seek @ ").append(seeks[j]).append(" ")
.append("read ").append(reads[j]).append("\n"); .append("read ").append(reads[j]).append("\n");
} }
LOG.error(sb.toString()); LOG.error(sb.toString());
throw afe; throw afe;
} finally {
stm.close();
} }
} }
@Test
public void testReadFullyZeroByteFile() throws Throwable {
describe("readFully against a 0 byte file");
assumeSupportsPositionedReadable();
instream = getFileSystem().open(zeroByteFile);
assertEquals(0, instream.getPos());
byte[] buffer = new byte[1];
instream.readFully(0, buffer, 0, 0);
assertEquals(0, instream.getPos());
// seek to 0 read 0 bytes from it
instream.seek(0);
assertEquals(0, instream.read(buffer, 0, 0));
}
@Test
public void testReadFullyPastEOFZeroByteFile() throws Throwable {
assumeSupportsPositionedReadable();
describe("readFully past the EOF of a 0 byte file");
instream = getFileSystem().open(zeroByteFile);
byte[] buffer = new byte[1];
// try to read past end of file
try {
instream.readFully(0, buffer, 0, 16);
fail("Expected an exception");
} catch (IllegalArgumentException | IndexOutOfBoundsException
| EOFException e) {
// expected
}
}
@Test
public void testReadFullySmallFile() throws Throwable {
describe("readFully operations");
assumeSupportsPositionedReadable();
instream = getFileSystem().open(smallSeekFile);
byte[] buffer = new byte[256];
// expect negative length to fail
try {
instream.readFully(0, buffer, 0, -16);
fail("Expected an exception");
} catch (IllegalArgumentException | IndexOutOfBoundsException e) {
// expected
}
// negative offset into buffer
try {
instream.readFully(0, buffer, -1, 16);
fail("Expected an exception");
} catch (IllegalArgumentException | IndexOutOfBoundsException e) {
// expected
}
// expect negative position to fail, ideally with EOF
try {
instream.readFully(-1, buffer);
fail("Expected an exception");
} catch (EOFException e) {
handleExpectedException(e);
} catch (IOException | IllegalArgumentException | IndexOutOfBoundsException e) {
handleRelaxedException("readFully with a negative position ",
"EOFException",
e);
}
// read more than the offset allows
try {
instream.readFully(0, buffer, buffer.length - 8, 16);
fail("Expected an exception");
} catch (IllegalArgumentException | IndexOutOfBoundsException e) {
// expected
}
// read properly
assertEquals(0, instream.getPos());
instream.readFully(0, buffer);
assertEquals(0, instream.getPos());
// now read the entire file in one go
byte[] fullFile = new byte[TEST_FILE_LEN];
instream.readFully(0, fullFile);
assertEquals(0, instream.getPos());
try {
instream.readFully(16, fullFile);
fail("Expected an exception");
} catch (EOFException e) {
handleExpectedException(e);
} catch (IOException e) {
handleRelaxedException("readFully which reads past EOF ",
"EOFException",
e);
}
}
@Test
public void testReadFullyPastEOF() throws Throwable {
describe("readFully past the EOF of a file");
assumeSupportsPositionedReadable();
instream = getFileSystem().open(smallSeekFile);
byte[] buffer = new byte[256];
// now read past the end of the file
try {
instream.readFully(TEST_FILE_LEN + 1, buffer);
fail("Expected an exception");
} catch (EOFException e) {
handleExpectedException(e);
} catch (IOException e) {
handleRelaxedException("readFully with an offset past EOF ",
"EOFException",
e);
}
// read zero bytes from an offset past EOF.
try {
instream.readFully(TEST_FILE_LEN + 1, buffer, 0, 0);
// a zero byte read may fail-fast
LOG.info("Filesystem short-circuits 0-byte reads");
} catch (EOFException e) {
handleExpectedException(e);
} catch (IOException e) {
handleRelaxedException("readFully(0 bytes) with an offset past EOF ",
"EOFException",
e);
}
}
@Test
public void testReadFullyZeroBytebufferPastEOF() throws Throwable {
describe("readFully zero bytes from an offset past EOF");
assumeSupportsPositionedReadable();
instream = getFileSystem().open(smallSeekFile);
byte[] buffer = new byte[256];
try {
instream.readFully(TEST_FILE_LEN + 1, buffer, 0, 0);
// a zero byte read may fail-fast
LOG.info("Filesystem short-circuits 0-byte reads");
} catch (EOFException e) {
handleExpectedException(e);
} catch (IOException e) {
handleRelaxedException("readFully(0 bytes) with an offset past EOF ",
"EOFException",
e);
}
}
@Test
public void testReadNullBuffer() throws Throwable {
describe("try to read a null buffer ");
assumeSupportsPositionedReadable();
try (FSDataInputStream in = getFileSystem().open(smallSeekFile)) {
// Null buffer
int r = in.read(0, null, 0, 16);
fail("Expected an exception from a read into a null buffer, got " + r);
} catch (IllegalArgumentException e) {
// expected
}
}
@Test
public void testReadSmallFile() throws Throwable {
describe("PositionedRead.read operations");
assumeSupportsPositionedReadable();
instream = getFileSystem().open(smallSeekFile);
byte[] buffer = new byte[256];
int r;
// expect negative length to fail
try {
r = instream.read(0, buffer, 0, -16);
fail("Expected an exception, got " + r);
} catch (IllegalArgumentException | IndexOutOfBoundsException e) {
// expected
}
// negative offset into buffer
try {
r = instream.read(0, buffer, -1, 16);
fail("Expected an exception, got " + r);
} catch (IllegalArgumentException | IndexOutOfBoundsException e) {
// expected
}
// negative position
try {
r = instream.read(-1, buffer, 0, 16);
fail("Expected an exception, got " + r);
} catch (EOFException e) {
handleExpectedException(e);
} catch (IOException | IllegalArgumentException | IndexOutOfBoundsException e) {
handleRelaxedException("read() with a negative position ",
"EOFException",
e);
}
// read more than the offset allows
try {
r = instream.read(0, buffer, buffer.length - 8, 16);
fail("Expected an exception, got " + r);
} catch (IllegalArgumentException | IndexOutOfBoundsException e) {
// expected
}
// read properly
assertEquals(0, instream.getPos());
instream.readFully(0, buffer);
assertEquals(0, instream.getPos());
// now read the entire file in one go
byte[] fullFile = new byte[TEST_FILE_LEN];
assertEquals(TEST_FILE_LEN,
instream.read(0, fullFile, 0, fullFile.length));
assertEquals(0, instream.getPos());
// now read past the end of the file
assertEquals(-1,
instream.read(TEST_FILE_LEN + 16, buffer, 0, 1));
}
@Test
public void testReadAtExactEOF() throws Throwable {
describe("read at the end of the file");
instream = getFileSystem().open(smallSeekFile);
instream.seek(TEST_FILE_LEN -1);
assertTrue("read at last byte", instream.read() > 0);
assertEquals("read just past EOF", -1, instream.read());
}
} }

View File

@ -57,7 +57,7 @@ public abstract class AbstractFSContractTestBase extends Assert
public static final int DEFAULT_TEST_TIMEOUT = 180 * 1000; public static final int DEFAULT_TEST_TIMEOUT = 180 * 1000;
/** /**
* The FS contract used for these tets * The FS contract used for these tests
*/ */
private AbstractFSContract contract; private AbstractFSContract contract;

View File

@ -53,20 +53,20 @@ public interface ContractOptions {
/** /**
* Flag to indicate that the FS can rename into directories that * Flag to indicate that the FS can rename into directories that
* don't exist, creating them as needed. * don't exist, creating them as needed.
* @{value} * {@value}
*/ */
String RENAME_CREATES_DEST_DIRS = "rename-creates-dest-dirs"; String RENAME_CREATES_DEST_DIRS = "rename-creates-dest-dirs";
/** /**
* Flag to indicate that the FS does not follow the rename contract -and * Flag to indicate that the FS does not follow the rename contract -and
* instead only returns false on a failure. * instead only returns false on a failure.
* @{value} * {@value}
*/ */
String RENAME_OVERWRITES_DEST = "rename-overwrites-dest"; String RENAME_OVERWRITES_DEST = "rename-overwrites-dest";
/** /**
* Flag to indicate that the FS returns false if the destination exists * Flag to indicate that the FS returns false if the destination exists
* @{value} * {@value}
*/ */
String RENAME_RETURNS_FALSE_IF_DEST_EXISTS = String RENAME_RETURNS_FALSE_IF_DEST_EXISTS =
"rename-returns-false-if-dest-exists"; "rename-returns-false-if-dest-exists";
@ -74,7 +74,7 @@ public interface ContractOptions {
/** /**
* Flag to indicate that the FS returns false on a rename * Flag to indicate that the FS returns false on a rename
* if the source is missing * if the source is missing
* @{value} * {@value}
*/ */
String RENAME_RETURNS_FALSE_IF_SOURCE_MISSING = String RENAME_RETURNS_FALSE_IF_SOURCE_MISSING =
"rename-returns-false-if-source-missing"; "rename-returns-false-if-source-missing";
@ -82,74 +82,74 @@ public interface ContractOptions {
/** /**
* Flag to indicate that the FS remove dest first if it is an empty directory * Flag to indicate that the FS remove dest first if it is an empty directory
* mean the FS honors POSIX rename behavior. * mean the FS honors POSIX rename behavior.
* @{value} * {@value}
*/ */
String RENAME_REMOVE_DEST_IF_EMPTY_DIR = "rename-remove-dest-if-empty-dir"; String RENAME_REMOVE_DEST_IF_EMPTY_DIR = "rename-remove-dest-if-empty-dir";
/** /**
* Flag to indicate that append is supported * Flag to indicate that append is supported
* @{value} * {@value}
*/ */
String SUPPORTS_APPEND = "supports-append"; String SUPPORTS_APPEND = "supports-append";
/** /**
* Flag to indicate that setTimes is supported. * Flag to indicate that setTimes is supported.
* @{value} * {@value}
*/ */
String SUPPORTS_SETTIMES = "supports-settimes"; String SUPPORTS_SETTIMES = "supports-settimes";
/** /**
* Flag to indicate that getFileStatus is supported. * Flag to indicate that getFileStatus is supported.
* @{value} * {@value}
*/ */
String SUPPORTS_GETFILESTATUS = "supports-getfilestatus"; String SUPPORTS_GETFILESTATUS = "supports-getfilestatus";
/** /**
* Flag to indicate that renames are atomic * Flag to indicate that renames are atomic
* @{value} * {@value}
*/ */
String SUPPORTS_ATOMIC_RENAME = "supports-atomic-rename"; String SUPPORTS_ATOMIC_RENAME = "supports-atomic-rename";
/** /**
* Flag to indicate that directory deletes are atomic * Flag to indicate that directory deletes are atomic
* @{value} * {@value}
*/ */
String SUPPORTS_ATOMIC_DIRECTORY_DELETE = "supports-atomic-directory-delete"; String SUPPORTS_ATOMIC_DIRECTORY_DELETE = "supports-atomic-directory-delete";
/** /**
* Does the FS support multiple block locations? * Does the FS support multiple block locations?
* @{value} * {@value}
*/ */
String SUPPORTS_BLOCK_LOCALITY = "supports-block-locality"; String SUPPORTS_BLOCK_LOCALITY = "supports-block-locality";
/** /**
* Does the FS support the concat() operation? * Does the FS support the concat() operation?
* @{value} * {@value}
*/ */
String SUPPORTS_CONCAT = "supports-concat"; String SUPPORTS_CONCAT = "supports-concat";
/** /**
* Is seeking supported at all? * Is seeking supported at all?
* @{value} * {@value}
*/ */
String SUPPORTS_SEEK = "supports-seek"; String SUPPORTS_SEEK = "supports-seek";
/** /**
* Is seeking past the EOF allowed? * Is seeking past the EOF allowed?
* @{value} * {@value}
*/ */
String REJECTS_SEEK_PAST_EOF = "rejects-seek-past-eof"; String REJECTS_SEEK_PAST_EOF = "rejects-seek-past-eof";
/** /**
* Is seeking on a closed file supported? Some filesystems only raise an * Is seeking on a closed file supported? Some filesystems only raise an
* exception later, when trying to read. * exception later, when trying to read.
* @{value} * {@value}
*/ */
String SUPPORTS_SEEK_ON_CLOSED_FILE = "supports-seek-on-closed-file"; String SUPPORTS_SEEK_ON_CLOSED_FILE = "supports-seek-on-closed-file";
/** /**
* Is available() on a closed InputStream supported? * Is available() on a closed InputStream supported?
* @{value} * {@value}
*/ */
String SUPPORTS_AVAILABLE_ON_CLOSED_FILE = "supports-available-on-closed-file"; String SUPPORTS_AVAILABLE_ON_CLOSED_FILE = "supports-available-on-closed-file";
@ -157,32 +157,39 @@ public interface ContractOptions {
* Flag to indicate that this FS expects to throw the strictest * Flag to indicate that this FS expects to throw the strictest
* exceptions it can, not generic IOEs, which, if returned, * exceptions it can, not generic IOEs, which, if returned,
* must be rejected. * must be rejected.
* @{value} * {@value}
*/ */
String SUPPORTS_STRICT_EXCEPTIONS = "supports-strict-exceptions"; String SUPPORTS_STRICT_EXCEPTIONS = "supports-strict-exceptions";
/** /**
* Are unix permissions supported? * Are unix permissions supported?
* @{value} * {@value}
*/ */
String SUPPORTS_UNIX_PERMISSIONS = "supports-unix-permissions"; String SUPPORTS_UNIX_PERMISSIONS = "supports-unix-permissions";
/**
* Is positioned readable supported? Supporting seek should be sufficient
* for this.
* {@value}
*/
String SUPPORTS_POSITIONED_READABLE = "supports-positioned-readable";
/** /**
* Maximum path length * Maximum path length
* @{value} * {@value}
*/ */
String MAX_PATH_ = "max-path"; String MAX_PATH_ = "max-path";
/** /**
* Maximum filesize: 0 or -1 for no limit * Maximum filesize: 0 or -1 for no limit
* @{value} * {@value}
*/ */
String MAX_FILESIZE = "max-filesize"; String MAX_FILESIZE = "max-filesize";
/** /**
* Flag to indicate that tests on the root directories of a filesystem/ * Flag to indicate that tests on the root directories of a filesystem/
* object store are permitted * object store are permitted
* @{value} * {@value}
*/ */
String TEST_ROOT_TESTS_ENABLED = "test.root-tests-enabled"; String TEST_ROOT_TESTS_ENABLED = "test.root-tests-enabled";

View File

@ -23,6 +23,7 @@ import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IOUtils;
import org.junit.Assert; import org.junit.Assert;
import org.junit.internal.AssumptionViolatedException; import org.junit.internal.AssumptionViolatedException;
import org.slf4j.Logger; import org.slf4j.Logger;
@ -432,9 +433,7 @@ public class ContractTestUtils extends Assert {
* @throws AssertionError with the text and throwable -always * @throws AssertionError with the text and throwable -always
*/ */
public static void fail(String text, Throwable thrown) { public static void fail(String text, Throwable thrown) {
AssertionError e = new AssertionError(text); throw new AssertionError(text, thrown);
e.initCause(thrown);
throw e;
} }
/** /**
@ -509,10 +508,14 @@ public class ContractTestUtils extends Assert {
boolean overwrite, boolean overwrite,
byte[] data) throws IOException { byte[] data) throws IOException {
FSDataOutputStream stream = fs.create(path, overwrite); FSDataOutputStream stream = fs.create(path, overwrite);
if (data != null && data.length > 0) { try {
stream.write(data); if (data != null && data.length > 0) {
stream.write(data);
}
stream.close();
} finally {
IOUtils.closeStream(stream);
} }
stream.close();
} }
/** /**
@ -574,13 +577,10 @@ public class ContractTestUtils extends Assert {
public static String readBytesToString(FileSystem fs, public static String readBytesToString(FileSystem fs,
Path path, Path path,
int length) throws IOException { int length) throws IOException {
FSDataInputStream in = fs.open(path); try (FSDataInputStream in = fs.open(path)) {
try {
byte[] buf = new byte[length]; byte[] buf = new byte[length];
in.readFully(0, buf); in.readFully(0, buf);
return toChar(buf); return toChar(buf);
} finally {
in.close();
} }
} }
@ -786,8 +786,7 @@ public class ContractTestUtils extends Assert {
long totalBytesRead = 0; long totalBytesRead = 0;
int nextExpectedNumber = 0; int nextExpectedNumber = 0;
final InputStream inputStream = fs.open(path); try (InputStream inputStream = fs.open(path)) {
try {
while (true) { while (true) {
final int bytesRead = inputStream.read(testBuffer); final int bytesRead = inputStream.read(testBuffer);
if (bytesRead < 0) { if (bytesRead < 0) {
@ -814,8 +813,6 @@ public class ContractTestUtils extends Assert {
throw new IOException("Expected to read " + expectedSize + throw new IOException("Expected to read " + expectedSize +
" bytes but only received " + totalBytesRead); " bytes but only received " + totalBytesRead);
} }
} finally {
inputStream.close();
} }
} }

View File

@ -235,6 +235,16 @@ public class TestHttpServer extends HttpServerFunctionalTest {
assertEquals("text/html; charset=utf-8", conn.getContentType()); assertEquals("text/html; charset=utf-8", conn.getContentType());
} }
@Test
public void testHttpResponseContainsXFrameOptions() throws IOException {
URL url = new URL(baseUrl, "");
HttpURLConnection conn = (HttpURLConnection) url.openConnection();
conn.connect();
String xfoHeader = conn.getHeaderField("X-FRAME-OPTIONS");
assertTrue("X-FRAME-OPTIONS is absent in the header", xfoHeader != null);
}
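The new test only asserts that the X-FRAME-OPTIONS header is present; it deliberately does not pin its value. For orientation, a response typically picks up this header from a servlet filter along the lines of the sketch below. This is an illustration only, not the actual HttpServer2 wiring, and the filter name is made up; a common value is SAMEORIGIN, which permits framing only by pages from the same origin.

```java
import java.io.IOException;

import javax.servlet.Filter;
import javax.servlet.FilterChain;
import javax.servlet.FilterConfig;
import javax.servlet.ServletException;
import javax.servlet.ServletRequest;
import javax.servlet.ServletResponse;
import javax.servlet.http.HttpServletResponse;

/** Hypothetical filter that adds clickjacking protection to every HTTP response. */
public class XFrameOptionsFilterSketch implements Filter {

  @Override
  public void init(FilterConfig filterConfig) {
    // No configuration needed for this sketch.
  }

  @Override
  public void doFilter(ServletRequest request, ServletResponse response,
      FilterChain chain) throws IOException, ServletException {
    // Set the header before handing the request down the filter chain.
    ((HttpServletResponse) response).setHeader("X-FRAME-OPTIONS", "SAMEORIGIN");
    chain.doFilter(request, response);
  }

  @Override
  public void destroy() {
    // Nothing to clean up.
  }
}
```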
/** /**
* Dummy filter that mimics as an authentication filter. Obtains user identity * Dummy filter that mimics as an authentication filter. Obtains user identity
* from the request parameter user.name. Wraps around the request so that * from the request parameter user.name. Wraps around the request so that


@ -100,7 +100,7 @@ case sensitivity and permission options are determined at run time from OS type
<value>true</value> <value>true</value>
</property> </property>
<!-- checksum FS doesn't allow seeing past EOF --> <!-- checksum FS doesn't allow seeking past EOF -->
<property> <property>
<name>fs.contract.rejects-seek-past-eof</name> <name>fs.contract.rejects-seek-past-eof</name>
<value>true</value> <value>true</value>


@ -284,7 +284,15 @@ The answer to "What is your first and last name?" (i.e. "CN") must be the hostna
NOTE: You need to restart the KMS for the configuration changes to take effect. NOTE: You need to restart the KMS for the configuration changes to take effect.
$H4 KMS Access Control $H4 ACLs (Access Control Lists)
KMS supports ACLs (Access Control Lists) for fine-grained permission control.
Two levels of ACLs exist in KMS: KMS ACLs and Key ACLs. KMS ACLs control access at KMS operation level, and precede Key ACLs. In particular, only if permission is granted at KMS ACLs level, shall the permission check against Key ACLs be performed.
The configuration and usage of KMS ACLs and Key ACLs are described in the sections below.
$H5 KMS ACLs
KMS ACLs configuration are defined in the KMS `etc/hadoop/kms-acls.xml` configuration file. This file is hot-reloaded when it changes. KMS ACLs configuration are defined in the KMS `etc/hadoop/kms-acls.xml` configuration file. This file is hot-reloaded when it changes.
@ -452,7 +460,7 @@ A user accessing KMS is first checked for inclusion in the Access Control List f
</configuration> </configuration>
``` ```
$H4 Key Access Control $H5 Key ACLs
KMS supports access control for all non-read operations at the Key level. All Key Access operations are classified as: KMS supports access control for all non-read operations at the Key level. All Key Access operations are classified as:
@ -466,9 +474,9 @@ These can be defined in the KMS `etc/hadoop/kms-acls.xml` as follows
For all keys for which a key access has not been explicitly configured, it is possible to configure a default key access control for a subset of the operation types. For all keys for which a key access has not been explicitly configured, it is possible to configure a default key access control for a subset of the operation types.
It is also possible to configure a "whitelist" key ACL for a subset of the operation types. The whitelist key ACL is a whitelist in addition to the explicit or default per-key ACL. That is, if no per-key ACL is explicitly set, a user will be granted access if they are present in the default per-key ACL or the whitelist key ACL. If a per-key ACL is explicitly set, a user will be granted access if they are present in the per-key ACL or the whitelist key ACL. It is also possible to configure a "whitelist" key ACL for a subset of the operation types. The whitelist key ACL grants access to the key, in addition to the explicit or default per-key ACL. That is, if no per-key ACL is explicitly set, a user will be granted access if they are present in the default per-key ACL or the whitelist key ACL. If a per-key ACL is explicitly set, a user will be granted access if they are present in the per-key ACL or the whitelist key ACL.
If no ACL is configured for a specific key AND no default ACL is configured AND no root key ACL is configured for the requested operation, then access will be DENIED. If no ACL is configured for a specific key AND no default ACL is configured AND no whitelist key ACL is configured for the requested operation, then access will be DENIED.
**NOTE:** The default and whitelist key ACL does not support `ALL` operation qualifier. **NOTE:** The default and whitelist key ACL does not support `ALL` operation qualifier.
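Putting the rules above together, the per-key decision reduces to: a whitelist grant always wins; an explicitly set per-key ACL replaces the default ACL; and if neither a per-key, default, nor whitelist ACL matches, access is denied. The sketch below models only that documented decision flow; it is not KMS code, it ignores the KMS-level ACL check that runs first, and it collapses the per-operation-type dimension for brevity.

```java
import java.util.Map;
import java.util.Set;

/** Illustrative-only model of the documented key ACL decision flow. */
class KeyAclDecisionSketch {
  private final Map<String, Set<String>> perKeyAcl;  // key name -> explicitly granted users
  private final Set<String> defaultKeyAcl;           // consulted when no per-key ACL is set
  private final Set<String> whitelistKeyAcl;         // always consulted in addition

  KeyAclDecisionSketch(Map<String, Set<String>> perKeyAcl,
      Set<String> defaultKeyAcl, Set<String> whitelistKeyAcl) {
    this.perKeyAcl = perKeyAcl;
    this.defaultKeyAcl = defaultKeyAcl;
    this.whitelistKeyAcl = whitelistKeyAcl;
  }

  boolean isAccessGranted(String key, String user) {
    // The whitelist grants access in addition to any per-key or default ACL.
    if (whitelistKeyAcl != null && whitelistKeyAcl.contains(user)) {
      return true;
    }
    Set<String> explicit = perKeyAcl.get(key);
    if (explicit != null) {
      // A per-key ACL, once set, replaces the default ACL entirely.
      return explicit.contains(user);
    }
    // No per-key ACL: fall back to the default ACL; with nothing configured, DENY.
    return defaultKeyAcl != null && defaultKeyAcl.contains(user);
  }
}
```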
@ -575,7 +583,11 @@ If no ACL is configured for a specific key AND no default ACL is configured AND
$H3 KMS Delegation Token Configuration $H3 KMS Delegation Token Configuration
KMS delegation token secret manager can be configured with the following properties: KMS supports delegation tokens to authenticate to the key providers from processes without Kerberos credentials.
KMS delegation token authentication extends the default Hadoop authentication. See [Hadoop Auth](../hadoop-auth/index.html) page for more details.
Additionally, KMS delegation token secret manager can be configured with the following properties:
```xml ```xml
<property> <property>
@ -590,7 +602,7 @@ KMS delegation token secret manager can be configured with the following propert
<name>hadoop.kms.authentication.delegation-token.max-lifetime.sec</name> <name>hadoop.kms.authentication.delegation-token.max-lifetime.sec</name>
<value>604800</value> <value>604800</value>
<description> <description>
Maximum lifetime of a delagation token, in seconds. Default value 7 days. Maximum lifetime of a delegation token, in seconds. Default value 7 days.
</description> </description>
</property> </property>
@ -598,7 +610,7 @@ KMS delegation token secret manager can be configured with the following propert
<name>hadoop.kms.authentication.delegation-token.renew-interval.sec</name> <name>hadoop.kms.authentication.delegation-token.renew-interval.sec</name>
<value>86400</value> <value>86400</value>
<description> <description>
Renewal interval of a delagation token, in seconds. Default value 1 day. Renewal interval of a delegation token, in seconds. Default value 1 day.
</description> </description>
</property> </property>
@ -640,7 +652,7 @@ $H4 HTTP Authentication Signature
KMS uses Hadoop Authentication for HTTP authentication. Hadoop Authentication issues a signed HTTP Cookie once the client has authenticated successfully. This HTTP Cookie has an expiration time, after which it will trigger a new authentication sequence. This is done to avoid triggering the authentication on every HTTP request of a client. KMS uses Hadoop Authentication for HTTP authentication. Hadoop Authentication issues a signed HTTP Cookie once the client has authenticated successfully. This HTTP Cookie has an expiration time, after which it will trigger a new authentication sequence. This is done to avoid triggering the authentication on every HTTP request of a client.
A KMS instance must verify the HTTP Cookie signatures signed by other KMS instances. To do this all KMS instances must share the signing secret. A KMS instance must verify the HTTP Cookie signatures signed by other KMS instances. To do this, all KMS instances must share the signing secret. Please see [SignerSecretProvider Configuration](../hadoop-auth/Configuration.html#SignerSecretProvider_Configuration) for detailed description and configuration examples. Note that KMS configurations need to be prefixed with `hadoop.kms.authentication`, as shown in the example below.
This secret sharing can be done using a Zookeeper service which is configured in KMS with the following properties in the `kms-site.xml`: This secret sharing can be done using a Zookeeper service which is configured in KMS with the following properties in the `kms-site.xml`:
@ -650,8 +662,9 @@ This secret sharing can be done using a Zookeeper service which is configured in
<value>zookeeper</value> <value>zookeeper</value>
<description> <description>
Indicates how the secret to sign the authentication cookies will be Indicates how the secret to sign the authentication cookies will be
stored. Options are 'random' (default), 'string' and 'zookeeper'. stored. Options are 'random' (default), 'file' and 'zookeeper'.
If using a setup with multiple KMS instances, 'zookeeper' should be used. If using a setup with multiple KMS instances, 'zookeeper' should be used.
If using file, signature.secret.file should be configured and point to the secret file.
</description> </description>
</property> </property>
<property> <property>
@ -659,7 +672,7 @@ This secret sharing can be done using a Zookeeper service which is configured in
<value>/hadoop-kms/hadoop-auth-signature-secret</value> <value>/hadoop-kms/hadoop-auth-signature-secret</value>
<description> <description>
The Zookeeper ZNode path where the KMS instances will store and retrieve The Zookeeper ZNode path where the KMS instances will store and retrieve
the secret from. the secret from. All KMS instances that need to coordinate should point to the same path.
</description> </description>
</property> </property>
<property> <property>
@ -696,7 +709,11 @@ This secret sharing can be done using a Zookeeper service which is configured in
$H4 Delegation Tokens $H4 Delegation Tokens
TBD Similar to HTTP authentication, KMS uses Hadoop Authentication for delegation tokens too.
Under HA, A KMS instance must verify the delegation token given by another KMS instance, by checking the shared secret used to sign the delegation token. To do this, all KMS instances must be able to retrieve the shared secret from ZooKeeper.
Please see the examples given in HTTP Authentication section to configure ZooKeeper for secret sharing.
$H3 KMS HTTP REST API $H3 KMS HTTP REST API


@ -972,6 +972,10 @@ public class DFSInputStream extends FSInputStream
@Override @Override
public synchronized int read(@Nonnull final byte buf[], int off, int len) public synchronized int read(@Nonnull final byte buf[], int off, int len)
throws IOException { throws IOException {
validatePositionedReadArgs(pos, buf, off, len);
if (len == 0) {
return 0;
}
ReaderStrategy byteArrayReader = new ByteArrayStrategy(buf); ReaderStrategy byteArrayReader = new ByteArrayStrategy(buf);
try (TraceScope scope = try (TraceScope scope =
dfsClient.newReaderTraceScope("DFSInputStream#byteArrayRead", dfsClient.newReaderTraceScope("DFSInputStream#byteArrayRead",
@ -1423,6 +1427,10 @@ public class DFSInputStream extends FSInputStream
@Override @Override
public int read(long position, byte[] buffer, int offset, int length) public int read(long position, byte[] buffer, int offset, int length)
throws IOException { throws IOException {
validatePositionedReadArgs(position, buffer, offset, length);
if (length == 0) {
return 0;
}
try (TraceScope scope = dfsClient. try (TraceScope scope = dfsClient.
newReaderTraceScope("DFSInputStream#byteArrayPread", newReaderTraceScope("DFSInputStream#byteArrayPread",
src, position, length)) { src, position, length)) {
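Both read paths now validate their arguments up front and short-circuit zero-length reads before setting up a reader strategy or a trace scope. The precise checks live in the Hadoop stream base class (validatePositionedReadArgs); as a rough, hedged illustration, positioned-read validation of this kind usually amounts to something like the following (names are made up, and this is not the actual Hadoop implementation):

```java
import java.io.EOFException;

/** Rough illustration of positioned-read argument checks; not the actual Hadoop code. */
final class PositionedReadArgCheckSketch {

  static void validate(long position, byte[] buffer, int offset, int length)
      throws EOFException {
    if (buffer == null) {
      throw new NullPointerException("Null destination buffer");
    }
    if (position < 0) {
      // ByteRangeInputStream below rejects negative offsets the same way.
      throw new EOFException("Negative position " + position);
    }
    if (offset < 0 || length < 0 || offset + length > buffer.length) {
      throw new IndexOutOfBoundsException("offset=" + offset + ", length="
          + length + ", buffer.length=" + buffer.length);
    }
  }

  private PositionedReadArgCheckSketch() {
  }
}
```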


@ -139,9 +139,6 @@ public class HdfsConfiguration extends Configuration {
HdfsClientConfigKeys.DFS_NAMESERVICES), HdfsClientConfigKeys.DFS_NAMESERVICES),
new DeprecationDelta("dfs.federation.nameservice.id", new DeprecationDelta("dfs.federation.nameservice.id",
DeprecatedKeys.DFS_NAMESERVICE_ID), DeprecatedKeys.DFS_NAMESERVICE_ID),
new DeprecationDelta("dfs.client.file-block-storage-locations.timeout",
HdfsClientConfigKeys.
DFS_CLIENT_FILE_BLOCK_STORAGE_LOCATIONS_TIMEOUT_MS),
}); });
} }


@ -97,12 +97,6 @@ public interface HdfsClientConfigKeys {
int DFS_CLIENT_CACHED_CONN_RETRY_DEFAULT = 3; int DFS_CLIENT_CACHED_CONN_RETRY_DEFAULT = 3;
String DFS_CLIENT_CONTEXT = "dfs.client.context"; String DFS_CLIENT_CONTEXT = "dfs.client.context";
String DFS_CLIENT_CONTEXT_DEFAULT = "default"; String DFS_CLIENT_CONTEXT_DEFAULT = "default";
String DFS_CLIENT_FILE_BLOCK_STORAGE_LOCATIONS_NUM_THREADS =
"dfs.client.file-block-storage-locations.num-threads";
int DFS_CLIENT_FILE_BLOCK_STORAGE_LOCATIONS_NUM_THREADS_DEFAULT = 10;
String DFS_CLIENT_FILE_BLOCK_STORAGE_LOCATIONS_TIMEOUT_MS =
"dfs.client.file-block-storage-locations.timeout.millis";
int DFS_CLIENT_FILE_BLOCK_STORAGE_LOCATIONS_TIMEOUT_MS_DEFAULT = 1000;
String DFS_CLIENT_USE_LEGACY_BLOCKREADER = String DFS_CLIENT_USE_LEGACY_BLOCKREADER =
"dfs.client.use.legacy.blockreader"; "dfs.client.use.legacy.blockreader";
boolean DFS_CLIENT_USE_LEGACY_BLOCKREADER_DEFAULT = false; boolean DFS_CLIENT_USE_LEGACY_BLOCKREADER_DEFAULT = false;


@ -28,6 +28,7 @@ import java.util.Map;
import java.util.StringTokenizer; import java.util.StringTokenizer;
import org.apache.commons.io.input.BoundedInputStream; import org.apache.commons.io.input.BoundedInputStream;
import org.apache.hadoop.fs.FSExceptionMessages;
import org.apache.hadoop.fs.FSInputStream; import org.apache.hadoop.fs.FSInputStream;
import com.google.common.annotations.VisibleForTesting; import com.google.common.annotations.VisibleForTesting;
@ -128,6 +129,9 @@ public abstract class ByteRangeInputStream extends FSInputStream {
@VisibleForTesting @VisibleForTesting
protected InputStreamAndFileLength openInputStream(long startOffset) protected InputStreamAndFileLength openInputStream(long startOffset)
throws IOException { throws IOException {
if (startOffset < 0) {
throw new EOFException("Negative Position");
}
// Use the original url if no resolved url exists, eg. if // Use the original url if no resolved url exists, eg. if
// it's the first time a request is made. // it's the first time a request is made.
final boolean resolved = resolvedURL.getURL() != null; final boolean resolved = resolvedURL.getURL() != null;
@ -224,6 +228,10 @@ public abstract class ByteRangeInputStream extends FSInputStream {
@Override @Override
public int read(long position, byte[] buffer, int offset, int length) public int read(long position, byte[] buffer, int offset, int length)
throws IOException { throws IOException {
validatePositionedReadArgs(position, buffer, offset, length);
if (length == 0) {
return 0;
}
try (InputStream in = openInputStream(position).in) { try (InputStream in = openInputStream(position).in) {
return in.read(buffer, offset, length); return in.read(buffer, offset, length);
} }
@ -232,17 +240,21 @@ public abstract class ByteRangeInputStream extends FSInputStream {
@Override @Override
public void readFully(long position, byte[] buffer, int offset, int length) public void readFully(long position, byte[] buffer, int offset, int length)
throws IOException { throws IOException {
final InputStreamAndFileLength fin = openInputStream(position); validatePositionedReadArgs(position, buffer, offset, length);
if (fin.length != null && length + position > fin.length) { if (length == 0) {
throw new EOFException("The length to read " + length return;
+ " exceeds the file length " + fin.length);
} }
final InputStreamAndFileLength fin = openInputStream(position);
try { try {
if (fin.length != null && length + position > fin.length) {
throw new EOFException("The length to read " + length
+ " exceeds the file length " + fin.length);
}
int nread = 0; int nread = 0;
while (nread < length) { while (nread < length) {
int nbytes = fin.in.read(buffer, offset + nread, length - nread); int nbytes = fin.in.read(buffer, offset + nread, length - nread);
if (nbytes < 0) { if (nbytes < 0) {
throw new EOFException("End of file reached before reading fully."); throw new EOFException(FSExceptionMessages.EOF_IN_READ_FULLY);
} }
nread += nbytes; nread += nbytes;
} }


@ -1197,24 +1197,6 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
@Deprecated @Deprecated
public static final String DFS_CLIENT_CONTEXT_DEFAULT = public static final String DFS_CLIENT_CONTEXT_DEFAULT =
HdfsClientConfigKeys.DFS_CLIENT_CONTEXT_DEFAULT; HdfsClientConfigKeys.DFS_CLIENT_CONTEXT_DEFAULT;
@Deprecated
public static final String
DFS_CLIENT_FILE_BLOCK_STORAGE_LOCATIONS_NUM_THREADS =
HdfsClientConfigKeys.DFS_CLIENT_FILE_BLOCK_STORAGE_LOCATIONS_NUM_THREADS;
@Deprecated
public static final int
DFS_CLIENT_FILE_BLOCK_STORAGE_LOCATIONS_NUM_THREADS_DEFAULT =
HdfsClientConfigKeys
.DFS_CLIENT_FILE_BLOCK_STORAGE_LOCATIONS_NUM_THREADS_DEFAULT;
@Deprecated
public static final String
DFS_CLIENT_FILE_BLOCK_STORAGE_LOCATIONS_TIMEOUT_MS =
HdfsClientConfigKeys.DFS_CLIENT_FILE_BLOCK_STORAGE_LOCATIONS_TIMEOUT_MS;
@Deprecated
public static final int
DFS_CLIENT_FILE_BLOCK_STORAGE_LOCATIONS_TIMEOUT_MS_DEFAULT =
HdfsClientConfigKeys
.DFS_CLIENT_FILE_BLOCK_STORAGE_LOCATIONS_TIMEOUT_MS_DEFAULT;
@Deprecated @Deprecated
public static final String DFS_CLIENT_DATANODE_RESTART_TIMEOUT_KEY = public static final String DFS_CLIENT_DATANODE_RESTART_TIMEOUT_KEY =


@ -35,6 +35,7 @@ import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.HdfsConfiguration; import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.protocol.*; import org.apache.hadoop.hdfs.protocol.*;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType; import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor.BlockTargetPair; import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor.BlockTargetPair;
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor.CachedBlocksList; import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor.CachedBlocksList;
import org.apache.hadoop.hdfs.server.namenode.CachedBlock; import org.apache.hadoop.hdfs.server.namenode.CachedBlock;
@ -47,6 +48,7 @@ import org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringStr
import org.apache.hadoop.ipc.Server; import org.apache.hadoop.ipc.Server;
import org.apache.hadoop.net.*; import org.apache.hadoop.net.*;
import org.apache.hadoop.net.NetworkTopology.InvalidTopologyException; import org.apache.hadoop.net.NetworkTopology.InvalidTopologyException;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.util.ReflectionUtils; import org.apache.hadoop.util.ReflectionUtils;
import java.io.IOException; import java.io.IOException;
@ -368,22 +370,88 @@ public class DatanodeManager {
} }
/** Sort the located blocks by the distance to the target host. */ /**
public void sortLocatedBlocks(final String targethost, * Sort the non-striped located blocks by the distance to the target host.
final List<LocatedBlock> locatedblocks) { *
//sort the blocks * For striped blocks, it will only move decommissioned/stale nodes to the
* bottom. For example, assume we have storage list:
* d0, d1, d2, d3, d4, d5, d6, d7, d8, d9
* mapping to block indices:
* 0, 1, 2, 3, 4, 5, 6, 7, 8, 2
*
* Here the internal block b2 is duplicated, located in both d2 and d9. If d2 is
* a decommissioning node, then d2 and d9 should be switched in the storage list.
* After sorting the locations, the corresponding block indices and block tokens
* are updated accordingly.
*/
public void sortLocatedBlocks(final String targetHost,
final List<LocatedBlock> locatedBlocks) {
Comparator<DatanodeInfo> comparator = avoidStaleDataNodesForRead ?
new DFSUtil.DecomStaleComparator(staleInterval) :
DFSUtil.DECOM_COMPARATOR;
// sort located block
for (LocatedBlock lb : locatedBlocks) {
if (lb.isStriped()) {
sortLocatedStripedBlock(lb, comparator);
} else {
sortLocatedBlock(lb, targetHost, comparator);
}
}
}
/**
* Move decommissioned/stale datanodes to the bottom. After sorting it will
* update block indices and block tokens respectively.
*
* @param lb located striped block
* @param comparator dn comparator
*/
private void sortLocatedStripedBlock(final LocatedBlock lb,
Comparator<DatanodeInfo> comparator) {
DatanodeInfo[] di = lb.getLocations();
HashMap<DatanodeInfo, Byte> locToIndex = new HashMap<>();
HashMap<DatanodeInfo, Token<BlockTokenIdentifier>> locToToken =
new HashMap<>();
LocatedStripedBlock lsb = (LocatedStripedBlock) lb;
for (int i = 0; i < di.length; i++) {
locToIndex.put(di[i], lsb.getBlockIndices()[i]);
locToToken.put(di[i], lsb.getBlockTokens()[i]);
}
// Move decommissioned/stale datanodes to the bottom
Arrays.sort(di, comparator);
// must update cache since we modified locations array
lb.updateCachedStorageInfo();
// must update block indices and block tokens respectively
for (int i = 0; i < di.length; i++) {
lsb.getBlockIndices()[i] = locToIndex.get(di[i]);
lsb.getBlockTokens()[i] = locToToken.get(di[i]);
}
}
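The javadoc's own example makes the bookkeeping concrete: internal block b2 lives on both d2 (decommissioning) and d9, so sorting moves d2 behind d9, and the per-location maps ensure each block index and token follows its datanode. A toy, self-contained illustration of that remapping, with plain strings standing in for DatanodeInfo and tokens:

```java
import java.util.Arrays;
import java.util.Comparator;
import java.util.HashMap;
import java.util.Map;

/** Toy model of the index/token remapping performed in sortLocatedStripedBlock. */
public class StripedSortSketch {
  public static void main(String[] args) {
    String[] locations = {"d2(decommissioning)", "d9"};
    byte[] blockIndices = {2, 2};                      // both hold internal block b2
    String[] blockTokens = {"token-d2", "token-d9"};

    // Remember which index/token belongs to which location before sorting.
    Map<String, Byte> locToIndex = new HashMap<>();
    Map<String, String> locToToken = new HashMap<>();
    for (int i = 0; i < locations.length; i++) {
      locToIndex.put(locations[i], blockIndices[i]);
      locToToken.put(locations[i], blockTokens[i]);
    }

    // Stand-in for the decommissioned/stale comparator: push flagged nodes last.
    Comparator<String> decomLast =
        Comparator.comparing((String loc) -> loc.contains("decommissioning"));
    Arrays.sort(locations, decomLast);

    // Re-align indices and tokens with the reordered locations.
    for (int i = 0; i < locations.length; i++) {
      blockIndices[i] = locToIndex.get(locations[i]);
      blockTokens[i] = locToToken.get(locations[i]);
    }
    System.out.println(Arrays.toString(locations));    // [d9, d2(decommissioning)]
    System.out.println(Arrays.toString(blockIndices)); // [2, 2]
    System.out.println(Arrays.toString(blockTokens));  // [token-d9, token-d2]
  }
}
```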
/**
* Move decommissioned/stale datanodes to the bottom. Also, sort nodes by
* network distance.
*
* @param lb located block
* @param targetHost target host
* @param comparator dn comparator
*/
private void sortLocatedBlock(final LocatedBlock lb, String targetHost,
Comparator<DatanodeInfo> comparator) {
// As it is possible for the separation of node manager and datanode, // As it is possible for the separation of node manager and datanode,
// here we should get node but not datanode only . // here we should get node but not datanode only .
Node client = getDatanodeByHost(targethost); Node client = getDatanodeByHost(targetHost);
if (client == null) { if (client == null) {
List<String> hosts = new ArrayList<> (1); List<String> hosts = new ArrayList<> (1);
hosts.add(targethost); hosts.add(targetHost);
List<String> resolvedHosts = dnsToSwitchMapping.resolve(hosts); List<String> resolvedHosts = dnsToSwitchMapping.resolve(hosts);
if (resolvedHosts != null && !resolvedHosts.isEmpty()) { if (resolvedHosts != null && !resolvedHosts.isEmpty()) {
String rName = resolvedHosts.get(0); String rName = resolvedHosts.get(0);
if (rName != null) { if (rName != null) {
client = new NodeBase(rName + NodeBase.PATH_SEPARATOR_STR + client = new NodeBase(rName + NodeBase.PATH_SEPARATOR_STR +
targethost); targetHost);
} }
} else { } else {
LOG.error("Node Resolution failed. Please make sure that rack " + LOG.error("Node Resolution failed. Please make sure that rack " +
@ -391,26 +459,21 @@ public class DatanodeManager {
} }
} }
Comparator<DatanodeInfo> comparator = avoidStaleDataNodesForRead ? DatanodeInfo[] di = lb.getLocations();
new DFSUtil.DecomStaleComparator(staleInterval) : // Move decommissioned/stale datanodes to the bottom
DFSUtil.DECOM_COMPARATOR; Arrays.sort(di, comparator);
for (LocatedBlock b : locatedblocks) { // Sort nodes by network distance only for located blocks
DatanodeInfo[] di = b.getLocations(); int lastActiveIndex = di.length - 1;
// Move decommissioned/stale datanodes to the bottom while (lastActiveIndex > 0 && isInactive(di[lastActiveIndex])) {
Arrays.sort(di, comparator); --lastActiveIndex;
int lastActiveIndex = di.length - 1;
while (lastActiveIndex > 0 && isInactive(di[lastActiveIndex])) {
--lastActiveIndex;
}
int activeLen = lastActiveIndex + 1;
networktopology.sortByDistance(client, b.getLocations(), activeLen);
// must update cache since we modified locations array
b.updateCachedStorageInfo();
} }
} int activeLen = lastActiveIndex + 1;
networktopology.sortByDistance(client, lb.getLocations(), activeLen);
// must update cache since we modified locations array
lb.updateCachedStorageInfo();
}
/** @return the datanode descriptor for the host. */ /** @return the datanode descriptor for the host. */
public DatanodeDescriptor getDatanodeByHost(final String host) { public DatanodeDescriptor getDatanodeByHost(final String host) {


@ -114,6 +114,9 @@ public class DNConf {
// Allow LAZY_PERSIST writes from non-local clients? // Allow LAZY_PERSIST writes from non-local clients?
private final boolean allowNonLocalLazyPersist; private final boolean allowNonLocalLazyPersist;
private final int volFailuresTolerated;
private final int volsConfigured;
public DNConf(Configuration conf) { public DNConf(Configuration conf) {
this.conf = conf; this.conf = conf;
socketTimeout = conf.getInt(DFS_CLIENT_SOCKET_TIMEOUT_KEY, socketTimeout = conf.getInt(DFS_CLIENT_SOCKET_TIMEOUT_KEY,
@ -238,6 +241,13 @@ public class DNConf {
this.bpReadyTimeout = conf.getLong( this.bpReadyTimeout = conf.getLong(
DFS_DATANODE_BP_READY_TIMEOUT_KEY, DFS_DATANODE_BP_READY_TIMEOUT_KEY,
DFS_DATANODE_BP_READY_TIMEOUT_DEFAULT); DFS_DATANODE_BP_READY_TIMEOUT_DEFAULT);
this.volFailuresTolerated =
conf.getInt(DFSConfigKeys.DFS_DATANODE_FAILED_VOLUMES_TOLERATED_KEY,
DFSConfigKeys.DFS_DATANODE_FAILED_VOLUMES_TOLERATED_DEFAULT);
String[] dataDirs =
conf.getTrimmedStrings(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY);
this.volsConfigured = (dataDirs == null) ? 0 : dataDirs.length;
} }
// We get minimumNameNodeVersion via a method so it can be mocked out in tests. // We get minimumNameNodeVersion via a method so it can be mocked out in tests.
@ -363,4 +373,12 @@ public class DNConf {
public long getLifelineIntervalMs() { public long getLifelineIntervalMs() {
return lifelineIntervalMs; return lifelineIntervalMs;
} }
public int getVolFailuresTolerated() {
return volFailuresTolerated;
}
public int getVolsConfigured() {
return volsConfigured;
}
} }


@ -1280,6 +1280,15 @@ public class DataNode extends ReconfigurableBase
LOG.info("Starting DataNode with maxLockedMemory = " + LOG.info("Starting DataNode with maxLockedMemory = " +
dnConf.maxLockedMemory); dnConf.maxLockedMemory);
int volFailuresTolerated = dnConf.getVolFailuresTolerated();
int volsConfigured = dnConf.getVolsConfigured();
if (volFailuresTolerated < 0 || volFailuresTolerated >= volsConfigured) {
throw new DiskErrorException("Invalid value configured for "
+ "dfs.datanode.failed.volumes.tolerated - " + volFailuresTolerated
+ ". Value configured is either less than 0 or >= "
+ "to the number of configured volumes (" + volsConfigured + ").");
}
storage = new DataStorage(); storage = new DataStorage();
// global DN settings // global DN settings
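The range check that FsDatasetImpl used to perform (removed further down in this patch) now runs once at DataNode startup, using the values cached in DNConf: dfs.datanode.failed.volumes.tolerated must be at least 0 and strictly less than the number of configured data directories. A small, self-contained illustration of the arithmetic with made-up values:

```java
/** Made-up values illustrating the volume-failure tolerance check above. */
public class VolumeToleranceExample {
  public static void main(String[] args) {
    int volsConfigured = 4;        // e.g. dfs.datanode.data.dir lists four directories
    int volFailuresTolerated = 4;  // dfs.datanode.failed.volumes.tolerated

    // Mirrors the startup check: the value must lie in the range [0, volsConfigured).
    if (volFailuresTolerated < 0 || volFailuresTolerated >= volsConfigured) {
      System.out.println("Invalid: tolerating " + volFailuresTolerated
          + " failures out of " + volsConfigured + " volumes would allow the"
          + " DataNode to run with no usable volume at all.");
    } else {
      System.out.println("OK: the DataNode keeps running with up to "
          + volFailuresTolerated + " failed volumes.");
    }
  }
}
```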


@ -36,8 +36,9 @@ import org.apache.commons.io.FileUtils;
import org.apache.commons.logging.Log; import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory; import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.DU; import org.apache.hadoop.fs.CachingGetSpaceUsed;
import org.apache.hadoop.fs.FileUtil; import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.fs.GetSpaceUsed;
import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSUtilClient; import org.apache.hadoop.hdfs.DFSUtilClient;
import org.apache.hadoop.hdfs.protocol.Block; import org.apache.hadoop.hdfs.protocol.Block;
@ -92,7 +93,7 @@ class BlockPoolSlice {
private final Timer timer; private final Timer timer;
// TODO:FEDERATION scalability issue - a thread per DU is needed // TODO:FEDERATION scalability issue - a thread per DU is needed
private final DU dfsUsage; private final GetSpaceUsed dfsUsage;
/** /**
* Create a block pool slice * Create a block pool slice
@ -151,8 +152,10 @@ class BlockPoolSlice {
} }
// Use cached value initially if available. Or the following call will // Use cached value initially if available. Or the following call will
// block until the initial du command completes. // block until the initial du command completes.
this.dfsUsage = new DU(bpDir, conf, loadDfsUsed()); this.dfsUsage = new CachingGetSpaceUsed.Builder().setPath(bpDir)
this.dfsUsage.start(); .setConf(conf)
.setInitialUsed(loadDfsUsed())
.build();
// Make the dfs usage to be saved during shutdown. // Make the dfs usage to be saved during shutdown.
ShutdownHookManager.get().addShutdownHook( ShutdownHookManager.get().addShutdownHook(
@ -188,7 +191,9 @@ class BlockPoolSlice {
/** Run DU on local drives. It must be synchronized from caller. */ /** Run DU on local drives. It must be synchronized from caller. */
void decDfsUsed(long value) { void decDfsUsed(long value) {
dfsUsage.decDfsUsed(value); if (dfsUsage instanceof CachingGetSpaceUsed) {
((CachingGetSpaceUsed)dfsUsage).incDfsUsed(-value);
}
} }
long getDfsUsed() throws IOException { long getDfsUsed() throws IOException {
@ -196,7 +201,9 @@ class BlockPoolSlice {
} }
void incDfsUsed(long value) { void incDfsUsed(long value) {
dfsUsage.incDfsUsed(value); if (dfsUsage instanceof CachingGetSpaceUsed) {
((CachingGetSpaceUsed)dfsUsage).incDfsUsed(value);
}
} }
/** /**
@ -304,7 +311,10 @@ class BlockPoolSlice {
} }
File blockFile = FsDatasetImpl.moveBlockFiles(b, f, blockDir); File blockFile = FsDatasetImpl.moveBlockFiles(b, f, blockDir);
File metaFile = FsDatasetUtil.getMetaFile(blockFile, b.getGenerationStamp()); File metaFile = FsDatasetUtil.getMetaFile(blockFile, b.getGenerationStamp());
dfsUsage.incDfsUsed(b.getNumBytes()+metaFile.length()); if (dfsUsage instanceof CachingGetSpaceUsed) {
((CachingGetSpaceUsed) dfsUsage).incDfsUsed(
b.getNumBytes() + metaFile.length());
}
return blockFile; return blockFile;
} }
@ -720,7 +730,10 @@ class BlockPoolSlice {
saveReplicas(blocksListToPersist); saveReplicas(blocksListToPersist);
saveDfsUsed(); saveDfsUsed();
dfsUsedSaved = true; dfsUsedSaved = true;
dfsUsage.shutdown();
if (dfsUsage instanceof CachingGetSpaceUsed) {
IOUtils.cleanup(LOG, ((CachingGetSpaceUsed) dfsUsage));
}
} }
private boolean readReplicasFromCache(ReplicaMap volumeMap, private boolean readReplicasFromCache(ReplicaMap volumeMap,
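BlockPoolSlice now programs against the GetSpaceUsed interface and only calls the mutating methods when the concrete object is a CachingGetSpaceUsed, since only the caching variant exposes incDfsUsed() and needs to be shut down. A condensed sketch of that pattern, using a hypothetical wrapper class:

```java
import java.io.File;
import java.io.IOException;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CachingGetSpaceUsed;
import org.apache.hadoop.fs.GetSpaceUsed;
import org.apache.hadoop.io.IOUtils;

/** Hypothetical wrapper showing the GetSpaceUsed usage pattern from BlockPoolSlice. */
class SpaceUsageTrackerSketch {
  private static final Log LOG = LogFactory.getLog(SpaceUsageTrackerSketch.class);
  private final GetSpaceUsed dfsUsage;

  SpaceUsageTrackerSketch(File dir, Configuration conf, long cachedUsed)
      throws IOException {
    // The builder replaces the old `new DU(dir, conf, cachedUsed)` plus start().
    this.dfsUsage = new CachingGetSpaceUsed.Builder()
        .setPath(dir)
        .setConf(conf)
        .setInitialUsed(cachedUsed)
        .build();
  }

  void add(long bytes) {
    // Only caching implementations track deltas; others recompute on demand.
    if (dfsUsage instanceof CachingGetSpaceUsed) {
      ((CachingGetSpaceUsed) dfsUsage).incDfsUsed(bytes);
    }
  }

  void close() {
    if (dfsUsage instanceof CachingGetSpaceUsed) {
      IOUtils.cleanup(LOG, (CachingGetSpaceUsed) dfsUsage);
    }
  }
}
```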


@ -268,24 +268,15 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
this.smallBufferSize = DFSUtilClient.getSmallBufferSize(conf); this.smallBufferSize = DFSUtilClient.getSmallBufferSize(conf);
// The number of volumes required for operation is the total number // The number of volumes required for operation is the total number
// of volumes minus the number of failed volumes we can tolerate. // of volumes minus the number of failed volumes we can tolerate.
volFailuresTolerated = volFailuresTolerated = datanode.getDnConf().getVolFailuresTolerated();
conf.getInt(DFSConfigKeys.DFS_DATANODE_FAILED_VOLUMES_TOLERATED_KEY,
DFSConfigKeys.DFS_DATANODE_FAILED_VOLUMES_TOLERATED_DEFAULT);
String[] dataDirs = conf.getTrimmedStrings(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY);
Collection<StorageLocation> dataLocations = DataNode.getStorageLocations(conf); Collection<StorageLocation> dataLocations = DataNode.getStorageLocations(conf);
List<VolumeFailureInfo> volumeFailureInfos = getInitialVolumeFailureInfos( List<VolumeFailureInfo> volumeFailureInfos = getInitialVolumeFailureInfos(
dataLocations, storage); dataLocations, storage);
int volsConfigured = (dataDirs == null) ? 0 : dataDirs.length; int volsConfigured = datanode.getDnConf().getVolsConfigured();
int volsFailed = volumeFailureInfos.size(); int volsFailed = volumeFailureInfos.size();
if (volFailuresTolerated < 0 || volFailuresTolerated >= volsConfigured) {
throw new DiskErrorException("Invalid value configured for "
+ "dfs.datanode.failed.volumes.tolerated - " + volFailuresTolerated
+ ". Value configured is either less than 0 or >= "
+ "to the number of configured volumes (" + volsConfigured + ").");
}
if (volsFailed > volFailuresTolerated) { if (volsFailed > volFailuresTolerated) {
throw new DiskErrorException("Too many failed volumes - " throw new DiskErrorException("Too many failed volumes - "
+ "current valid volumes: " + storage.getNumStorageDirs() + "current valid volumes: " + storage.getNumStorageDirs()
@ -1159,7 +1150,8 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
// construct a RBW replica with the new GS // construct a RBW replica with the new GS
File blkfile = replicaInfo.getBlockFile(); File blkfile = replicaInfo.getBlockFile();
FsVolumeImpl v = (FsVolumeImpl)replicaInfo.getVolume(); FsVolumeImpl v = (FsVolumeImpl)replicaInfo.getVolume();
if (v.getAvailable() < estimateBlockLen - replicaInfo.getNumBytes()) { long bytesReserved = estimateBlockLen - replicaInfo.getNumBytes();
if (v.getAvailable() < bytesReserved) {
throw new DiskOutOfSpaceException("Insufficient space for appending to " throw new DiskOutOfSpaceException("Insufficient space for appending to "
+ replicaInfo); + replicaInfo);
} }
@ -1167,7 +1159,7 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
File oldmeta = replicaInfo.getMetaFile(); File oldmeta = replicaInfo.getMetaFile();
ReplicaBeingWritten newReplicaInfo = new ReplicaBeingWritten( ReplicaBeingWritten newReplicaInfo = new ReplicaBeingWritten(
replicaInfo.getBlockId(), replicaInfo.getNumBytes(), newGS, replicaInfo.getBlockId(), replicaInfo.getNumBytes(), newGS,
v, newBlkFile.getParentFile(), Thread.currentThread(), estimateBlockLen); v, newBlkFile.getParentFile(), Thread.currentThread(), bytesReserved);
File newmeta = newReplicaInfo.getMetaFile(); File newmeta = newReplicaInfo.getMetaFile();
// rename meta file to rbw directory // rename meta file to rbw directory
@ -1203,7 +1195,7 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
// Replace finalized replica by a RBW replica in replicas map // Replace finalized replica by a RBW replica in replicas map
volumeMap.add(bpid, newReplicaInfo); volumeMap.add(bpid, newReplicaInfo);
v.reserveSpaceForReplica(estimateBlockLen - replicaInfo.getNumBytes()); v.reserveSpaceForReplica(bytesReserved);
return newReplicaInfo; return newReplicaInfo;
} }


@ -184,7 +184,6 @@ import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
import org.apache.hadoop.hdfs.protocol.LastBlockWithStatus; import org.apache.hadoop.hdfs.protocol.LastBlockWithStatus;
import org.apache.hadoop.hdfs.protocol.LocatedBlock; import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.protocol.LocatedBlocks; import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
import org.apache.hadoop.hdfs.protocol.LocatedStripedBlock;
import org.apache.hadoop.hdfs.protocol.RecoveryInProgressException; import org.apache.hadoop.hdfs.protocol.RecoveryInProgressException;
import org.apache.hadoop.hdfs.protocol.RollingUpgradeException; import org.apache.hadoop.hdfs.protocol.RollingUpgradeException;
import org.apache.hadoop.hdfs.protocol.RollingUpgradeInfo; import org.apache.hadoop.hdfs.protocol.RollingUpgradeInfo;
@ -903,6 +902,36 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
return null; return null;
} }
/**
* Locate DefaultAuditLogger, if any, to enable/disable CallerContext.
*
* @param value
* true, enable CallerContext, otherwise false to disable it.
*/
void setCallerContextEnabled(final boolean value) {
for (AuditLogger logger : auditLoggers) {
if (logger instanceof DefaultAuditLogger) {
((DefaultAuditLogger) logger).setCallerContextEnabled(value);
break;
}
}
}
/**
* Get the value indicating if CallerContext is enabled.
*
* @return true, if CallerContext is enabled, otherwise false, if it's
* disabled.
*/
boolean getCallerContextEnabled() {
for (AuditLogger logger : auditLoggers) {
if (logger instanceof DefaultAuditLogger) {
return ((DefaultAuditLogger) logger).getCallerContextEnabled();
}
}
return false;
}
private List<AuditLogger> initAuditLoggers(Configuration conf) { private List<AuditLogger> initAuditLoggers(Configuration conf) {
// Initialize the custom access loggers if configured. // Initialize the custom access loggers if configured.
Collection<String> alClasses = Collection<String> alClasses =
@ -1779,25 +1808,28 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
} }
LocatedBlocks blocks = res.blocks; LocatedBlocks blocks = res.blocks;
sortLocatedBlocks(clientMachine, blocks);
return blocks;
}
private void sortLocatedBlocks(String clientMachine, LocatedBlocks blocks) {
if (blocks != null) { if (blocks != null) {
List<LocatedBlock> blkList = blocks.getLocatedBlocks(); List<LocatedBlock> blkList = blocks.getLocatedBlocks();
if (blkList == null || blkList.size() == 0 || if (blkList == null || blkList.size() == 0) {
blkList.get(0) instanceof LocatedStripedBlock) { // simply return, block list is empty
// no need to sort locations for striped blocks return;
return blocks;
} }
blockManager.getDatanodeManager().sortLocatedBlocks( blockManager.getDatanodeManager().sortLocatedBlocks(clientMachine,
clientMachine, blkList); blkList);
// lastBlock is not part of getLocatedBlocks(), might need to sort it too // lastBlock is not part of getLocatedBlocks(), might need to sort it too
LocatedBlock lastBlock = blocks.getLastLocatedBlock(); LocatedBlock lastBlock = blocks.getLastLocatedBlock();
if (lastBlock != null) { if (lastBlock != null) {
ArrayList<LocatedBlock> lastBlockList = Lists.newArrayList(lastBlock); ArrayList<LocatedBlock> lastBlockList = Lists.newArrayList(lastBlock);
blockManager.getDatanodeManager().sortLocatedBlocks( blockManager.getDatanodeManager().sortLocatedBlocks(clientMachine,
clientMachine, lastBlockList); lastBlockList);
} }
} }
return blocks;
} }
/** /**
@ -4279,10 +4311,6 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
setManualAndResourceLowSafeMode(!resourcesLow, resourcesLow); setManualAndResourceLowSafeMode(!resourcesLow, resourcesLow);
NameNode.stateChangeLog.info("STATE* Safe mode is ON.\n" + NameNode.stateChangeLog.info("STATE* Safe mode is ON.\n" +
getSafeModeTip()); getSafeModeTip());
if (isEditlogOpenForWrite) {
getEditLog().logSyncAll();
}
NameNode.stateChangeLog.info("STATE* Safe mode is ON" + getSafeModeTip());
} finally { } finally {
writeUnlock(); writeUnlock();
} }
@ -6850,13 +6878,33 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
} }
}; };
private boolean isCallerContextEnabled; private volatile boolean isCallerContextEnabled;
private int callerContextMaxLen; private int callerContextMaxLen;
private int callerSignatureMaxLen; private int callerSignatureMaxLen;
private boolean logTokenTrackingId; private boolean logTokenTrackingId;
private Set<String> debugCmdSet = new HashSet<String>(); private Set<String> debugCmdSet = new HashSet<String>();
/**
* Enable or disable CallerContext.
*
* @param value
* true, enable CallerContext, otherwise false to disable it.
*/
void setCallerContextEnabled(final boolean value) {
isCallerContextEnabled = value;
}
/**
* Get the value indicating if CallerContext is enabled.
*
* @return true, if CallerContext is enabled, otherwise false, if it's
* disabled.
*/
boolean getCallerContextEnabled() {
return isCallerContextEnabled;
}
@Override @Override
public void initialize(Configuration conf) { public void initialize(Configuration conf) {
isCallerContextEnabled = conf.getBoolean( isCallerContextEnabled = conf.getBoolean(


@ -107,6 +107,8 @@ import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_DEFAULT_NAME
import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_TRASH_INTERVAL_DEFAULT; import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_TRASH_INTERVAL_DEFAULT;
import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_TRASH_INTERVAL_KEY; import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_TRASH_INTERVAL_KEY;
import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_NAMENODE_RPC_PORT_DEFAULT; import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_NAMENODE_RPC_PORT_DEFAULT;
import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_CALLER_CONTEXT_ENABLED_KEY;
import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_CALLER_CONTEXT_ENABLED_DEFAULT;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HA_AUTO_FAILOVER_ENABLED_DEFAULT; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HA_AUTO_FAILOVER_ENABLED_DEFAULT;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HA_AUTO_FAILOVER_ENABLED_KEY; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HA_AUTO_FAILOVER_ENABLED_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HA_FENCE_METHODS_KEY; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HA_FENCE_METHODS_KEY;
@ -277,7 +279,8 @@ public class NameNode extends ReconfigurableBase implements
.unmodifiableList(Arrays .unmodifiableList(Arrays
.asList(DFS_HEARTBEAT_INTERVAL_KEY, .asList(DFS_HEARTBEAT_INTERVAL_KEY,
DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY,
FS_PROTECTED_DIRECTORIES)); FS_PROTECTED_DIRECTORIES,
HADOOP_CALLER_CONTEXT_ENABLED_KEY));
private static final String USAGE = "Usage: hdfs namenode [" private static final String USAGE = "Usage: hdfs namenode ["
+ StartupOption.BACKUP.getName() + "] | \n\t[" + StartupOption.BACKUP.getName() + "] | \n\t["
@ -2008,7 +2011,9 @@ public class NameNode extends ReconfigurableBase implements
+ datanodeManager.getHeartbeatRecheckInterval()); + datanodeManager.getHeartbeatRecheckInterval());
} }
case FS_PROTECTED_DIRECTORIES: case FS_PROTECTED_DIRECTORIES:
return getNamesystem().getFSDirectory().setProtectedDirectories(newVal); return reconfProtectedDirectories(newVal);
case HADOOP_CALLER_CONTEXT_ENABLED_KEY:
return reconfCallerContextEnabled(newVal);
default: default:
break; break;
} }
@ -2016,6 +2021,21 @@ public class NameNode extends ReconfigurableBase implements
.get(property)); .get(property));
} }
private String reconfProtectedDirectories(String newVal) {
return getNamesystem().getFSDirectory().setProtectedDirectories(newVal);
}
private String reconfCallerContextEnabled(String newVal) {
Boolean callerContextEnabled;
if (newVal == null) {
callerContextEnabled = HADOOP_CALLER_CONTEXT_ENABLED_DEFAULT;
} else {
callerContextEnabled = Boolean.parseBoolean(newVal);
}
namesystem.setCallerContextEnabled(callerContextEnabled);
return Boolean.toString(callerContextEnabled);
}
@Override // ReconfigurableBase @Override // ReconfigurableBase
protected Configuration getNewConf() { protected Configuration getNewConf() {
return new HdfsConfiguration(); return new HdfsConfiguration();


@ -27,6 +27,7 @@ import static org.junit.Assert.assertTrue;
import java.io.IOException; import java.io.IOException;
import java.util.ArrayList; import java.util.ArrayList;
import java.util.Collection; import java.util.Collection;
import java.util.HashMap;
import java.util.Iterator; import java.util.Iterator;
import java.util.List; import java.util.List;
import java.util.concurrent.CountDownLatch; import java.util.concurrent.CountDownLatch;
@ -49,12 +50,15 @@ import org.apache.hadoop.hdfs.client.HdfsDataInputStream;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo; import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo.AdminStates; import org.apache.hadoop.hdfs.protocol.DatanodeInfo.AdminStates;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType; import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
import org.apache.hadoop.hdfs.protocol.LocatedBlock; import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.protocol.LocatedStripedBlock;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager; import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
import org.apache.hadoop.hdfs.server.datanode.DataNode; import org.apache.hadoop.hdfs.server.datanode.DataNode;
import org.apache.hadoop.hdfs.server.namenode.FSNamesystem; import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
import org.apache.hadoop.hdfs.server.namenode.NameNode; import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter; import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.test.PathUtils; import org.apache.hadoop.test.PathUtils;
import org.junit.After; import org.junit.After;
import org.junit.Assert; import org.junit.Assert;
@ -159,6 +163,13 @@ public class TestDecommissionWithStriped {
testDecommission(blockSize * dataBlocks, 9, 1, "testFileFullBlockGroup"); testDecommission(blockSize * dataBlocks, 9, 1, "testFileFullBlockGroup");
} }
@Test(timeout = 120000)
public void testFileMultipleBlockGroups() throws Exception {
LOG.info("Starting test testFileMultipleBlockGroups");
int writeBytes = 2 * blockSize * dataBlocks;
testDecommission(writeBytes, 9, 1, "testFileMultipleBlockGroups");
}
@Test(timeout = 120000) @Test(timeout = 120000)
public void testFileSmallerThanOneCell() throws Exception { public void testFileSmallerThanOneCell() throws Exception {
LOG.info("Starting test testFileSmallerThanOneCell"); LOG.info("Starting test testFileSmallerThanOneCell");
@ -274,7 +285,15 @@ public class TestDecommissionWithStriped {
int deadDecomissioned = fsn.getNumDecomDeadDataNodes(); int deadDecomissioned = fsn.getNumDecomDeadDataNodes();
int liveDecomissioned = fsn.getNumDecomLiveDataNodes(); int liveDecomissioned = fsn.getNumDecomLiveDataNodes();
((HdfsDataInputStream) dfs.open(ecFile)).getAllBlocks(); List<LocatedBlock> lbs = ((HdfsDataInputStream) dfs.open(ecFile))
.getAllBlocks();
// prepare expected block index and token list.
List<HashMap<DatanodeInfo, Byte>> locToIndexList = new ArrayList<>();
List<HashMap<DatanodeInfo, Token<BlockTokenIdentifier>>> locToTokenList =
new ArrayList<>();
prepareBlockIndexAndTokenList(lbs, locToIndexList, locToTokenList);
// Decommission node. Verify that node is decommissioned. // Decommission node. Verify that node is decommissioned.
decommissionNode(0, decommisionNodes, AdminStates.DECOMMISSIONED); decommissionNode(0, decommisionNodes, AdminStates.DECOMMISSIONED);
@ -290,9 +309,55 @@ public class TestDecommissionWithStriped {
assertNull(checkFile(dfs, ecFile, storageCount, decommisionNodes, numDNs)); assertNull(checkFile(dfs, ecFile, storageCount, decommisionNodes, numDNs));
StripedFileTestUtil.checkData(dfs, ecFile, writeBytes, decommisionNodes, StripedFileTestUtil.checkData(dfs, ecFile, writeBytes, decommisionNodes,
null); null);
assertBlockIndexAndTokenPosition(lbs, locToIndexList, locToTokenList);
cleanupFile(dfs, ecFile); cleanupFile(dfs, ecFile);
} }
private void prepareBlockIndexAndTokenList(List<LocatedBlock> lbs,
List<HashMap<DatanodeInfo, Byte>> locToIndexList,
List<HashMap<DatanodeInfo, Token<BlockTokenIdentifier>>> locToTokenList) {
for (LocatedBlock lb : lbs) {
HashMap<DatanodeInfo, Byte> locToIndex = new HashMap<DatanodeInfo, Byte>();
locToIndexList.add(locToIndex);
HashMap<DatanodeInfo, Token<BlockTokenIdentifier>> locToToken =
new HashMap<DatanodeInfo, Token<BlockTokenIdentifier>>();
locToTokenList.add(locToToken);
DatanodeInfo[] di = lb.getLocations();
LocatedStripedBlock stripedBlk = (LocatedStripedBlock) lb;
for (int i = 0; i < di.length; i++) {
locToIndex.put(di[i], stripedBlk.getBlockIndices()[i]);
locToToken.put(di[i], stripedBlk.getBlockTokens()[i]);
}
}
}
/**
* Verify block index and token values. Must update block indices and block
* tokens after sorting.
*/
private void assertBlockIndexAndTokenPosition(List<LocatedBlock> lbs,
List<HashMap<DatanodeInfo, Byte>> locToIndexList,
List<HashMap<DatanodeInfo, Token<BlockTokenIdentifier>>> locToTokenList) {
for (int i = 0; i < lbs.size(); i++) {
LocatedBlock lb = lbs.get(i);
LocatedStripedBlock stripedBlk = (LocatedStripedBlock) lb;
HashMap<DatanodeInfo, Byte> locToIndex = locToIndexList.get(i);
HashMap<DatanodeInfo, Token<BlockTokenIdentifier>> locToToken =
locToTokenList.get(i);
DatanodeInfo[] di = lb.getLocations();
for (int j = 0; j < di.length; j++) {
Assert.assertEquals("Block index value mismatches after sorting",
(byte) locToIndex.get(di[j]), stripedBlk.getBlockIndices()[j]);
Assert.assertEquals("Block token value mismatches after sorting",
locToToken.get(di[j]), stripedBlk.getBlockTokens()[j]);
}
}
}
private List<DatanodeInfo> getDecommissionDatanode(DistributedFileSystem dfs, private List<DatanodeInfo> getDecommissionDatanode(DistributedFileSystem dfs,
Path ecFile, int writeBytes, int decomNodeCount) throws IOException { Path ecFile, int writeBytes, int decomNodeCount) throws IOException {
ArrayList<DatanodeInfo> decommissionedNodes = new ArrayList<>(); ArrayList<DatanodeInfo> decommissionedNodes = new ArrayList<>();
@ -447,7 +512,12 @@ public class TestDecommissionWithStriped {
return "For block " + blk.getBlock() + " replica on " + nodes[j] return "For block " + blk.getBlock() + " replica on " + nodes[j]
+ " is given as downnode, " + "but is not decommissioned"; + " is given as downnode, " + "but is not decommissioned";
} }
// TODO: Add check to verify that the Decommissioned node (if any) // Decommissioned node (if any) should only be last node in list.
if (j < repl) {
return "For block " + blk.getBlock() + " decommissioned node "
+ nodes[j] + " was not last node in list: " + (j + 1) + " of "
+ nodes.length;
}
// should only be last node in list. // should only be last node in list.
LOG.info("Block " + blk.getBlock() + " replica on " + nodes[j] LOG.info("Block " + blk.getBlock() + " replica on " + nodes[j]
+ " is decommissioned."); + " is decommissioned.");


@ -0,0 +1,557 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.blockmanagement;
import java.io.IOException;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.StorageType;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.StripedFileTestUtil;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo.AdminStates;
import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.protocol.LocatedStripedBlock;
import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.util.Time;
import org.junit.Assert;
import org.junit.BeforeClass;
import org.junit.Test;
import org.mockito.Mockito;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* This class tests the sorting of located striped blocks based on
* decommissioned states.
*/
public class TestSortLocatedStripedBlock {
static final Logger LOG = LoggerFactory
.getLogger(TestSortLocatedStripedBlock.class);
static final int BLK_GROUP_WIDTH = StripedFileTestUtil.NUM_DATA_BLOCKS
+ StripedFileTestUtil.NUM_PARITY_BLOCKS;
static final int NUM_DATA_BLOCKS = StripedFileTestUtil.NUM_DATA_BLOCKS;
static final int NUM_PARITY_BLOCKS = StripedFileTestUtil.NUM_PARITY_BLOCKS;
static DatanodeManager dm;
static final long STALE_INTERVAL = 30 * 1000 * 60;
@BeforeClass
public static void setup() throws IOException {
dm = mockDatanodeManager();
}
/**
* Test to verify sorting with multiple decommissioned datanodes exists in
* storage lists.
*
* We have the following storage list; decommissioned internal blocks are marked with a '
* d0, d1, d2, d3, d4, d5, d6, d7, d8, d9, d10, d11, d12
* mapping to indices
* 0', 1', 2, 3, 4, 5, 6, 7', 8', 0, 1, 7, 8
*
* Decommissioned node indices: 0, 1, 7, 8
*
* So in the original list, nodes d0, d1, d7 and d8 are in decommissioned state.
*
* After sorting the expected block indices list should be,
* 0, 1, 2, 3, 4, 5, 6, 7, 8, 0', 1', 7', 8'
*
* After sorting the expected storage list will be,
* d9, d10, d2, d3, d4, d5, d6, d11, d12, d0, d1, d7, d8.
*
* Note: after sorting block indices will not be in ascending order.
*/
@Test(timeout = 10000)
public void testWithMultipleDecommnDatanodes() {
LOG.info("Starting test testSortWithMultipleDecommnDatanodes");
int lbsCount = 2; // two located block groups
List<Integer> decommnNodeIndices = new ArrayList<>();
decommnNodeIndices.add(0);
decommnNodeIndices.add(1);
decommnNodeIndices.add(7);
decommnNodeIndices.add(8);
List<Integer> targetNodeIndices = new ArrayList<>();
targetNodeIndices.addAll(decommnNodeIndices);
// map contains decommissioned node details in each located strip block
// which will be used for assertions
HashMap<Integer, List<String>> decommissionedNodes = new HashMap<>(
lbsCount * decommnNodeIndices.size());
List<LocatedBlock> lbs = createLocatedStripedBlocks(lbsCount,
NUM_DATA_BLOCKS, NUM_PARITY_BLOCKS, decommnNodeIndices,
targetNodeIndices, decommissionedNodes);
// prepare expected block index and token list.
List<HashMap<DatanodeInfo, Byte>> locToIndexList = new ArrayList<>();
List<HashMap<DatanodeInfo, Token<BlockTokenIdentifier>>> locToTokenList =
new ArrayList<>();
prepareBlockIndexAndTokenList(lbs, locToIndexList, locToTokenList);
dm.sortLocatedBlocks(null, lbs);
assertDecommnNodePosition(BLK_GROUP_WIDTH, decommissionedNodes, lbs);
assertBlockIndexAndTokenPosition(lbs, locToIndexList, locToTokenList);
}
/**
* Test to verify sorting with two decommissioned datanodes exists in
* storage lists for the same block index.
*
* We have the following storage list; decommissioned internal blocks are marked with a '
* d0, d1, d2, d3, d4, d5, d6, d7, d8, d9, d10, d11, d12, d13
* mapping to indices
* 0', 1', 2, 3, 4', 5', 6, 7, 8, 0, 1', 4, 5, 1
*
* Decommissioned node indices: 0', 1', 4', 5', 1'
*
* Here block index 1 has been decommissioned twice.
* So in the original list, nodes d0, d1, d4, d5 and d10 are in decommissioned state.
*
* After sorting the expected block indices list will be,
* 0, 1, 2, 3, 4, 5, 6, 7, 8, 0', 1', 1', 4', 5'
*
* After sorting the expected storage list will be,
* d9, d13, d2, d3, d11, d12, d6, d7, d8, d0, d1, d10, d4, d5.
*
* Note: after sorting block indices will not be in ascending order.
*/
@Test(timeout = 10000)
public void testTwoDatanodesWithSameBlockIndexAreDecommn() {
LOG.info("Starting test testTwoDatanodesWithSameBlockIndexAreDecommn");
int lbsCount = 2; // two located block groups
List<Integer> decommnNodeIndices = new ArrayList<>();
decommnNodeIndices.add(0);
decommnNodeIndices.add(1);
decommnNodeIndices.add(4);
decommnNodeIndices.add(5);
// representing blockIndex 1, which is later decommissioned as well
decommnNodeIndices.add(1);
List<Integer> targetNodeIndices = new ArrayList<>();
targetNodeIndices.addAll(decommnNodeIndices);
// map contains decommissioned node details in each located strip block
// which will be used for assertions
HashMap<Integer, List<String>> decommissionedNodes = new HashMap<>(
lbsCount * decommnNodeIndices.size());
List<LocatedBlock> lbs = createLocatedStripedBlocks(lbsCount,
NUM_DATA_BLOCKS, NUM_PARITY_BLOCKS, decommnNodeIndices,
targetNodeIndices, decommissionedNodes);
// prepare expected block index and token list.
List<HashMap<DatanodeInfo, Byte>> locToIndexList = new ArrayList<>();
List<HashMap<DatanodeInfo, Token<BlockTokenIdentifier>>> locToTokenList =
new ArrayList<>();
prepareBlockIndexAndTokenList(lbs, locToIndexList, locToTokenList);
dm.sortLocatedBlocks(null, lbs);
assertDecommnNodePosition(BLK_GROUP_WIDTH, decommissionedNodes, lbs);
assertBlockIndexAndTokenPosition(lbs, locToIndexList, locToTokenList);
}
/**
* Test to verify sorting with decommissioned datanodes exists in storage
* list which is smaller than stripe size.
*
* We have the following storage list; decommissioned internal blocks are marked with a '
* d0, d1, d2, d3, d6, d7, d8, d9, d10, d11
* mapping to indices
* 0', 1, 2', 3, 6, 7, 8, 0, 2', 2
*
* Decommissioned node indices: 0', 2', 2'
*
* Here block index 2 has been decommissioned twice.
* So in the original list, nodes d0, d2 and d10 are in decommissioned state.
*
* After sorting the expected block indices list should be,
* 0, 1, 2, 3, 6, 7, 8, 0', 2', 2'
*
* After sorting the expected storage list will be,
* d9, d1, d11, d3, d6, d7, d8, d0, d2, d10.
*
* Note: after sorting block indices will not be in ascending order.
*/
@Test(timeout = 10000)
public void testSmallerThanOneStripeWithMultpleDecommnNodes()
throws Exception {
LOG.info("Starting test testSmallerThanOneStripeWithDecommn");
int lbsCount = 2; // two located block groups
List<Integer> decommnNodeIndices = new ArrayList<>();
decommnNodeIndices.add(0);
decommnNodeIndices.add(2);
// representing blockIndex 2, which is later decommissioned as well
decommnNodeIndices.add(2);
List<Integer> targetNodeIndices = new ArrayList<>();
targetNodeIndices.addAll(decommnNodeIndices);
// map contains decommissioned node details in each located strip block
// which will be used for assertions
HashMap<Integer, List<String>> decommissionedNodes = new HashMap<>(
lbsCount * decommnNodeIndices.size());
int dataBlksNum = NUM_DATA_BLOCKS - 2;
List<LocatedBlock> lbs = createLocatedStripedBlocks(lbsCount, dataBlksNum,
NUM_PARITY_BLOCKS, decommnNodeIndices, targetNodeIndices,
decommissionedNodes);
// prepare expected block index and token list.
List<HashMap<DatanodeInfo, Byte>> locToIndexList = new ArrayList<>();
List<HashMap<DatanodeInfo, Token<BlockTokenIdentifier>>> locToTokenList =
new ArrayList<>();
prepareBlockIndexAndTokenList(lbs, locToIndexList, locToTokenList);
dm.sortLocatedBlocks(null, lbs);
// After this index all are decommissioned nodes.
int blkGrpWidth = dataBlksNum + NUM_PARITY_BLOCKS;
assertDecommnNodePosition(blkGrpWidth, decommissionedNodes, lbs);
assertBlockIndexAndTokenPosition(lbs, locToIndexList, locToTokenList);
}
/**
* Test to verify sorting when decommissioned datanodes exist in the storage
* list but the corresponding new target datanodes do not exist.
*
* We have a storage list; decommissioned internal blocks are marked with a '
* d0, d1, d2, d3, d4, d5, d6, d7, d8, d9, d10, d11
* mapping to indices
* 0', 1', 2', 3, 4', 5', 6, 7, 8, 0, 2, 4
*
* Decommissioned node indices: 0', 1', 2', 4', 5'
*
* Nodes 1 and 5 do not exist in the target list. This can happen when the
* target node's block is corrupted or lost after successful decommissioning.
* So in the original list, the nodes corresponding to decommissioned block
* indices 1 and 5 do not have any target entries.
*
* After sorting the expected block indices list should be,
* 0, 2, 3, 4, 6, 7, 8, 0', 1', 2', 4', 5'
*
* After sorting the expected storage list will be,
* d9, d10, d3, d11, d6, d7, d8, d0, d1, d2, d4, d5.
*
* Note: after sorting block indices will not be in ascending order.
*/
@Test(timeout = 10000)
public void testTargetDecommnDatanodeDoesntExists() {
LOG.info("Starting test testTargetDecommnDatanodeDoesntExists");
int lbsCount = 2; // two located block groups
List<Integer> decommnNodeIndices = new ArrayList<>();
decommnNodeIndices.add(0);
decommnNodeIndices.add(1);
decommnNodeIndices.add(2);
decommnNodeIndices.add(4);
decommnNodeIndices.add(5);
List<Integer> targetNodeIndices = new ArrayList<>();
targetNodeIndices.add(0);
targetNodeIndices.add(2);
targetNodeIndices.add(4);
// Nodes 1 and 5 do not exist in the target list. One such case is when the
// target node's block is corrupted or lost after successful decommissioning.
// map contains decommissioned node details for each located striped block,
// which will be used for assertions
HashMap<Integer, List<String>> decommissionedNodes = new HashMap<>(
lbsCount * decommnNodeIndices.size());
List<LocatedBlock> lbs = createLocatedStripedBlocks(lbsCount,
NUM_DATA_BLOCKS, NUM_PARITY_BLOCKS, decommnNodeIndices,
targetNodeIndices, decommissionedNodes);
// prepare expected block index and token list.
List<HashMap<DatanodeInfo, Byte>> locToIndexList = new ArrayList<>();
List<HashMap<DatanodeInfo, Token<BlockTokenIdentifier>>> locToTokenList =
new ArrayList<>();
prepareBlockIndexAndTokenList(lbs, locToIndexList, locToTokenList);
dm.sortLocatedBlocks(null, lbs);
// After this index all nodes are decommissioned. Two more block indices
// need to be reconstructed.
int blkGrpWidth = NUM_DATA_BLOCKS + NUM_PARITY_BLOCKS - 2;
assertDecommnNodePosition(blkGrpWidth, decommissionedNodes, lbs);
assertBlockIndexAndTokenPosition(lbs, locToIndexList, locToTokenList);
}
/**
* Test to verify sorting when multiple in-service and decommissioned
* datanodes exist in the storage lists.
*
* We have a storage list; decommissioned internal blocks are marked with a '
* d0, d1, d2, d3, d4, d5, d6, d7, d8, d9, d10, d11, d12, d13
* mapping to indices
* 0', 1', 2, 3, 4, 5, 6, 7', 8', 0, 1, 7, 8, 1
*
* Decommissioned node indices: 0', 1', 7', 8'
*
* Additional In-Service node d13 at the end, block index: 1
*
* So in the original list, nodes d0, d1, d7 and d8 are decommissioned.
*
* After sorting the expected block indices list will be,
* 0, 1, 2, 3, 4, 5, 6, 7, 8, 1, 0', 1', 7', 8'
*
* After sorting the expected storage list will be,
* d9, d10, d2, d3, d4, d5, d6, d11, d12, d13, d0, d1, d7, d8.
*
* Note: after sorting block indices will not be in ascending order.
*/
@Test(timeout = 10000)
public void testWithMultipleInServiceAndDecommnDatanodes() {
LOG.info("Starting test testWithMultipleInServiceAndDecommnDatanodes");
int lbsCount = 2; // two located block groups
List<Integer> decommnNodeIndices = new ArrayList<>();
decommnNodeIndices.add(0);
decommnNodeIndices.add(1);
decommnNodeIndices.add(7);
decommnNodeIndices.add(8);
List<Integer> targetNodeIndices = new ArrayList<>();
targetNodeIndices.addAll(decommnNodeIndices);
// at the end add an additional In-Service node to blockIndex=1
targetNodeIndices.add(1);
// map contains decommissioned node details for each located striped block,
// which will be used for assertions
HashMap<Integer, List<String>> decommissionedNodes = new HashMap<>(
lbsCount * decommnNodeIndices.size());
List<LocatedBlock> lbs = createLocatedStripedBlocks(lbsCount,
NUM_DATA_BLOCKS, NUM_PARITY_BLOCKS, decommnNodeIndices,
targetNodeIndices, decommissionedNodes);
List <DatanodeInfo> staleDns = new ArrayList<>();
for (LocatedBlock lb : lbs) {
DatanodeInfo[] locations = lb.getLocations();
DatanodeInfo staleDn = locations[locations.length - 1];
staleDn
.setLastUpdateMonotonic(Time.monotonicNow() - (STALE_INTERVAL * 2));
staleDns.add(staleDn);
}
// prepare expected block index and token list.
List<HashMap<DatanodeInfo, Byte>> locToIndexList = new ArrayList<>();
List<HashMap<DatanodeInfo, Token<BlockTokenIdentifier>>> locToTokenList =
new ArrayList<>();
prepareBlockIndexAndTokenList(lbs, locToIndexList, locToTokenList);
dm.sortLocatedBlocks(null, lbs);
assertDecommnNodePosition(BLK_GROUP_WIDTH + 1, decommissionedNodes, lbs);
assertBlockIndexAndTokenPosition(lbs, locToIndexList, locToTokenList);
for (LocatedBlock lb : lbs) {
byte[] blockIndices = ((LocatedStripedBlock) lb).getBlockIndices();
// after sorting stale block index will be placed after normal nodes.
Assert.assertEquals("Failed to move stale node to bottom!", 1,
blockIndices[9]);
DatanodeInfo[] locations = lb.getLocations();
// After sorting stale node d13 will be placed after normal nodes
Assert.assertEquals("Failed to move stale dn after normal one!",
staleDns.remove(0), locations[9]);
}
}
/**
* Verify that decommissioned/stale nodes must be positioned after normal
* nodes.
*/
private void assertDecommnNodePosition(int blkGrpWidth,
HashMap<Integer, List<String>> decommissionedNodes,
List<LocatedBlock> lbs) {
for (int i = 0; i < lbs.size(); i++) { // for each block
LocatedBlock blk = lbs.get(i);
DatanodeInfo[] nodes = blk.getLocations();
List<String> decommissionedNodeList = decommissionedNodes.get(i);
for (int j = 0; j < nodes.length; j++) { // for each replica
DatanodeInfo dnInfo = nodes[j];
LOG.info("Block Locations size={}, locs={}, j=", nodes.length,
dnInfo.toString(), j);
if (j < blkGrpWidth) {
Assert.assertEquals("Node shouldn't be decommissioned",
AdminStates.NORMAL, dnInfo.getAdminState());
} else {
// check against decommissioned list
Assert.assertTrue(
"For block " + blk.getBlock() + " decommissioned node " + dnInfo
+ " is not last node in list: " + j + "th index of "
+ nodes.length,
decommissionedNodeList.contains(dnInfo.getXferAddr()));
Assert.assertEquals("Node should be decommissioned",
AdminStates.DECOMMISSIONED, dnInfo.getAdminState());
}
}
}
}
private List<LocatedBlock> createLocatedStripedBlocks(int blkGrpCount,
int dataNumBlk, int numParityBlk, List<Integer> decommnNodeIndices,
List<Integer> targetNodeIndices,
HashMap<Integer, List<String>> decommissionedNodes) {
final List<LocatedBlock> lbs = new ArrayList<>(blkGrpCount);
for (int i = 0; i < blkGrpCount; i++) {
ArrayList<String> decommNodeInfo = new ArrayList<String>();
decommissionedNodes.put(new Integer(i), decommNodeInfo);
List<Integer> dummyDecommnNodeIndices = new ArrayList<>();
dummyDecommnNodeIndices.addAll(decommnNodeIndices);
LocatedStripedBlock lsb = createEachLocatedBlock(dataNumBlk, numParityBlk,
dummyDecommnNodeIndices, targetNodeIndices, decommNodeInfo);
lbs.add(lsb);
}
return lbs;
}
private LocatedStripedBlock createEachLocatedBlock(int numDataBlk,
int numParityBlk, List<Integer> decommnNodeIndices,
List<Integer> targetNodeIndices, ArrayList<String> decommNodeInfo) {
final long blockGroupID = Long.MIN_VALUE;
int totalDns = numDataBlk + numParityBlk + targetNodeIndices.size();
DatanodeInfo[] locs = new DatanodeInfo[totalDns];
String[] storageIDs = new String[totalDns];
StorageType[] storageTypes = new StorageType[totalDns];
byte[] blkIndices = new byte[totalDns];
// Adding data blocks
int index = 0;
for (; index < numDataBlk; index++) {
blkIndices[index] = (byte) index;
// The location port is always equal to the logical index of the block,
// for easier verification.
locs[index] = DFSTestUtil.getLocalDatanodeInfo(blkIndices[index]);
locs[index].setLastUpdateMonotonic(Time.monotonicNow());
storageIDs[index] = locs[index].getDatanodeUuid();
storageTypes[index] = StorageType.DISK;
// set decommissioned state
if (decommnNodeIndices.contains(index)) {
locs[index].setDecommissioned();
decommNodeInfo.add(locs[index].toString());
// Removing it from the list to ensure that all the given nodes are
// successfully marked as decommissioned.
decommnNodeIndices.remove(new Integer(index));
}
}
// Adding parity blocks after data blocks
index = NUM_DATA_BLOCKS;
for (int j = numDataBlk; j < numDataBlk + numParityBlk; j++, index++) {
blkIndices[j] = (byte) index;
// The location port is always equal to the logical index of the block,
// for easier verification.
locs[j] = DFSTestUtil.getLocalDatanodeInfo(blkIndices[j]);
locs[j].setLastUpdateMonotonic(Time.monotonicNow());
storageIDs[j] = locs[j].getDatanodeUuid();
storageTypes[j] = StorageType.DISK;
// set decommissioned state
if (decommnNodeIndices.contains(index)) {
locs[j].setDecommissioned();
decommNodeInfo.add(locs[j].toString());
// Removing it from the list to ensure that all the given nodes are
// successfully marked as decommissioned.
decommnNodeIndices.remove(new Integer(index));
}
}
// Add extra target nodes to storage list after the parity blocks
int basePortValue = NUM_DATA_BLOCKS + NUM_PARITY_BLOCKS;
index = numDataBlk + numParityBlk;
for (int i = 0; i < targetNodeIndices.size(); i++, index++) {
int blkIndexPos = targetNodeIndices.get(i);
blkIndices[index] = (byte) blkIndexPos;
// The location port is always equal to the logical index of the block,
// for easier verification.
locs[index] = DFSTestUtil.getLocalDatanodeInfo(basePortValue++);
locs[index].setLastUpdateMonotonic(Time.monotonicNow());
storageIDs[index] = locs[index].getDatanodeUuid();
storageTypes[index] = StorageType.DISK;
// set decommissioned state. This can happen when the target node is again
// decommissioned by the administrator.
if (decommnNodeIndices.contains(blkIndexPos)) {
locs[index].setDecommissioned();
decommNodeInfo.add(locs[index].toString());
// Removing it from the list to ensure that all the given nodes are
// successfully marked as decommissioned.
decommnNodeIndices.remove(new Integer(blkIndexPos));
}
}
return new LocatedStripedBlock(
new ExtendedBlock("pool", blockGroupID,
StripedFileTestUtil.BLOCK_STRIPED_CELL_SIZE, 1001),
locs, storageIDs, storageTypes, blkIndices, 0, false, null);
}
private static DatanodeManager mockDatanodeManager() throws IOException {
Configuration conf = new Configuration();
conf.setBoolean(
DFSConfigKeys.DFS_NAMENODE_AVOID_STALE_DATANODE_FOR_READ_KEY, true);
conf.setLong(DFSConfigKeys.DFS_NAMENODE_STALE_DATANODE_INTERVAL_KEY,
STALE_INTERVAL);
FSNamesystem fsn = Mockito.mock(FSNamesystem.class);
BlockManager bm = Mockito.mock(BlockManager.class);
BlockReportLeaseManager blm = new BlockReportLeaseManager(conf);
Mockito.when(bm.getBlockReportLeaseManager()).thenReturn(blm);
DatanodeManager dm = new DatanodeManager(bm, fsn, conf);
return dm;
}
private void prepareBlockIndexAndTokenList(List<LocatedBlock> lbs,
List<HashMap<DatanodeInfo, Byte>> locToIndexList,
List<HashMap<DatanodeInfo, Token<BlockTokenIdentifier>>> locToTokenList) {
for (LocatedBlock lb : lbs) {
HashMap<DatanodeInfo, Byte> locToIndex = new HashMap<DatanodeInfo, Byte>();
locToIndexList.add(locToIndex);
HashMap<DatanodeInfo, Token<BlockTokenIdentifier>> locToToken =
new HashMap<DatanodeInfo, Token<BlockTokenIdentifier>>();
locToTokenList.add(locToToken);
DatanodeInfo[] di = lb.getLocations();
LocatedStripedBlock stripedBlk = (LocatedStripedBlock) lb;
for (int i = 0; i < di.length; i++) {
locToIndex.put(di[i], stripedBlk.getBlockIndices()[i]);
locToToken.put(di[i], stripedBlk.getBlockTokens()[i]);
}
}
}
/**
* Verify block index and token values. Block indices and block tokens must
* be updated along with their locations after sorting.
*/
private void assertBlockIndexAndTokenPosition(List<LocatedBlock> lbs,
List<HashMap<DatanodeInfo, Byte>> locToIndexList,
List<HashMap<DatanodeInfo, Token<BlockTokenIdentifier>>> locToTokenList) {
for (int i = 0; i < lbs.size(); i++) {
LocatedBlock lb = lbs.get(i);
LocatedStripedBlock stripedBlk = (LocatedStripedBlock) lb;
HashMap<DatanodeInfo, Byte> locToIndex = locToIndexList.get(i);
HashMap<DatanodeInfo, Token<BlockTokenIdentifier>> locToToken =
locToTokenList.get(i);
DatanodeInfo[] di = lb.getLocations();
for (int j = 0; j < di.length; j++) {
Assert.assertEquals("Block index value mismatches after sorting",
(byte) locToIndex.get(di[j]), stripedBlk.getBlockIndices()[j]);
Assert.assertEquals("Block token value mismatches after sorting",
locToToken.get(di[j]), stripedBlk.getBlockTokens()[j]);
}
}
}
}

View File

@ -81,7 +81,6 @@ public class TestBlockReplacement {
long bytesToSend = TOTAL_BYTES; long bytesToSend = TOTAL_BYTES;
long start = Time.monotonicNow(); long start = Time.monotonicNow();
DataTransferThrottler throttler = new DataTransferThrottler(bandwidthPerSec); DataTransferThrottler throttler = new DataTransferThrottler(bandwidthPerSec);
long totalBytes = 0L;
long bytesSent = 1024*512L; // 0.5MB long bytesSent = 1024*512L; // 0.5MB
throttler.throttle(bytesSent); throttler.throttle(bytesSent);
bytesToSend -= bytesSent; bytesToSend -= bytesSent;
@ -93,7 +92,7 @@ public class TestBlockReplacement {
} catch (InterruptedException ignored) {} } catch (InterruptedException ignored) {}
throttler.throttle(bytesToSend); throttler.throttle(bytesToSend);
long end = Time.monotonicNow(); long end = Time.monotonicNow();
assertTrue(totalBytes*1000/(end-start)<=bandwidthPerSec); assertTrue(TOTAL_BYTES * 1000 / (end - start) <= bandwidthPerSec);
} }
@Test @Test

View File

@ -34,6 +34,8 @@ import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.HdfsConfiguration; import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster; import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeManager; import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeManager;
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.hadoop.util.DiskChecker.DiskErrorException;
import org.junit.After; import org.junit.After;
import org.junit.Before; import org.junit.Before;
import org.junit.Test; import org.junit.Test;
@ -229,9 +231,22 @@ public class TestDataNodeVolumeFailureToleration {
prepareDirToFail(dirs[i]); prepareDirToFail(dirs[i]);
} }
restartDatanodes(volumesTolerated, manageDfsDirs); restartDatanodes(volumesTolerated, manageDfsDirs);
assertEquals(expectedBPServiceState, cluster.getDataNodes().get(0) } catch (DiskErrorException e) {
.isBPServiceAlive(cluster.getNamesystem().getBlockPoolId())); GenericTestUtils.assertExceptionContains("Invalid value configured for "
+ "dfs.datanode.failed.volumes.tolerated", e);
} finally { } finally {
boolean bpServiceState;
// If the datanode did not register successfully because of the
// invalid value configured for tolerated volumes
if (cluster.getDataNodes().size() == 0) {
bpServiceState = false;
} else {
bpServiceState =
cluster.getDataNodes().get(0)
.isBPServiceAlive(cluster.getNamesystem().getBlockPoolId());
}
assertEquals(expectedBPServiceState, bpServiceState);
for (File dir : dirs) { for (File dir : dirs) {
FileUtil.chmod(dir.toString(), "755"); FileUtil.chmod(dir.toString(), "755");
} }

View File

@ -572,4 +572,64 @@ public class TestSpaceReservation {
return numFailures; return numFailures;
} }
} }
@Test(timeout = 30000)
public void testReservedSpaceForAppend() throws Exception {
final short replication = 3;
startCluster(BLOCK_SIZE, replication, -1);
final String methodName = GenericTestUtils.getMethodName();
final Path file = new Path("/" + methodName + ".01.dat");
// Write 1024 bytes to the file and close it.
FSDataOutputStream os = fs.create(file, replication);
os.write(new byte[1024]);
os.close();
final Path file2 = new Path("/" + methodName + ".02.dat");
// Write 1 byte to the file and keep it open.
FSDataOutputStream os2 = fs.create(file2, replication);
os2.write(new byte[1]);
os2.hflush();
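// Only one byte of file2's open block has been written, so the rest of
// that block (BLOCK_SIZE - 1) stays reserved on each replica.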
int expectedFile2Reserved = BLOCK_SIZE - 1;
checkReservedSpace(expectedFile2Reserved);
// append one byte and verify reserved space before and after closing
os = fs.append(file);
os.write(new byte[1]);
os.hflush();
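// file1 already holds 1024 bytes on disk; reopening its last block for
// append plus the one new byte leaves BLOCK_SIZE - 1025 still reserved.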
int expectedFile1Reserved = BLOCK_SIZE - 1025;
checkReservedSpace(expectedFile2Reserved + expectedFile1Reserved);
os.close();
checkReservedSpace(expectedFile2Reserved);
// append one byte and verify reserved space before and after abort
os = fs.append(file);
os.write(new byte[1]);
os.hflush();
expectedFile1Reserved--;
checkReservedSpace(expectedFile2Reserved + expectedFile1Reserved);
DFSTestUtil.abortStream(((DFSOutputStream) os.getWrappedStream()));
checkReservedSpace(expectedFile2Reserved);
}
private void checkReservedSpace(final long expectedReserved) throws TimeoutException,
InterruptedException, IOException {
for (final DataNode dn : cluster.getDataNodes()) {
try (FsDatasetSpi.FsVolumeReferences volumes = dn.getFSDataset()
.getFsVolumeReferences()) {
final FsVolumeImpl volume = (FsVolumeImpl) volumes.get(0);
GenericTestUtils.waitFor(new Supplier<Boolean>() {
@Override
public Boolean get() {
LOG.info(
"dn " + dn.getDisplayName() + " space : " + volume
.getReservedForReplicas() + ", Expected ReservedSpace :"
+ expectedReserved);
return (volume.getReservedForReplicas() == expectedReserved);
}
}, 100, 3000);
}
}
}
} }

View File

@ -34,6 +34,8 @@ import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.HdfsConfiguration; import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeManager; import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeManager;
import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_CALLER_CONTEXT_ENABLED_KEY;
import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_CALLER_CONTEXT_ENABLED_DEFAULT;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_DEFAULT; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_DEFAULT;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY;
@ -50,13 +52,60 @@ public class TestNameNodeReconfigure {
public void setUp() throws IOException { public void setUp() throws IOException {
Configuration conf = new HdfsConfiguration(); Configuration conf = new HdfsConfiguration();
cluster = new MiniDFSCluster.Builder(conf).build(); cluster = new MiniDFSCluster.Builder(conf).build();
cluster.waitActive();
}
@Test
public void testReconfigureCallerContextEnabled()
throws ReconfigurationException {
final NameNode nameNode = cluster.getNameNode();
final FSNamesystem nameSystem = nameNode.getNamesystem();
// try invalid values
nameNode.reconfigureProperty(HADOOP_CALLER_CONTEXT_ENABLED_KEY, "text");
assertEquals(HADOOP_CALLER_CONTEXT_ENABLED_KEY + " has wrong value", false,
nameSystem.getCallerContextEnabled());
assertEquals(
HADOOP_CALLER_CONTEXT_ENABLED_KEY + " has wrong value",
false,
nameNode.getConf().getBoolean(HADOOP_CALLER_CONTEXT_ENABLED_KEY,
HADOOP_CALLER_CONTEXT_ENABLED_DEFAULT));
// enable CallerContext
nameNode.reconfigureProperty(HADOOP_CALLER_CONTEXT_ENABLED_KEY, "true");
assertEquals(HADOOP_CALLER_CONTEXT_ENABLED_KEY + " has wrong value", true,
nameSystem.getCallerContextEnabled());
assertEquals(
HADOOP_CALLER_CONTEXT_ENABLED_KEY + " has wrong value",
true,
nameNode.getConf().getBoolean(HADOOP_CALLER_CONTEXT_ENABLED_KEY,
HADOOP_CALLER_CONTEXT_ENABLED_DEFAULT));
// disable CallerContext
nameNode.reconfigureProperty(HADOOP_CALLER_CONTEXT_ENABLED_KEY, "false");
assertEquals(HADOOP_CALLER_CONTEXT_ENABLED_KEY + " has wrong value", false,
nameSystem.getCallerContextEnabled());
assertEquals(
HADOOP_CALLER_CONTEXT_ENABLED_KEY + " has wrong value",
false,
nameNode.getConf().getBoolean(HADOOP_CALLER_CONTEXT_ENABLED_KEY,
HADOOP_CALLER_CONTEXT_ENABLED_DEFAULT));
// revert to default
nameNode.reconfigureProperty(HADOOP_CALLER_CONTEXT_ENABLED_KEY, null);
// verify default
assertEquals(HADOOP_CALLER_CONTEXT_ENABLED_KEY + " has wrong value", false,
nameSystem.getCallerContextEnabled());
assertEquals(HADOOP_CALLER_CONTEXT_ENABLED_KEY + " has wrong value", null,
nameNode.getConf().get(HADOOP_CALLER_CONTEXT_ENABLED_KEY));
} }
/** /**
* Test that we can modify configuration properties. * Test that we can modify configuration properties.
*/ */
@Test @Test
public void testReconfigure() throws ReconfigurationException { public void testReconfigureHearbeatCheck1() throws ReconfigurationException {
final NameNode nameNode = cluster.getNameNode(); final NameNode nameNode = cluster.getNameNode();
final DatanodeManager datanodeManager = nameNode.namesystem final DatanodeManager datanodeManager = nameNode.namesystem
.getBlockManager().getDatanodeManager(); .getBlockManager().getDatanodeManager();

View File

@ -45,7 +45,67 @@ public class TestHdfsConfigFields extends TestConfigurationFieldsBase {
// Set error modes // Set error modes
errorIfMissingConfigProps = true; errorIfMissingConfigProps = true;
errorIfMissingXmlProps = false; errorIfMissingXmlProps = true;
// Initialize used variables
configurationPropsToSkipCompare = new HashSet<String>();
// Ignore testing based parameter
configurationPropsToSkipCompare.add("ignore.secure.ports.for.testing");
// Remove deprecated properties listed in Configuration#DeprecationDelta
configurationPropsToSkipCompare.add(DFSConfigKeys.DFS_DF_INTERVAL_KEY);
// Remove default properties
configurationPropsToSkipCompare
.add(DFSConfigKeys.DFS_IMAGE_COMPRESSION_CODEC_DEFAULT);
configurationPropsToSkipCompare
.add(DFSConfigKeys.DFS_WEBHDFS_AUTHENTICATION_FILTER_DEFAULT);
// Remove support property
configurationPropsToSkipCompare
.add(DFSConfigKeys.DFS_NAMENODE_MIN_SUPPORTED_DATANODE_VERSION_KEY);
configurationPropsToSkipCompare
.add(DFSConfigKeys.DFS_DATANODE_MIN_SUPPORTED_NAMENODE_VERSION_KEY);
// Purposely hidden, based on comments in DFSConfigKeys
configurationPropsToSkipCompare
.add(DFSConfigKeys.DFS_DATANODE_XCEIVER_STOP_TIMEOUT_MILLIS_KEY);
// Fully deprecated properties?
configurationPropsToSkipCompare
.add("dfs.corruptfilesreturned.max");
configurationPropsToSkipCompare
.add("dfs.datanode.hdfs-blocks-metadata.enabled");
configurationPropsToSkipCompare
.add("dfs.metrics.session-id");
configurationPropsToSkipCompare
.add("dfs.datanode.synconclose");
configurationPropsToSkipCompare
.add("dfs.datanode.non.local.lazy.persist");
configurationPropsToSkipCompare
.add("dfs.namenode.tolerate.heartbeat.multiplier");
configurationPropsToSkipCompare
.add("dfs.namenode.stripe.min");
configurationPropsToSkipCompare
.add("dfs.namenode.replqueue.threshold-pct");
// Removed by HDFS-6440
configurationPropsToSkipCompare
.add("dfs.ha.log-roll.rpc.timeout");
// Example (not real) property in hdfs-default.xml
configurationPropsToSkipCompare.add("dfs.ha.namenodes");
// Property used for internal testing only
configurationPropsToSkipCompare
.add(DFSConfigKeys.DFS_DATANODE_DUPLICATE_REPLICA_DELETION);
// Property not intended for users
configurationPropsToSkipCompare
.add(DFSConfigKeys.DFS_DATANODE_STARTUP_KEY);
configurationPropsToSkipCompare
.add(DFSConfigKeys.DFS_NAMENODE_STARTUP_KEY);
// Allocate // Allocate
xmlPropsToSkipCompare = new HashSet<String>(); xmlPropsToSkipCompare = new HashSet<String>();
@ -58,21 +118,12 @@ public class TestHdfsConfigFields extends TestConfigurationFieldsBase {
// Used dynamically as part of DFSConfigKeys.DFS_NAMENODE_EDITS_PLUGIN_PREFIX // Used dynamically as part of DFSConfigKeys.DFS_NAMENODE_EDITS_PLUGIN_PREFIX
xmlPropsToSkipCompare.add("dfs.namenode.edits.journal-plugin.qjournal"); xmlPropsToSkipCompare.add("dfs.namenode.edits.journal-plugin.qjournal");
// Example (not real) property in hdfs-default.xml
xmlPropsToSkipCompare.add("dfs.ha.namenodes.EXAMPLENAMESERVICE");
// Defined in org.apache.hadoop.fs.CommonConfigurationKeys // Defined in org.apache.hadoop.fs.CommonConfigurationKeys
xmlPropsToSkipCompare.add("hadoop.user.group.metrics.percentiles.intervals"); xmlPropsToSkipCompare.add("hadoop.user.group.metrics.percentiles.intervals");
// Used oddly by DataNode to create new config String // Used oddly by DataNode to create new config String
xmlPropsToSkipCompare.add("hadoop.hdfs.configuration.version"); xmlPropsToSkipCompare.add("hadoop.hdfs.configuration.version");
// Kept in the NfsConfiguration class in the hadoop-hdfs-nfs module
xmlPrefixToSkipCompare.add("nfs");
// Not a hardcoded property. Used by SaslRpcClient
xmlPrefixToSkipCompare.add("dfs.namenode.kerberos.principal.pattern");
// Skip comparing in branch-2. Removed in trunk with HDFS-7985. // Skip comparing in branch-2. Removed in trunk with HDFS-7985.
xmlPropsToSkipCompare.add("dfs.webhdfs.enabled"); xmlPropsToSkipCompare.add("dfs.webhdfs.enabled");
@ -82,5 +133,21 @@ public class TestHdfsConfigFields extends TestConfigurationFieldsBase {
// Ignore HTrace properties // Ignore HTrace properties
xmlPropsToSkipCompare.add("fs.client.htrace"); xmlPropsToSkipCompare.add("fs.client.htrace");
xmlPropsToSkipCompare.add("hadoop.htrace"); xmlPropsToSkipCompare.add("hadoop.htrace");
// Ignore SpanReceiveHost properties
xmlPropsToSkipCompare.add("dfs.htrace.spanreceiver.classes");
xmlPropsToSkipCompare.add("dfs.client.htrace.spanreceiver.classes");
// Remove deprecated properties listed in Configuration#DeprecationDelta
xmlPropsToSkipCompare.add(DFSConfigKeys.DFS_DF_INTERVAL_KEY);
// Kept in the NfsConfiguration class in the hadoop-hdfs-nfs module
xmlPrefixToSkipCompare.add("nfs");
// Not a hardcoded property. Used by SaslRpcClient
xmlPrefixToSkipCompare.add("dfs.namenode.kerberos.principal.pattern");
// Skip over example property
xmlPrefixToSkipCompare.add("dfs.ha.namenodes");
} }
} }

View File

@ -111,9 +111,6 @@ public class TestJMXGet {
jmx.getValue("NumLiveDataNodes"))); jmx.getValue("NumLiveDataNodes")));
assertGauge("CorruptBlocks", Long.parseLong(jmx.getValue("CorruptBlocks")), assertGauge("CorruptBlocks", Long.parseLong(jmx.getValue("CorruptBlocks")),
getMetrics("FSNamesystem")); getMetrics("FSNamesystem"));
DFSTestUtil.waitForMetric(jmx, "NumOpenConnections", numDatanodes);
assertEquals(numDatanodes, Integer.parseInt(
jmx.getValue("NumOpenConnections")));
cluster.shutdown(); cluster.shutdown();
MBeanServerConnection mbsc = ManagementFactory.getPlatformMBeanServer(); MBeanServerConnection mbsc = ManagementFactory.getPlatformMBeanServer();

View File

@ -537,7 +537,7 @@ class Fetcher<K,V> extends Thread {
+ " len: " + compressedLength + " to " + mapOutput.getDescription()); + " len: " + compressedLength + " to " + mapOutput.getDescription());
mapOutput.shuffle(host, is, compressedLength, decompressedLength, mapOutput.shuffle(host, is, compressedLength, decompressedLength,
metrics, reporter); metrics, reporter);
} catch (java.lang.InternalError e) { } catch (java.lang.InternalError | Exception e) {
LOG.warn("Failed to shuffle for fetcher#"+id, e); LOG.warn("Failed to shuffle for fetcher#"+id, e);
throw new IOException(e); throw new IOException(e);
} }

View File

@ -344,6 +344,43 @@ public class TestFetcher {
verify(ss, times(1)).copyFailed(map1ID, host, true, false); verify(ss, times(1)).copyFailed(map1ID, host, true, false);
} }
@SuppressWarnings("unchecked")
@Test(timeout=10000)
public void testCopyFromHostOnAnyException() throws Exception {
InMemoryMapOutput<Text, Text> immo = mock(InMemoryMapOutput.class);
Fetcher<Text,Text> underTest = new FakeFetcher<Text,Text>(job, id, ss, mm,
r, metrics, except, key, connection);
String replyHash = SecureShuffleUtils.generateHash(encHash.getBytes(), key);
when(connection.getResponseCode()).thenReturn(200);
when(connection.getHeaderField(
SecureShuffleUtils.HTTP_HEADER_REPLY_URL_HASH)).thenReturn(replyHash);
ShuffleHeader header = new ShuffleHeader(map1ID.toString(), 10, 10, 1);
ByteArrayOutputStream bout = new ByteArrayOutputStream();
header.write(new DataOutputStream(bout));
ByteArrayInputStream in = new ByteArrayInputStream(bout.toByteArray());
when(connection.getInputStream()).thenReturn(in);
when(connection.getHeaderField(ShuffleHeader.HTTP_HEADER_NAME))
.thenReturn(ShuffleHeader.DEFAULT_HTTP_HEADER_NAME);
when(connection.getHeaderField(ShuffleHeader.HTTP_HEADER_VERSION))
.thenReturn(ShuffleHeader.DEFAULT_HTTP_HEADER_VERSION);
when(mm.reserve(any(TaskAttemptID.class), anyLong(), anyInt()))
.thenReturn(immo);
doThrow(new ArrayIndexOutOfBoundsException()).when(immo)
.shuffle(any(MapHost.class), any(InputStream.class), anyLong(),
anyLong(), any(ShuffleClientMetrics.class), any(Reporter.class));
underTest.copyFromHost(host);
verify(connection)
.addRequestProperty(SecureShuffleUtils.HTTP_HEADER_URL_HASH,
encHash);
verify(ss, times(1)).copyFailed(map1ID, host, true, false);
}
@SuppressWarnings("unchecked") @SuppressWarnings("unchecked")
@Test(timeout=10000) @Test(timeout=10000)
public void testCopyFromHostWithRetry() throws Exception { public void testCopyFromHostWithRetry() throws Exception {

View File

@ -20,7 +20,6 @@ package org.apache.hadoop.fs.s3a;
import com.amazonaws.services.s3.AmazonS3Client; import com.amazonaws.services.s3.AmazonS3Client;
import com.amazonaws.services.s3.model.GetObjectRequest; import com.amazonaws.services.s3.model.GetObjectRequest;
import com.amazonaws.services.s3.model.S3Object;
import com.amazonaws.services.s3.model.S3ObjectInputStream; import com.amazonaws.services.s3.model.S3ObjectInputStream;
import org.apache.hadoop.fs.FSExceptionMessages; import org.apache.hadoop.fs.FSExceptionMessages;
import org.apache.hadoop.fs.FSInputStream; import org.apache.hadoop.fs.FSInputStream;
@ -37,82 +36,128 @@ public class S3AInputStream extends FSInputStream {
private long pos; private long pos;
private boolean closed; private boolean closed;
private S3ObjectInputStream wrappedStream; private S3ObjectInputStream wrappedStream;
private FileSystem.Statistics stats; private final FileSystem.Statistics stats;
private AmazonS3Client client; private final AmazonS3Client client;
private String bucket; private final String bucket;
private String key; private final String key;
private long contentLength; private final long contentLength;
private final String uri;
public static final Logger LOG = S3AFileSystem.LOG; public static final Logger LOG = S3AFileSystem.LOG;
public static final long CLOSE_THRESHOLD = 4096; public static final long CLOSE_THRESHOLD = 4096;
public S3AInputStream(String bucket, String key, long contentLength, AmazonS3Client client, // Used by lazy seek
FileSystem.Statistics stats) { private long nextReadPos;
//Amount of data requested from the request
private long requestedStreamLen;
public S3AInputStream(String bucket, String key, long contentLength,
AmazonS3Client client, FileSystem.Statistics stats) {
this.bucket = bucket; this.bucket = bucket;
this.key = key; this.key = key;
this.contentLength = contentLength; this.contentLength = contentLength;
this.client = client; this.client = client;
this.stats = stats; this.stats = stats;
this.pos = 0; this.pos = 0;
this.nextReadPos = 0;
this.closed = false; this.closed = false;
this.wrappedStream = null; this.wrappedStream = null;
this.uri = "s3a://" + this.bucket + "/" + this.key;
} }
private void openIfNeeded() throws IOException { /**
if (wrappedStream == null) { * Opens up the stream at specified target position and for given length.
reopen(0); *
} * @param targetPos target position
} * @param length length requested
* @throws IOException
private synchronized void reopen(long pos) throws IOException { */
private synchronized void reopen(long targetPos, long length)
throws IOException {
requestedStreamLen = (length < 0) ? this.contentLength :
Math.max(this.contentLength, (CLOSE_THRESHOLD + (targetPos + length)));
if (wrappedStream != null) { if (wrappedStream != null) {
if (LOG.isDebugEnabled()) { if (LOG.isDebugEnabled()) {
LOG.debug("Aborting old stream to open at pos " + pos); LOG.debug("Closing the previous stream");
} }
wrappedStream.abort(); closeStream(requestedStreamLen);
} }
if (pos < 0) { if (LOG.isDebugEnabled()) {
throw new EOFException(FSExceptionMessages.NEGATIVE_SEEK LOG.debug("Requesting for "
+" " + pos); + "targetPos=" + targetPos
+ ", length=" + length
+ ", requestedStreamLen=" + requestedStreamLen
+ ", streamPosition=" + pos
+ ", nextReadPosition=" + nextReadPos
);
} }
if (contentLength > 0 && pos > contentLength-1) { GetObjectRequest request = new GetObjectRequest(bucket, key)
throw new EOFException( .withRange(targetPos, requestedStreamLen);
FSExceptionMessages.CANNOT_SEEK_PAST_EOF
+ " " + pos);
}
LOG.debug("Actually opening file " + key + " at pos " + pos);
GetObjectRequest request = new GetObjectRequest(bucket, key);
request.setRange(pos, contentLength-1);
wrappedStream = client.getObject(request).getObjectContent(); wrappedStream = client.getObject(request).getObjectContent();
if (wrappedStream == null) { if (wrappedStream == null) {
throw new IOException("Null IO stream"); throw new IOException("Null IO stream");
} }
this.pos = pos; this.pos = targetPos;
} }
@Override @Override
public synchronized long getPos() throws IOException { public synchronized long getPos() throws IOException {
return pos; return (nextReadPos < 0) ? 0 : nextReadPos;
} }
@Override @Override
public synchronized void seek(long pos) throws IOException { public synchronized void seek(long targetPos) throws IOException {
checkNotClosed(); checkNotClosed();
if (this.pos == pos) { // Do not allow negative seek
if (targetPos < 0) {
throw new EOFException(FSExceptionMessages.NEGATIVE_SEEK
+ " " + targetPos);
}
if (this.contentLength <= 0) {
return; return;
} }
LOG.debug( // Lazy seek
"Reopening " + this.key + " to seek to new offset " + (pos - this.pos)); nextReadPos = targetPos;
reopen(pos); }
/**
* Adjust the stream to a specific position.
*
* @param targetPos target seek position
* @param length length of content that needs to be read from targetPos
* @throws IOException
*/
private void seekInStream(long targetPos, long length) throws IOException {
checkNotClosed();
if (wrappedStream == null) {
return;
}
// compute how much more to skip
long diff = targetPos - pos;
if (targetPos > pos) {
if ((diff + length) <= wrappedStream.available()) {
// already available in buffer
pos += wrappedStream.skip(diff);
if (pos != targetPos) {
throw new IOException("Failed to seek to " + targetPos
+ ". Current position " + pos);
}
return;
}
}
// close the stream; if read the object will be opened at the new pos
closeStream(this.requestedStreamLen);
pos = targetPos;
} }
@Override @Override
@ -120,27 +165,48 @@ public class S3AInputStream extends FSInputStream {
return false; return false;
} }
/**
* Perform lazy seek and adjust stream to correct position for reading.
*
* @param targetPos position from where data should be read
* @param len length of the content that needs to be read
*/
private void lazySeek(long targetPos, long len) throws IOException {
//For lazy seek
if (targetPos != this.pos) {
seekInStream(targetPos, len);
}
//re-open at specific location if needed
if (wrappedStream == null) {
reopen(targetPos, len);
}
}
@Override @Override
public synchronized int read() throws IOException { public synchronized int read() throws IOException {
checkNotClosed(); checkNotClosed();
if (this.contentLength == 0 || (nextReadPos >= contentLength)) {
return -1;
}
openIfNeeded(); lazySeek(nextReadPos, 1);
int byteRead; int byteRead;
try { try {
byteRead = wrappedStream.read(); byteRead = wrappedStream.read();
} catch (SocketTimeoutException e) { } catch (SocketTimeoutException | SocketException e) {
LOG.info("Got timeout while trying to read from stream, trying to recover " + e); LOG.info("Got exception while trying to read from stream,"
reopen(pos); + " trying to recover " + e);
byteRead = wrappedStream.read(); reopen(pos, 1);
} catch (SocketException e) {
LOG.info("Got socket exception while trying to read from stream, trying to recover " + e);
reopen(pos);
byteRead = wrappedStream.read(); byteRead = wrappedStream.read();
} catch (EOFException e) {
return -1;
} }
if (byteRead >= 0) { if (byteRead >= 0) {
pos++; pos++;
nextReadPos++;
} }
if (stats != null && byteRead >= 0) { if (stats != null && byteRead >= 0) {
@ -150,26 +216,34 @@ public class S3AInputStream extends FSInputStream {
} }
@Override @Override
public synchronized int read(byte[] buf, int off, int len) throws IOException { public synchronized int read(byte[] buf, int off, int len)
throws IOException {
checkNotClosed(); checkNotClosed();
openIfNeeded(); validatePositionedReadArgs(nextReadPos, buf, off, len);
if (len == 0) {
return 0;
}
if (this.contentLength == 0 || (nextReadPos >= contentLength)) {
return -1;
}
lazySeek(nextReadPos, len);
int byteRead; int byteRead;
try { try {
byteRead = wrappedStream.read(buf, off, len); byteRead = wrappedStream.read(buf, off, len);
} catch (SocketTimeoutException e) { } catch (SocketTimeoutException | SocketException e) {
LOG.info("Got timeout while trying to read from stream, trying to recover " + e); LOG.info("Got exception while trying to read from stream,"
reopen(pos); + " trying to recover " + e);
byteRead = wrappedStream.read(buf, off, len); reopen(pos, len);
} catch (SocketException e) {
LOG.info("Got socket exception while trying to read from stream, trying to recover " + e);
reopen(pos);
byteRead = wrappedStream.read(buf, off, len); byteRead = wrappedStream.read(buf, off, len);
} }
if (byteRead > 0) { if (byteRead > 0) {
pos += byteRead; pos += byteRead;
nextReadPos += byteRead;
} }
if (stats != null && byteRead > 0) { if (stats != null && byteRead > 0) {
@ -189,15 +263,43 @@ public class S3AInputStream extends FSInputStream {
public synchronized void close() throws IOException { public synchronized void close() throws IOException {
super.close(); super.close();
closed = true; closed = true;
closeStream(this.contentLength);
}
/**
* Close a stream: decide whether to abort or close, based on
* the length of the stream and the current position.
*
* This does not set the {@link #closed} flag.
* @param length length of the stream.
* @throws IOException
*/
private void closeStream(long length) throws IOException {
if (wrappedStream != null) { if (wrappedStream != null) {
if (contentLength - pos <= CLOSE_THRESHOLD) { String reason = null;
// Close, rather than abort, so that the http connection can be reused. boolean shouldAbort = length - pos > CLOSE_THRESHOLD;
wrappedStream.close(); if (!shouldAbort) {
} else { try {
reason = "Closed stream";
wrappedStream.close();
} catch (IOException e) {
// exception escalates to an abort
LOG.debug("When closing stream", e);
shouldAbort = true;
}
}
if (shouldAbort) {
// Abort, rather than just close, the underlying stream. Otherwise, the // Abort, rather than just close, the underlying stream. Otherwise, the
// remaining object payload is read from S3 while closing the stream. // remaining object payload is read from S3 while closing the stream.
wrappedStream.abort(); wrappedStream.abort();
reason = "Closed stream with abort";
} }
if (LOG.isDebugEnabled()) {
LOG.debug(reason + "; streamPos=" + pos
+ ", nextReadPos=" + nextReadPos
+ ", contentLength=" + length);
}
wrappedStream = null;
} }
} }
@ -216,4 +318,55 @@ public class S3AInputStream extends FSInputStream {
public boolean markSupported() { public boolean markSupported() {
return false; return false;
} }
@Override
public String toString() {
final StringBuilder sb = new StringBuilder(
"S3AInputStream{");
sb.append(uri);
sb.append(" pos=").append(pos);
sb.append(" nextReadPos=").append(nextReadPos);
sb.append(" contentLength=").append(contentLength);
sb.append('}');
return sb.toString();
}
/**
* Subclass {@code readFully()} operation which only seeks at the start
* of the series of operations; seeking back at the end.
*
* This is significantly higher performance if multiple read attempts are
* needed to fetch the data, as it does not break the HTTP connection.
*
* To maintain thread safety requirements, this operation is synchronized
* for the duration of the sequence.
* {@inheritDoc}
*
*/
@Override
public void readFully(long position, byte[] buffer, int offset, int length)
throws IOException {
checkNotClosed();
validatePositionedReadArgs(position, buffer, offset, length);
if (length == 0) {
return;
}
int nread = 0;
synchronized (this) {
long oldPos = getPos();
try {
seek(position);
while (nread < length) {
int nbytes = read(buffer, offset + nread, length - nread);
if (nbytes < 0) {
throw new EOFException(FSExceptionMessages.EOF_IN_READ_FULLY);
}
nread += nbytes;
}
} finally {
seek(oldPos);
}
}
}
} }
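The rewritten S3AInputStream above combines two ideas: seek() only records nextReadPos and the object is actually re-opened lazily on the next read, and when the open stream does have to be dropped it is closed if only a small tail remains (so the HTTP connection can be reused) but aborted otherwise. A stripped-down sketch of that pattern, with made-up names and none of the real AWS SDK calls, looks roughly like this:
// Illustrative sketch only; the class name and the openAt()/abort() hooks
// are assumptions, not the real S3AInputStream or AWS SDK API.
abstract class LazySeekSketch extends java.io.InputStream {
static final long CLOSE_THRESHOLD = 4096;
private final long contentLength;
private long pos;          // position of the underlying HTTP stream
private long nextReadPos;  // position the caller asked for via seek()
private java.io.InputStream wrapped; // opened only when a read happens
LazySeekSketch(long contentLength) {
this.contentLength = contentLength;
}
public synchronized void seek(long targetPos) {
nextReadPos = targetPos; // no network round trip here: lazy seek
}
@Override
public synchronized int read() throws java.io.IOException {
if (nextReadPos >= contentLength) {
return -1;
}
if (wrapped == null || nextReadPos != pos) {
closeOrAbort();
wrapped = openAt(nextReadPos); // hypothetical re-open helper
pos = nextReadPos;
}
int b = wrapped.read();
if (b >= 0) {
pos++;
nextReadPos++;
}
return b;
}
private void closeOrAbort() throws java.io.IOException {
if (wrapped == null) {
return;
}
if (contentLength - pos <= CLOSE_THRESHOLD) {
// Little data left: close so the HTTP connection can be reused.
wrapped.close();
} else {
// Otherwise abort, so the remaining payload is not drained on close.
abort();
}
wrapped = null;
}
/** Open the remote object at the given offset (assumed helper). */
protected abstract java.io.InputStream openAt(long targetPos)
throws java.io.IOException;
/** Drop the underlying connection without draining it (assumed helper). */
protected abstract void abort();
}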

View File

@ -123,6 +123,7 @@ public class TestS3AConfiguration {
@Test @Test
public void testProxyPortWithoutHost() throws Exception { public void testProxyPortWithoutHost() throws Exception {
conf = new Configuration(); conf = new Configuration();
conf.unset(Constants.PROXY_HOST);
conf.setInt(Constants.MAX_ERROR_RETRIES, 2); conf.setInt(Constants.MAX_ERROR_RETRIES, 2);
conf.setInt(Constants.PROXY_PORT, 1); conf.setInt(Constants.PROXY_PORT, 1);
try { try {
@ -140,6 +141,7 @@ public class TestS3AConfiguration {
@Test @Test
public void testAutomaticProxyPortSelection() throws Exception { public void testAutomaticProxyPortSelection() throws Exception {
conf = new Configuration(); conf = new Configuration();
conf.unset(Constants.PROXY_PORT);
conf.setInt(Constants.MAX_ERROR_RETRIES, 2); conf.setInt(Constants.MAX_ERROR_RETRIES, 2);
conf.set(Constants.PROXY_HOST, "127.0.0.1"); conf.set(Constants.PROXY_HOST, "127.0.0.1");
conf.set(Constants.SECURE_CONNECTIONS, "true"); conf.set(Constants.SECURE_CONNECTIONS, "true");

View File

@ -44,6 +44,15 @@ public class TestS3ADeleteManyFiles extends S3AScaleTestBase {
@Rule @Rule
public Timeout testTimeout = new Timeout(30 * 60 * 1000); public Timeout testTimeout = new Timeout(30 * 60 * 1000);
/**
* CAUTION: If this test starts failing, please make sure that the
* {@link org.apache.hadoop.fs.s3a.Constants#MAX_THREADS} configuration is not
* set too low. Alternatively, consider reducing the
* <code>scale.test.operation.count</code> parameter in
* <code>getOperationCount()</code>.
*
* @see #getOperationCount()
*/
@Test @Test
public void testBulkRenameAndDelete() throws Throwable { public void testBulkRenameAndDelete() throws Throwable {
final Path scaleTestDir = getTestPath(); final Path scaleTestDir = getTestPath();

View File

@ -725,6 +725,8 @@ public class NativeAzureFileSystem extends FileSystem {
// Return to the caller with the result. // Return to the caller with the result.
// //
return result; return result;
} catch(EOFException e) {
return -1;
} catch(IOException e) { } catch(IOException e) {
Throwable innerException = NativeAzureFileSystemHelper.checkForAzureStorageException(e); Throwable innerException = NativeAzureFileSystemHelper.checkForAzureStorageException(e);
@ -773,7 +775,7 @@ public class NativeAzureFileSystem extends FileSystem {
pos += result; pos += result;
} }
if (null != statistics) { if (null != statistics && result > 0) {
statistics.incrementBytesRead(result); statistics.incrementBytesRead(result);
} }

View File

@ -4701,7 +4701,6 @@
"dfs.namenode.avoid.read.stale.datanode" : "false", "dfs.namenode.avoid.read.stale.datanode" : "false",
"mapreduce.job.reduces" : "0", "mapreduce.job.reduces" : "0",
"mapreduce.map.sort.spill.percent" : "0.8", "mapreduce.map.sort.spill.percent" : "0.8",
"dfs.client.file-block-storage-locations.timeout" : "60",
"dfs.datanode.drop.cache.behind.writes" : "false", "dfs.datanode.drop.cache.behind.writes" : "false",
"mapreduce.job.end-notification.retry.interval" : "1", "mapreduce.job.end-notification.retry.interval" : "1",
"mapreduce.job.maps" : "96", "mapreduce.job.maps" : "96",
@ -4800,7 +4799,6 @@
"dfs.datanode.directoryscan.interval" : "21600", "dfs.datanode.directoryscan.interval" : "21600",
"yarn.resourcemanager.address" : "a2115.smile.com:8032", "yarn.resourcemanager.address" : "a2115.smile.com:8032",
"yarn.nodemanager.health-checker.interval-ms" : "600000", "yarn.nodemanager.health-checker.interval-ms" : "600000",
"dfs.client.file-block-storage-locations.num-threads" : "10",
"yarn.resourcemanager.container-tokens.master-key-rolling-interval-secs" : "86400", "yarn.resourcemanager.container-tokens.master-key-rolling-interval-secs" : "86400",
"mapreduce.reduce.markreset.buffer.percent" : "0.0", "mapreduce.reduce.markreset.buffer.percent" : "0.0",
"hadoop.security.group.mapping.ldap.directory.search.timeout" : "10000", "hadoop.security.group.mapping.ldap.directory.search.timeout" : "10000",
@ -9806,7 +9804,6 @@
"dfs.namenode.avoid.read.stale.datanode" : "false", "dfs.namenode.avoid.read.stale.datanode" : "false",
"mapreduce.job.reduces" : "0", "mapreduce.job.reduces" : "0",
"mapreduce.map.sort.spill.percent" : "0.8", "mapreduce.map.sort.spill.percent" : "0.8",
"dfs.client.file-block-storage-locations.timeout" : "60",
"dfs.datanode.drop.cache.behind.writes" : "false", "dfs.datanode.drop.cache.behind.writes" : "false",
"mapreduce.job.end-notification.retry.interval" : "1", "mapreduce.job.end-notification.retry.interval" : "1",
"mapreduce.job.maps" : "96", "mapreduce.job.maps" : "96",
@ -9905,7 +9902,6 @@
"dfs.datanode.directoryscan.interval" : "21600", "dfs.datanode.directoryscan.interval" : "21600",
"yarn.resourcemanager.address" : "a2115.smile.com:8032", "yarn.resourcemanager.address" : "a2115.smile.com:8032",
"yarn.nodemanager.health-checker.interval-ms" : "600000", "yarn.nodemanager.health-checker.interval-ms" : "600000",
"dfs.client.file-block-storage-locations.num-threads" : "10",
"yarn.resourcemanager.container-tokens.master-key-rolling-interval-secs" : "86400", "yarn.resourcemanager.container-tokens.master-key-rolling-interval-secs" : "86400",
"mapreduce.reduce.markreset.buffer.percent" : "0.0", "mapreduce.reduce.markreset.buffer.percent" : "0.0",
"hadoop.security.group.mapping.ldap.directory.search.timeout" : "10000", "hadoop.security.group.mapping.ldap.directory.search.timeout" : "10000",
@ -10412,7 +10408,6 @@
"dfs.namenode.avoid.read.stale.datanode" : "false", "dfs.namenode.avoid.read.stale.datanode" : "false",
"mapreduce.job.reduces" : "0", "mapreduce.job.reduces" : "0",
"mapreduce.map.sort.spill.percent" : "0.8", "mapreduce.map.sort.spill.percent" : "0.8",
"dfs.client.file-block-storage-locations.timeout" : "60",
"dfs.datanode.drop.cache.behind.writes" : "false", "dfs.datanode.drop.cache.behind.writes" : "false",
"mapreduce.job.end-notification.retry.interval" : "1", "mapreduce.job.end-notification.retry.interval" : "1",
"mapreduce.job.maps" : "96", "mapreduce.job.maps" : "96",
@ -10511,7 +10506,6 @@
"dfs.datanode.directoryscan.interval" : "21600", "dfs.datanode.directoryscan.interval" : "21600",
"yarn.resourcemanager.address" : "a2115.smile.com:8032", "yarn.resourcemanager.address" : "a2115.smile.com:8032",
"yarn.nodemanager.health-checker.interval-ms" : "600000", "yarn.nodemanager.health-checker.interval-ms" : "600000",
"dfs.client.file-block-storage-locations.num-threads" : "10",
"yarn.resourcemanager.container-tokens.master-key-rolling-interval-secs" : "86400", "yarn.resourcemanager.container-tokens.master-key-rolling-interval-secs" : "86400",
"mapreduce.reduce.markreset.buffer.percent" : "0.0", "mapreduce.reduce.markreset.buffer.percent" : "0.0",
"hadoop.security.group.mapping.ldap.directory.search.timeout" : "10000", "hadoop.security.group.mapping.ldap.directory.search.timeout" : "10000",

View File

@ -199,15 +199,6 @@ public class NodeInfo {
public ResourceUtilization getNodeUtilization() { public ResourceUtilization getNodeUtilization() {
return null; return null;
} }
@Override
public long getUntrackedTimeStamp() {
return 0;
}
@Override
public void setUntrackedTimeStamp(long timeStamp) {
}
} }
public static RMNode newNodeInfo(String rackName, String hostName, public static RMNode newNodeInfo(String rackName, String hostName,

View File

@ -188,13 +188,4 @@ public class RMNodeWrapper implements RMNode {
public ResourceUtilization getNodeUtilization() { public ResourceUtilization getNodeUtilization() {
return node.getNodeUtilization(); return node.getNodeUtilization();
} }
@Override
public long getUntrackedTimeStamp() {
return 0;
}
@Override
public void setUntrackedTimeStamp(long timeStamp) {
}
} }

View File

@ -165,13 +165,12 @@ public abstract class ContainerId implements Comparable<ContainerId>{
@Override @Override
public int compareTo(ContainerId other) { public int compareTo(ContainerId other) {
if (this.getApplicationAttemptId().compareTo( int result = this.getApplicationAttemptId().compareTo(
other.getApplicationAttemptId()) == 0) { other.getApplicationAttemptId());
return Long.valueOf(getContainerId()) if (result == 0) {
.compareTo(Long.valueOf(other.getContainerId())); return Long.compare(getContainerId(), other.getContainerId());
} else { } else {
return this.getApplicationAttemptId().compareTo( return result;
other.getApplicationAttemptId());
} }
} }

View File

@ -647,15 +647,6 @@ public class YarnConfiguration extends Configuration {
public static final String DEFAULT_RM_NODEMANAGER_MINIMUM_VERSION = public static final String DEFAULT_RM_NODEMANAGER_MINIMUM_VERSION =
"NONE"; "NONE";
/**
* Timeout(msec) for an untracked node to remain in shutdown or decommissioned
* state.
*/
public static final String RM_NODEMANAGER_UNTRACKED_REMOVAL_TIMEOUT_MSEC =
RM_PREFIX + "node-removal-untracked.timeout-ms";
public static final int
DEFAULT_RM_NODEMANAGER_UNTRACKED_REMOVAL_TIMEOUT_MSEC = 60000;
/** /**
* RM proxy users' prefix * RM proxy users' prefix
*/ */

View File

@ -171,8 +171,6 @@ public class NMClientImpl extends NMClient {
throw RPCUtil.getRemoteException("Container " throw RPCUtil.getRemoteException("Container "
+ startedContainer.containerId.toString() + " is already started"); + startedContainer.containerId.toString() + " is already started");
} }
startedContainers
.put(startedContainer.getContainerId(), startedContainer);
} }
@Override @Override
@ -182,7 +180,8 @@ public class NMClientImpl extends NMClient {
// Do synchronization on StartedContainer to prevent race condition // Do synchronization on StartedContainer to prevent race condition
// between startContainer and stopContainer only when startContainer is // between startContainer and stopContainer only when startContainer is
// in progress for a given container. // in progress for a given container.
StartedContainer startingContainer = createStartedContainer(container); StartedContainer startingContainer =
new StartedContainer(container.getId(), container.getNodeId());
synchronized (startingContainer) { synchronized (startingContainer) {
addStartingContainer(startingContainer); addStartingContainer(startingContainer);
@ -210,18 +209,14 @@ public class NMClientImpl extends NMClient {
} }
allServiceResponse = response.getAllServicesMetaData(); allServiceResponse = response.getAllServicesMetaData();
startingContainer.state = ContainerState.RUNNING; startingContainer.state = ContainerState.RUNNING;
} catch (YarnException e) { } catch (YarnException | IOException e) {
startingContainer.state = ContainerState.COMPLETE; startingContainer.state = ContainerState.COMPLETE;
// Remove the started container if it failed to start // Remove the started container if it failed to start
removeStartedContainer(startingContainer); startedContainers.remove(startingContainer.containerId);
throw e;
} catch (IOException e) {
startingContainer.state = ContainerState.COMPLETE;
removeStartedContainer(startingContainer);
throw e; throw e;
} catch (Throwable t) { } catch (Throwable t) {
startingContainer.state = ContainerState.COMPLETE; startingContainer.state = ContainerState.COMPLETE;
removeStartedContainer(startingContainer); startedContainers.remove(startingContainer.containerId);
throw RPCUtil.getRemoteException(t); throw RPCUtil.getRemoteException(t);
} finally { } finally {
if (proxy != null) { if (proxy != null) {
@ -263,7 +258,7 @@ public class NMClientImpl extends NMClient {
@Override @Override
public void stopContainer(ContainerId containerId, NodeId nodeId) public void stopContainer(ContainerId containerId, NodeId nodeId)
throws YarnException, IOException { throws YarnException, IOException {
StartedContainer startedContainer = getStartedContainer(containerId); StartedContainer startedContainer = startedContainers.get(containerId);
// Only allow one request of stopping the container to move forward // Only allow one request of stopping the container to move forward
// When entering the block, check whether the precursor has already stopped // When entering the block, check whether the precursor has already stopped
@ -276,7 +271,7 @@ public class NMClientImpl extends NMClient {
stopContainerInternal(containerId, nodeId); stopContainerInternal(containerId, nodeId);
// Only after successful // Only after successful
startedContainer.state = ContainerState.COMPLETE; startedContainer.state = ContainerState.COMPLETE;
removeStartedContainer(startedContainer); startedContainers.remove(startedContainer.containerId);
} }
} else { } else {
stopContainerInternal(containerId, nodeId); stopContainerInternal(containerId, nodeId);
@ -334,23 +329,6 @@ public class NMClientImpl extends NMClient {
} }
} }
protected synchronized StartedContainer createStartedContainer(
Container container) throws YarnException, IOException {
StartedContainer startedContainer = new StartedContainer(container.getId(),
container.getNodeId());
return startedContainer;
}
protected synchronized void
removeStartedContainer(StartedContainer container) {
startedContainers.remove(container.containerId);
}
protected synchronized StartedContainer getStartedContainer(
ContainerId containerId) {
return startedContainers.get(containerId);
}
public AtomicBoolean getCleanupRunningContainers() { public AtomicBoolean getCleanupRunningContainers() {
return cleanupRunningContainers; return cleanupRunningContainers;
} }
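The synchronization comment above ("Do synchronization on StartedContainer...") is the core of this NMClientImpl change: startedContainers is a concurrent map keyed by container id, and both startContainer and stopContainer lock the per-container StartedContainer object so a stop cannot interleave with an in-flight start. A generic sketch of that per-key locking idea (the names are made up; this is not the NMClientImpl code):
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
// Illustrative per-key locking only: one lock object per container id, so
// concurrent start/stop calls for the same container serialize while calls
// for different containers proceed in parallel.
final class PerContainerSerializer<K> {
private final ConcurrentMap<K, Object> locks = new ConcurrentHashMap<>();
void runExclusively(K containerId, Runnable action) {
Object lock = locks.computeIfAbsent(containerId, k -> new Object());
synchronized (lock) {
action.run();
}
}
// Mirrors startedContainers.remove(...) in the diff above: once a
// container reaches a terminal state its entry can be dropped.
void forget(K containerId) {
locks.remove(containerId);
}
}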

View File

@ -241,7 +241,7 @@ public class TopCLI extends YarnCLI {
@Override @Override
public int public int
compare(ApplicationInformation a1, ApplicationInformation a2) { compare(ApplicationInformation a1, ApplicationInformation a2) {
return Long.valueOf(a1.usedMemory).compareTo(a2.usedMemory); return Long.compare(a1.usedMemory, a2.usedMemory);
} }
}; };
public static final Comparator<ApplicationInformation> ReservedMemoryComparator = public static final Comparator<ApplicationInformation> ReservedMemoryComparator =
@ -249,7 +249,7 @@ public class TopCLI extends YarnCLI {
@Override @Override
public int public int
compare(ApplicationInformation a1, ApplicationInformation a2) { compare(ApplicationInformation a1, ApplicationInformation a2) {
return Long.valueOf(a1.reservedMemory).compareTo(a2.reservedMemory); return Long.compare(a1.reservedMemory, a2.reservedMemory);
} }
}; };
public static final Comparator<ApplicationInformation> UsedVCoresComparator = public static final Comparator<ApplicationInformation> UsedVCoresComparator =
@ -273,7 +273,7 @@ public class TopCLI extends YarnCLI {
@Override @Override
public int public int
compare(ApplicationInformation a1, ApplicationInformation a2) { compare(ApplicationInformation a1, ApplicationInformation a2) {
return Long.valueOf(a1.vcoreSeconds).compareTo(a2.vcoreSeconds); return Long.compare(a1.vcoreSeconds, a2.vcoreSeconds);
} }
}; };
public static final Comparator<ApplicationInformation> MemorySecondsComparator = public static final Comparator<ApplicationInformation> MemorySecondsComparator =
@ -281,7 +281,7 @@ public class TopCLI extends YarnCLI {
@Override @Override
public int public int
compare(ApplicationInformation a1, ApplicationInformation a2) { compare(ApplicationInformation a1, ApplicationInformation a2) {
return Long.valueOf(a1.memorySeconds).compareTo(a2.memorySeconds); return Long.compare(a1.memorySeconds, a2.memorySeconds);
} }
}; };
public static final Comparator<ApplicationInformation> ProgressComparator = public static final Comparator<ApplicationInformation> ProgressComparator =
@ -297,7 +297,7 @@ public class TopCLI extends YarnCLI {
@Override @Override
public int public int
compare(ApplicationInformation a1, ApplicationInformation a2) { compare(ApplicationInformation a1, ApplicationInformation a2) {
return Long.valueOf(a1.runningTime).compareTo(a2.runningTime); return Long.compare(a1.runningTime, a2.runningTime);
} }
}; };
public static final Comparator<ApplicationInformation> AppNameComparator = public static final Comparator<ApplicationInformation> AppNameComparator =
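Every TopCLI comparator hunk above makes the same substitution: Long.compare(a, b) in place of Long.valueOf(a).compareTo(b). A stand-alone illustration of the equivalent spellings (hypothetical UsageRow type, not the ApplicationInformation class):

    import java.util.Comparator;

    class UsageRow {
        final long usedMemory;
        UsageRow(long usedMemory) { this.usedMemory = usedMemory; }

        // Boxes a Long on every comparison.
        static final Comparator<UsageRow> BOXED =
            (a, b) -> Long.valueOf(a.usedMemory).compareTo(b.usedMemory);

        // Same ordering with no allocation; this is the idiom the diff moves to.
        static final Comparator<UsageRow> PRIMITIVE =
            (a, b) -> Long.compare(a.usedMemory, b.usedMemory);

        // Equivalent Java 8 convenience form.
        static final Comparator<UsageRow> KEYED =
            Comparator.comparingLong(r -> r.usedMemory);
    }

All three sort identically; the change is purely about avoiding temporary Long objects in comparators that run on every refresh.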


@ -270,7 +270,7 @@ public class WebApps {
} }
if (httpScheme.equals(WebAppUtils.HTTPS_PREFIX)) { if (httpScheme.equals(WebAppUtils.HTTPS_PREFIX)) {
WebAppUtils.loadSslConfiguration(builder); WebAppUtils.loadSslConfiguration(builder, conf);
} }
HttpServer2 server = builder.build(); HttpServer2 server = builder.build();


@ -109,8 +109,8 @@ table.display thead th div.DataTables_sort_wrapper span {
.dataTables_wrapper { .dataTables_wrapper {
position: relative; position: relative;
min-height: 302px; min-height: 35px;
_height: 302px; _height: 35px;
clear: both; clear: both;
} }


@ -2722,17 +2722,4 @@
<name>yarn.timeline-service.webapp.rest-csrf.methods-to-ignore</name> <name>yarn.timeline-service.webapp.rest-csrf.methods-to-ignore</name>
<value>GET,OPTIONS,HEAD</value> <value>GET,OPTIONS,HEAD</value>
</property> </property>
<property>
<description>
The least amount of time(msec.) an inactive (decommissioned or shutdown) node can
stay in the nodes list of the resourcemanager after being declared untracked.
A node is marked untracked if and only if it is absent from both include and
exclude nodemanager lists on the RM. All inactive nodes are checked twice per
timeout interval or every 10 minutes, whichever is lesser, and marked appropriately.
The same is done when refreshNodes command (graceful or otherwise) is invoked.
</description>
<name>yarn.resourcemanager.node-removal-untracked.timeout-ms</name>
<value>60000</value>
</property>
</configuration> </configuration>
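The yarn.resourcemanager.node-removal-untracked.timeout-ms description above explains the rule in prose. A hedged paraphrase of that logic in plain Java — the method names are illustrative, and the real NodesListManager (see the hunks further down) also resolves the host to an IP before the lookup:

    import java.util.Set;

    class UntrackedNodes {
        // A node counts as untracked only when an include list is configured and
        // the host appears in neither the include nor the exclude list.
        static boolean isUntracked(String host, Set<String> includes, Set<String> excludes) {
            return !includes.isEmpty()
                && !includes.contains(host)
                && !excludes.contains(host);
        }

        // Inactive nodes are checked twice per timeout interval or every
        // 10 minutes, whichever is smaller.
        static long checkIntervalMs(long timeoutMs) {
            return Math.min(timeoutMs / 2, 10L * 60 * 1000);
        }
    }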


@ -54,7 +54,7 @@ public class TestYarnConfiguration {
String rmWebUrl = WebAppUtils.getRMWebAppURLWithScheme(conf); String rmWebUrl = WebAppUtils.getRMWebAppURLWithScheme(conf);
String[] parts = rmWebUrl.split(":"); String[] parts = rmWebUrl.split(":");
Assert.assertEquals("RM Web URL Port is incrrect", 24543, Assert.assertEquals("RM Web URL Port is incrrect", 24543,
Integer.valueOf(parts[parts.length - 1]).intValue()); Integer.parseInt(parts[parts.length - 1]));
Assert.assertNotSame( Assert.assertNotSame(
"RM Web Url not resolved correctly. Should not be rmtesting", "RM Web Url not resolved correctly. Should not be rmtesting",
"http://rmtesting:24543", rmWebUrl); "http://rmtesting:24543", rmWebUrl);
@ -178,7 +178,7 @@ public class TestYarnConfiguration {
conf.set(YarnConfiguration.RM_RESOURCE_TRACKER_ADDRESS, "yo.yo.yo"); conf.set(YarnConfiguration.RM_RESOURCE_TRACKER_ADDRESS, "yo.yo.yo");
serverAddress = new InetSocketAddress( serverAddress = new InetSocketAddress(
YarnConfiguration.DEFAULT_RM_RESOURCE_TRACKER_ADDRESS.split(":")[0], YarnConfiguration.DEFAULT_RM_RESOURCE_TRACKER_ADDRESS.split(":")[0],
Integer.valueOf(YarnConfiguration.DEFAULT_RM_RESOURCE_TRACKER_ADDRESS.split(":")[1])); Integer.parseInt(YarnConfiguration.DEFAULT_RM_RESOURCE_TRACKER_ADDRESS.split(":")[1]));
resourceTrackerConnectAddress = conf.updateConnectAddr( resourceTrackerConnectAddress = conf.updateConnectAddr(
YarnConfiguration.RM_BIND_HOST, YarnConfiguration.RM_BIND_HOST,
@ -194,7 +194,7 @@ public class TestYarnConfiguration {
conf.set(YarnConfiguration.RM_BIND_HOST, "0.0.0.0"); conf.set(YarnConfiguration.RM_BIND_HOST, "0.0.0.0");
serverAddress = new InetSocketAddress( serverAddress = new InetSocketAddress(
YarnConfiguration.DEFAULT_RM_RESOURCE_TRACKER_ADDRESS.split(":")[0], YarnConfiguration.DEFAULT_RM_RESOURCE_TRACKER_ADDRESS.split(":")[0],
Integer.valueOf(YarnConfiguration.DEFAULT_RM_RESOURCE_TRACKER_ADDRESS.split(":")[1])); Integer.parseInt(YarnConfiguration.DEFAULT_RM_RESOURCE_TRACKER_ADDRESS.split(":")[1]));
resourceTrackerConnectAddress = conf.updateConnectAddr( resourceTrackerConnectAddress = conf.updateConnectAddr(
YarnConfiguration.RM_BIND_HOST, YarnConfiguration.RM_BIND_HOST,
@ -213,7 +213,7 @@ public class TestYarnConfiguration {
serverAddress = new InetSocketAddress( serverAddress = new InetSocketAddress(
YarnConfiguration.DEFAULT_NM_LOCALIZER_ADDRESS.split(":")[0], YarnConfiguration.DEFAULT_NM_LOCALIZER_ADDRESS.split(":")[0],
Integer.valueOf(YarnConfiguration.DEFAULT_NM_LOCALIZER_ADDRESS.split(":")[1])); Integer.parseInt(YarnConfiguration.DEFAULT_NM_LOCALIZER_ADDRESS.split(":")[1]));
InetSocketAddress localizerAddress = conf.updateConnectAddr( InetSocketAddress localizerAddress = conf.updateConnectAddr(
YarnConfiguration.NM_BIND_HOST, YarnConfiguration.NM_BIND_HOST,
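The TestYarnConfiguration hunks above replace Integer.valueOf(...) (plus an intValue() call in the first case) with Integer.parseInt wherever only a primitive is wanted. A small self-contained illustration — the address string is made up, not the real default constant:

    class PortParse {
        public static void main(String[] args) {
            String address = "0.0.0.0:8031";        // illustrative "host:port" value
            String[] parts = address.split(":");

            // parseInt returns a primitive int directly ...
            int port = Integer.parseInt(parts[parts.length - 1]);

            // ... while valueOf produces a boxed Integer that must be unwrapped.
            int boxedPort = Integer.valueOf(parts[parts.length - 1]).intValue();

            System.out.println(port == boxedPort);  // true; the value is the same either way
        }
    }

The same parseInt/parseLong substitution appears in the NodeLabelTestBase, TestFSDownload and ProcessIdFileReader hunks below.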


@ -140,7 +140,7 @@ public class NodeLabelTestBase {
int idx = str.indexOf(':'); int idx = str.indexOf(':');
NodeId id = NodeId id =
NodeId.newInstance(str.substring(0, idx), NodeId.newInstance(str.substring(0, idx),
Integer.valueOf(str.substring(idx + 1))); Integer.parseInt(str.substring(idx + 1)));
return id; return id;
} else { } else {
return NodeId.newInstance(str, CommonNodeLabelsManager.WILDCARD_PORT); return NodeId.newInstance(str, CommonNodeLabelsManager.WILDCARD_PORT);


@ -431,7 +431,7 @@ public class TestFSDownload {
try { try {
for (Map.Entry<LocalResource,Future<Path>> p : pending.entrySet()) { for (Map.Entry<LocalResource,Future<Path>> p : pending.entrySet()) {
Path localized = p.getValue().get(); Path localized = p.getValue().get();
assertEquals(sizes[Integer.valueOf(localized.getName())], p.getKey() assertEquals(sizes[Integer.parseInt(localized.getName())], p.getKey()
.getSize()); .getSize());
FileStatus status = files.getFileStatus(localized.getParent()); FileStatus status = files.getFileStatus(localized.getParent());


@ -772,7 +772,7 @@ public class RegistrySecurity extends AbstractService {
* @return true if the SASL client system property is set. * @return true if the SASL client system property is set.
*/ */
public static boolean isClientSASLEnabled() { public static boolean isClientSASLEnabled() {
return Boolean.valueOf(System.getProperty( return Boolean.parseBoolean(System.getProperty(
ZookeeperConfigOptions.PROP_ZK_ENABLE_SASL_CLIENT, "true")); ZookeeperConfigOptions.PROP_ZK_ENABLE_SASL_CLIENT, "true"));
} }
@ -862,7 +862,7 @@ public class RegistrySecurity extends AbstractService {
String sasl = String sasl =
System.getProperty(PROP_ZK_ENABLE_SASL_CLIENT, System.getProperty(PROP_ZK_ENABLE_SASL_CLIENT,
DEFAULT_ZK_ENABLE_SASL_CLIENT); DEFAULT_ZK_ENABLE_SASL_CLIENT);
boolean saslEnabled = Boolean.valueOf(sasl); boolean saslEnabled = Boolean.parseBoolean(sasl);
builder.append(describeProperty(PROP_ZK_ENABLE_SASL_CLIENT, builder.append(describeProperty(PROP_ZK_ENABLE_SASL_CLIENT,
DEFAULT_ZK_ENABLE_SASL_CLIENT)); DEFAULT_ZK_ENABLE_SASL_CLIENT));
if (saslEnabled) { if (saslEnabled) {
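The RegistrySecurity hunks make the analogous change for booleans. A hedged sketch of the call shape — the property name is spelled out here only for illustration:

    class SaslFlag {
        static boolean clientSaslEnabled() {
            // parseBoolean never throws: anything other than "true"
            // (ignoring case) simply reads as false.
            return Boolean.parseBoolean(
                System.getProperty("zookeeper.sasl.client", "true"));
        }
    }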


@ -284,6 +284,7 @@ public class NodeStatusUpdaterImpl extends AbstractService implements
return; return;
} }
this.isStopped = true; this.isStopped = true;
sendOutofBandHeartBeat();
try { try {
statusUpdater.join(); statusUpdater.join();
registerWithRM(); registerWithRM();
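The one added line above, sendOutofBandHeartBeat() before statusUpdater.join(), is presumably there so the heartbeat thread wakes up, sees isStopped and exits instead of sleeping out its interval while the caller blocks in join(). A self-contained sketch of that wake-then-join pattern using a plain monitor (none of the NodeStatusUpdater types):

    class Heartbeater implements Runnable {
        private final Object heartbeatMonitor = new Object();
        private volatile boolean stopped;

        public void run() {
            synchronized (heartbeatMonitor) {
                while (!stopped) {
                    try {
                        heartbeatMonitor.wait(1000);   // normally wakes once per interval
                    } catch (InterruptedException e) {
                        Thread.currentThread().interrupt();
                        return;
                    }
                }
            }
        }

        void stopAndJoin(Thread worker) throws InterruptedException {
            stopped = true;
            synchronized (heartbeatMonitor) {
                heartbeatMonitor.notifyAll();          // the out-of-band wake-up
            }
            worker.join();                             // now returns promptly
        }
    }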


@ -1017,7 +1017,7 @@ public class ContainerLaunch implements Callable<Integer> {
//variable can be set to indicate that distcache entries should come //variable can be set to indicate that distcache entries should come
//first //first
boolean preferLocalizedJars = Boolean.valueOf( boolean preferLocalizedJars = Boolean.parseBoolean(
environment.get(Environment.CLASSPATH_PREPEND_DISTCACHE.name()) environment.get(Environment.CLASSPATH_PREPEND_DISTCACHE.name())
); );


@ -200,9 +200,11 @@ public class LocalizedResource implements EventHandler<ResourceEvent> {
LOG.warn("Can't handle this event at current state", e); LOG.warn("Can't handle this event at current state", e);
} }
if (oldState != newState) { if (oldState != newState) {
LOG.info("Resource " + resourcePath + (localPath != null ? if (LOG.isDebugEnabled()) {
"(->" + localPath + ")": "") + " transitioned from " + oldState LOG.debug("Resource " + resourcePath + (localPath != null ?
+ " to " + newState); "(->" + localPath + ")": "") + " transitioned from " + oldState
+ " to " + newState);
}
} }
} finally { } finally {
this.writeLock.unlock(); this.writeLock.unlock();
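The LocalizedResource hunk both demotes the per-resource transition message from INFO to DEBUG and wraps it in a level check. A minimal sketch of the guard with the same commons-logging API the file already uses:

    import org.apache.commons.logging.Log;
    import org.apache.commons.logging.LogFactory;

    class TransitionLogger {
        private static final Log LOG = LogFactory.getLog(TransitionLogger.class);

        void logTransition(String resource, String oldState, String newState) {
            // The concatenated message is only built when debug logging is on,
            // so busy localizations no longer pay for string building at INFO level.
            if (LOG.isDebugEnabled()) {
                LOG.debug("Resource " + resource + " transitioned from "
                    + oldState + " to " + newState);
            }
        }
    }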


@ -79,7 +79,7 @@ public class ProcessIdFileReader {
else { else {
// Otherwise, find first line containing a numeric pid. // Otherwise, find first line containing a numeric pid.
try { try {
Long pid = Long.valueOf(temp); long pid = Long.parseLong(temp);
if (pid > 0) { if (pid > 0) {
processId = temp; processId = temp;
break; break;


@ -34,6 +34,7 @@ import org.apache.hadoop.yarn.util.ConverterUtils;
import org.apache.hadoop.yarn.webapp.SubView; import org.apache.hadoop.yarn.webapp.SubView;
import org.apache.hadoop.yarn.webapp.YarnWebParams; import org.apache.hadoop.yarn.webapp.YarnWebParams;
import org.apache.hadoop.yarn.webapp.hamlet.Hamlet; import org.apache.hadoop.yarn.webapp.hamlet.Hamlet;
import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.DIV;
import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.TABLE; import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.TABLE;
import org.apache.hadoop.yarn.webapp.view.HtmlBlock; import org.apache.hadoop.yarn.webapp.view.HtmlBlock;
import org.apache.hadoop.yarn.webapp.view.InfoBlock; import org.apache.hadoop.yarn.webapp.view.InfoBlock;
@ -75,10 +76,21 @@ public class ApplicationPage extends NMView implements YarnWebParams {
@Override @Override
protected void render(Block html) { protected void render(Block html) {
ApplicationId applicationID = ApplicationId applicationID = null;
ConverterUtils.toApplicationId(this.recordFactory, try {
$(APPLICATION_ID)); applicationID = ConverterUtils.toApplicationId(this.recordFactory,
$(APPLICATION_ID));
} catch (IllegalArgumentException e) {
html.p()._("Invalid Application Id " + $(APPLICATION_ID))._();
return;
}
DIV<Hamlet> div = html.div("#content");
Application app = this.nmContext.getApplications().get(applicationID); Application app = this.nmContext.getApplications().get(applicationID);
if (app == null) {
div.h1("Unknown application with id " + applicationID
+ ". Application might have been completed")._();
return;
}
AppInfo info = new AppInfo(app); AppInfo info = new AppInfo(app);
info("Application's information") info("Application's information")
._("ApplicationId", info.getId()) ._("ApplicationId", info.getId())


@ -108,6 +108,7 @@ public class TestNodeManagerResync {
static final String user = "nobody"; static final String user = "nobody";
private FileContext localFS; private FileContext localFS;
private CyclicBarrier syncBarrier; private CyclicBarrier syncBarrier;
private CyclicBarrier updateBarrier;
private AtomicBoolean assertionFailedInThread = new AtomicBoolean(false); private AtomicBoolean assertionFailedInThread = new AtomicBoolean(false);
private AtomicBoolean isNMShutdownCalled = new AtomicBoolean(false); private AtomicBoolean isNMShutdownCalled = new AtomicBoolean(false);
private final NodeManagerEvent resyncEvent = private final NodeManagerEvent resyncEvent =
@ -125,6 +126,7 @@ public class TestNodeManagerResync {
remoteLogsDir.mkdirs(); remoteLogsDir.mkdirs();
nmLocalDir.mkdirs(); nmLocalDir.mkdirs();
syncBarrier = new CyclicBarrier(2); syncBarrier = new CyclicBarrier(2);
updateBarrier = new CyclicBarrier(2);
} }
@After @After
@ -803,9 +805,11 @@ public class TestNodeManagerResync {
.getContainerStatuses(gcsRequest).getContainerStatuses().get(0); .getContainerStatuses(gcsRequest).getContainerStatuses().get(0);
assertEquals(Resource.newInstance(1024, 1), assertEquals(Resource.newInstance(1024, 1),
containerStatus.getCapability()); containerStatus.getCapability());
updateBarrier.await();
// Call the actual rebootNodeStatusUpdaterAndRegisterWithRM(). // Call the actual rebootNodeStatusUpdaterAndRegisterWithRM().
// This function should be synchronized with // This function should be synchronized with
// increaseContainersResource(). // increaseContainersResource().
updateBarrier.await();
super.rebootNodeStatusUpdaterAndRegisterWithRM(); super.rebootNodeStatusUpdaterAndRegisterWithRM();
// Check status after registerWithRM // Check status after registerWithRM
containerStatus = getContainerManager() containerStatus = getContainerManager()
@ -831,17 +835,24 @@ public class TestNodeManagerResync {
List<Token> increaseTokens = new ArrayList<Token>(); List<Token> increaseTokens = new ArrayList<Token>();
// Add increase request. // Add increase request.
Resource targetResource = Resource.newInstance(4096, 2); Resource targetResource = Resource.newInstance(4096, 2);
try { try{
increaseTokens.add(getContainerToken(targetResource)); try {
IncreaseContainersResourceRequest increaseRequest = updateBarrier.await();
IncreaseContainersResourceRequest.newInstance(increaseTokens); increaseTokens.add(getContainerToken(targetResource));
IncreaseContainersResourceResponse increaseResponse = IncreaseContainersResourceRequest increaseRequest =
getContainerManager() IncreaseContainersResourceRequest.newInstance(increaseTokens);
.increaseContainersResource(increaseRequest); IncreaseContainersResourceResponse increaseResponse =
Assert.assertEquals( getContainerManager()
1, increaseResponse.getSuccessfullyIncreasedContainers() .increaseContainersResource(increaseRequest);
.size()); Assert.assertEquals(
Assert.assertTrue(increaseResponse.getFailedRequests().isEmpty()); 1, increaseResponse.getSuccessfullyIncreasedContainers()
.size());
Assert.assertTrue(increaseResponse.getFailedRequests().isEmpty());
} catch (Exception e) {
e.printStackTrace();
} finally {
updateBarrier.await();
}
} catch (Exception e) { } catch (Exception e) {
e.printStackTrace(); e.printStackTrace();
} }
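The TestNodeManagerResync hunks add a second CyclicBarrier(2) (updateBarrier) and await it twice on each side, which forces the resource-increase thread and the reboot path to interleave deterministically. A stripped-down sketch of that two-await rendezvous (generic threads, none of the test's NM plumbing):

    import java.util.concurrent.CyclicBarrier;

    public class BarrierHandshake {
        public static void main(String[] args) throws Exception {
            CyclicBarrier barrier = new CyclicBarrier(2);

            Thread worker = new Thread(() -> {
                try {
                    barrier.await();   // 1st rendezvous: both sides are ready
                    // ... perform the container-resource-increase work here ...
                    barrier.await();   // 2nd rendezvous: the work is finished
                } catch (Exception e) {
                    Thread.currentThread().interrupt();
                }
            });
            worker.start();

            barrier.await();           // matches the worker's 1st await
            barrier.await();           // matches the 2nd; safe to reboot/verify now
            worker.join();
        }
    }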


@ -223,16 +223,27 @@ public class TestLogAggregationService extends BaseContainerManagerTest {
any(UserGroupInformation.class)); any(UserGroupInformation.class));
verify(delSrvc).delete(eq(user), eq((Path) null), verify(delSrvc).delete(eq(user), eq((Path) null),
eq(new Path(app1LogDir.getAbsolutePath()))); eq(new Path(app1LogDir.getAbsolutePath())));
delSrvc.stop();
String containerIdStr = ConverterUtils.toString(container11); String containerIdStr = ConverterUtils.toString(container11);
File containerLogDir = new File(app1LogDir, containerIdStr); File containerLogDir = new File(app1LogDir, containerIdStr);
int count = 0;
int maxAttempts = 50;
for (String fileType : new String[] { "stdout", "stderr", "syslog" }) { for (String fileType : new String[] { "stdout", "stderr", "syslog" }) {
File f = new File(containerLogDir, fileType); File f = new File(containerLogDir, fileType);
Assert.assertFalse("check "+f, f.exists()); count = 0;
while ((f.exists()) && (count < maxAttempts)) {
count++;
Thread.sleep(100);
}
Assert.assertFalse("File [" + f + "] was not deleted", f.exists());
} }
count = 0;
Assert.assertFalse(app1LogDir.exists()); while ((app1LogDir.exists()) && (count < maxAttempts)) {
count++;
Thread.sleep(100);
}
Assert.assertFalse("Directory [" + app1LogDir + "] was not deleted",
app1LogDir.exists());
Path logFilePath = Path logFilePath =
logAggregationService.getRemoteNodeLogFileForApp(application1, logAggregationService.getRemoteNodeLogFileForApp(application1,
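The log-aggregation test above stops asserting right after the delete verification and instead polls, since the deletion service removes the files asynchronously. The same retry loop, pulled out into a hedged general-purpose helper:

    class WaitFor {
        // Poll until the condition holds or the attempt budget runs out;
        // the test hard-codes 50 attempts of 100 ms each.
        static boolean waitUntil(java.util.function.BooleanSupplier condition,
                                 int maxAttempts, long sleepMs) throws InterruptedException {
            for (int i = 0; i < maxAttempts && !condition.getAsBoolean(); i++) {
                Thread.sleep(sleepMs);
            }
            return condition.getAsBoolean();
        }
    }

A caller would then assert on the result, e.g. assertTrue("not deleted", WaitFor.waitUntil(() -> !f.exists(), 50, 100)) for some java.io.File f.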


@ -0,0 +1,86 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.nodemanager.webapp;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
import java.util.Arrays;
import java.util.Collection;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.yarn.server.nodemanager.Context;
import org.apache.hadoop.yarn.server.nodemanager.NodeManager;
import org.apache.hadoop.yarn.server.nodemanager.NodeManager.NMContext;
import org.apache.hadoop.yarn.server.nodemanager.recovery.NMNullStateStoreService;
import org.apache.hadoop.yarn.server.nodemanager.security.NMContainerTokenSecretManager;
import org.apache.hadoop.yarn.server.nodemanager.security.NMTokenSecretManagerInNM;
import org.apache.hadoop.yarn.server.nodemanager.webapp.ApplicationPage.ApplicationBlock;
import org.apache.hadoop.yarn.server.security.ApplicationACLsManager;
import org.apache.hadoop.yarn.webapp.YarnWebParams;
import org.apache.hadoop.yarn.webapp.test.WebAppTests;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.junit.runners.Parameterized;
import com.google.inject.Binder;
import com.google.inject.Injector;
import com.google.inject.Module;
@RunWith(Parameterized.class)
public class TestNMAppsPage {
String applicationid;
public TestNMAppsPage(String appid) {
this.applicationid = appid;
}
@Parameterized.Parameters
public static Collection<Object[]> getAppIds() {
return Arrays.asList(new Object[][] { { "appid" },
{ "application_123123213_0001" }, { "" } });
}
@Test
public void testNMAppsPage() {
Configuration conf = new Configuration();
final NMContext nmcontext = new NMContext(
new NMContainerTokenSecretManager(conf), new NMTokenSecretManagerInNM(),
null, new ApplicationACLsManager(conf), new NMNullStateStoreService());
Injector injector = WebAppTests.createMockInjector(NMContext.class,
nmcontext, new Module() {
@Override
public void configure(Binder binder) {
NodeManager nm = TestNMAppsPage.mocknm(nmcontext);
binder.bind(NodeManager.class).toInstance(nm);
binder.bind(Context.class).toInstance(nmcontext);
}
});
ApplicationBlock instance = injector.getInstance(ApplicationBlock.class);
instance.set(YarnWebParams.APPLICATION_ID, applicationid);
instance.render();
}
protected static NodeManager mocknm(NMContext nmcontext) {
NodeManager rm = mock(NodeManager.class);
when(rm.getNMContext()).thenReturn(nmcontext);
return rm;
}
}


@ -682,7 +682,11 @@ public class AdminService extends CompositeService implements
return conf; return conf;
} }
private void refreshAll() throws ServiceFailedException { /*
* Visibility could be private for test its made as default
*/
@VisibleForTesting
void refreshAll() throws ServiceFailedException {
try { try {
refreshQueues(RefreshQueuesRequest.newInstance()); refreshQueues(RefreshQueuesRequest.newInstance());
refreshNodes(RefreshNodesRequest.newInstance(DecommissionType.NORMAL)); refreshNodes(RefreshNodesRequest.newInstance(DecommissionType.NORMAL));
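The AdminService hunk widens refreshAll() from private to package-private and tags it @VisibleForTesting so tests in the same package can drive it directly. A minimal illustration of that convention (Guava's annotation; the body is elided):

    import com.google.common.annotations.VisibleForTesting;

    class AdminFacade {
        // Package-private instead of private purely so a same-package test can
        // call it; the annotation documents that intent for readers and tools.
        @VisibleForTesting
        void refreshAll() {
            // ... refresh queues, nodes, ACLs, ...
        }
    }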


@ -36,7 +36,6 @@ import org.apache.hadoop.net.Node;
import org.apache.hadoop.service.AbstractService; import org.apache.hadoop.service.AbstractService;
import org.apache.hadoop.service.CompositeService; import org.apache.hadoop.service.CompositeService;
import org.apache.hadoop.util.HostsFileReader; import org.apache.hadoop.util.HostsFileReader;
import org.apache.hadoop.util.Time;
import org.apache.hadoop.yarn.api.records.NodeId; import org.apache.hadoop.yarn.api.records.NodeId;
import org.apache.hadoop.yarn.api.records.NodeState; import org.apache.hadoop.yarn.api.records.NodeState;
import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.apache.hadoop.yarn.conf.YarnConfiguration;
@ -69,8 +68,6 @@ public class NodesListManager extends CompositeService implements
private String excludesFile; private String excludesFile;
private Resolver resolver; private Resolver resolver;
private Timer removalTimer;
private int nodeRemovalCheckInterval;
public NodesListManager(RMContext rmContext) { public NodesListManager(RMContext rmContext) {
super(NodesListManager.class.getName()); super(NodesListManager.class.getName());
@ -108,56 +105,9 @@ public class NodesListManager extends CompositeService implements
} catch (IOException ioe) { } catch (IOException ioe) {
disableHostsFileReader(ioe); disableHostsFileReader(ioe);
} }
final int nodeRemovalTimeout =
conf.getInt(
YarnConfiguration.RM_NODEMANAGER_UNTRACKED_REMOVAL_TIMEOUT_MSEC,
YarnConfiguration.
DEFAULT_RM_NODEMANAGER_UNTRACKED_REMOVAL_TIMEOUT_MSEC);
nodeRemovalCheckInterval = (Math.min(nodeRemovalTimeout/2,
600000));
removalTimer = new Timer("Node Removal Timer");
removalTimer.schedule(new TimerTask() {
@Override
public void run() {
long now = Time.monotonicNow();
for (Map.Entry<NodeId, RMNode> entry :
rmContext.getInactiveRMNodes().entrySet()) {
NodeId nodeId = entry.getKey();
RMNode rmNode = entry.getValue();
if (isUntrackedNode(rmNode.getHostName())) {
if (rmNode.getUntrackedTimeStamp() == 0) {
rmNode.setUntrackedTimeStamp(now);
} else if (now - rmNode.getUntrackedTimeStamp() >
nodeRemovalTimeout) {
RMNode result = rmContext.getInactiveRMNodes().remove(nodeId);
if (result != null) {
ClusterMetrics clusterMetrics = ClusterMetrics.getMetrics();
if (rmNode.getState() == NodeState.SHUTDOWN) {
clusterMetrics.decrNumShutdownNMs();
} else {
clusterMetrics.decrDecommisionedNMs();
}
LOG.info("Removed "+result.getHostName() +
" from inactive nodes list");
}
}
} else {
rmNode.setUntrackedTimeStamp(0);
}
}
}
}, nodeRemovalCheckInterval, nodeRemovalCheckInterval);
super.serviceInit(conf); super.serviceInit(conf);
} }
@Override
public void serviceStop() {
removalTimer.cancel();
}
private void printConfiguredHosts() { private void printConfiguredHosts() {
if (!LOG.isDebugEnabled()) { if (!LOG.isDebugEnabled()) {
return; return;
@ -181,13 +131,10 @@ public class NodesListManager extends CompositeService implements
for (NodeId nodeId: rmContext.getRMNodes().keySet()) { for (NodeId nodeId: rmContext.getRMNodes().keySet()) {
if (!isValidNode(nodeId.getHost())) { if (!isValidNode(nodeId.getHost())) {
RMNodeEventType nodeEventType = isUntrackedNode(nodeId.getHost()) ?
RMNodeEventType.SHUTDOWN : RMNodeEventType.DECOMMISSION;
this.rmContext.getDispatcher().getEventHandler().handle( this.rmContext.getDispatcher().getEventHandler().handle(
new RMNodeEvent(nodeId, nodeEventType)); new RMNodeEvent(nodeId, RMNodeEventType.DECOMMISSION));
} }
} }
updateInactiveNodes();
} }
private void refreshHostsReader(Configuration yarnConf) throws IOException, private void refreshHostsReader(Configuration yarnConf) throws IOException,
@ -224,16 +171,6 @@ public class NodesListManager extends CompositeService implements
} }
} }
@VisibleForTesting
public int getNodeRemovalCheckInterval() {
return nodeRemovalCheckInterval;
}
@VisibleForTesting
public void setNodeRemovalCheckInterval(int interval) {
this.nodeRemovalCheckInterval = interval;
}
@VisibleForTesting @VisibleForTesting
public Resolver getResolver() { public Resolver getResolver() {
return resolver; return resolver;
@ -437,33 +374,6 @@ public class NodesListManager extends CompositeService implements
return hostsReader; return hostsReader;
} }
private void updateInactiveNodes() {
long now = Time.monotonicNow();
for(Entry<NodeId, RMNode> entry :
rmContext.getInactiveRMNodes().entrySet()) {
NodeId nodeId = entry.getKey();
RMNode rmNode = entry.getValue();
if (isUntrackedNode(nodeId.getHost()) &&
rmNode.getUntrackedTimeStamp() == 0) {
rmNode.setUntrackedTimeStamp(now);
}
}
}
public boolean isUntrackedNode(String hostName) {
boolean untracked;
String ip = resolver.resolve(hostName);
synchronized (hostsReader) {
Set<String> hostsList = hostsReader.getHosts();
Set<String> excludeList = hostsReader.getExcludedHosts();
untracked = !hostsList.isEmpty() &&
!hostsList.contains(hostName) && !hostsList.contains(ip) &&
!excludeList.contains(hostName) && !excludeList.contains(ip);
}
return untracked;
}
/** /**
* Refresh the nodes gracefully * Refresh the nodes gracefully
* *
@ -474,13 +384,11 @@ public class NodesListManager extends CompositeService implements
public void refreshNodesGracefully(Configuration conf) throws IOException, public void refreshNodesGracefully(Configuration conf) throws IOException,
YarnException { YarnException {
refreshHostsReader(conf); refreshHostsReader(conf);
for (Entry<NodeId, RMNode> entry : rmContext.getRMNodes().entrySet()) { for (Entry<NodeId, RMNode> entry:rmContext.getRMNodes().entrySet()) {
NodeId nodeId = entry.getKey(); NodeId nodeId = entry.getKey();
if (!isValidNode(nodeId.getHost())) { if (!isValidNode(nodeId.getHost())) {
RMNodeEventType nodeEventType = isUntrackedNode(nodeId.getHost()) ?
RMNodeEventType.SHUTDOWN : RMNodeEventType.GRACEFUL_DECOMMISSION;
this.rmContext.getDispatcher().getEventHandler().handle( this.rmContext.getDispatcher().getEventHandler().handle(
new RMNodeEvent(nodeId, nodeEventType)); new RMNodeEvent(nodeId, RMNodeEventType.GRACEFUL_DECOMMISSION));
} else { } else {
// Recommissioning the nodes // Recommissioning the nodes
if (entry.getValue().getState() == NodeState.DECOMMISSIONING) { if (entry.getValue().getState() == NodeState.DECOMMISSIONING) {
@ -489,7 +397,6 @@ public class NodesListManager extends CompositeService implements
} }
} }
} }
updateInactiveNodes();
} }
/** /**
@ -513,11 +420,8 @@ public class NodesListManager extends CompositeService implements
public void refreshNodesForcefully() { public void refreshNodesForcefully() {
for (Entry<NodeId, RMNode> entry : rmContext.getRMNodes().entrySet()) { for (Entry<NodeId, RMNode> entry : rmContext.getRMNodes().entrySet()) {
if (entry.getValue().getState() == NodeState.DECOMMISSIONING) { if (entry.getValue().getState() == NodeState.DECOMMISSIONING) {
RMNodeEventType nodeEventType =
isUntrackedNode(entry.getKey().getHost()) ?
RMNodeEventType.SHUTDOWN : RMNodeEventType.DECOMMISSION;
this.rmContext.getDispatcher().getEventHandler().handle( this.rmContext.getDispatcher().getEventHandler().handle(
new RMNodeEvent(entry.getKey(), nodeEventType)); new RMNodeEvent(entry.getKey(), RMNodeEventType.DECOMMISSION));
} }
} }
} }


@ -87,7 +87,7 @@ public class RMServerUtils {
acceptedStates.contains(NodeState.LOST) || acceptedStates.contains(NodeState.LOST) ||
acceptedStates.contains(NodeState.REBOOTED)) { acceptedStates.contains(NodeState.REBOOTED)) {
for (RMNode rmNode : context.getInactiveRMNodes().values()) { for (RMNode rmNode : context.getInactiveRMNodes().values()) {
if ((rmNode != null) && acceptedStates.contains(rmNode.getState())) { if (acceptedStates.contains(rmNode.getState())) {
results.add(rmNode); results.add(rmNode);
} }
} }


@ -320,8 +320,7 @@ public class ResourceTrackerService extends AbstractService implements
} }
// Check if this node is a 'valid' node // Check if this node is a 'valid' node
if (!this.nodesListManager.isValidNode(host) || if (!this.nodesListManager.isValidNode(host)) {
this.nodesListManager.isUntrackedNode(host)) {
String message = String message =
"Disallowed NodeManager from " + host "Disallowed NodeManager from " + host
+ ", Sending SHUTDOWN signal to the NodeManager."; + ", Sending SHUTDOWN signal to the NodeManager.";
@ -452,9 +451,8 @@ public class ResourceTrackerService extends AbstractService implements
// 1. Check if it's a valid (i.e. not excluded) node, if not, see if it is // 1. Check if it's a valid (i.e. not excluded) node, if not, see if it is
// in decommissioning. // in decommissioning.
if ((!this.nodesListManager.isValidNode(nodeId.getHost()) && if (!this.nodesListManager.isValidNode(nodeId.getHost())
!isNodeInDecommissioning(nodeId)) || && !isNodeInDecommissioning(nodeId)) {
this.nodesListManager.isUntrackedNode(nodeId.getHost())) {
String message = String message =
"Disallowed NodeManager nodeId: " + nodeId + " hostname: " "Disallowed NodeManager nodeId: " + nodeId + " hostname: "
+ nodeId.getHost(); + nodeId.getHost();


@ -927,6 +927,20 @@ public class RMAppAttemptImpl implements RMAppAttempt, Recoverable {
this.justFinishedContainers = attempt.getJustFinishedContainersReference(); this.justFinishedContainers = attempt.getJustFinishedContainersReference();
this.finishedContainersSentToAM = this.finishedContainersSentToAM =
attempt.getFinishedContainersSentToAMReference(); attempt.getFinishedContainersSentToAMReference();
// container complete msg was moved from justFinishedContainers to
// finishedContainersSentToAM in ApplicationMasterService#allocate,
// if am crashed and not received this response, we should resend
// this msg again after am restart
if (!this.finishedContainersSentToAM.isEmpty()) {
for (NodeId nodeId : this.finishedContainersSentToAM.keySet()) {
List<ContainerStatus> containerStatuses =
this.finishedContainersSentToAM.get(nodeId);
this.justFinishedContainers.putIfAbsent(nodeId,
new ArrayList<ContainerStatus>());
this.justFinishedContainers.get(nodeId).addAll(containerStatuses);
}
this.finishedContainersSentToAM.clear();
}
} }
private void recoverAppAttemptCredentials(Credentials appAttemptTokens, private void recoverAppAttemptCredentials(Credentials appAttemptTokens,
@ -1845,13 +1859,13 @@ public class RMAppAttemptImpl implements RMAppAttempt, Recoverable {
} else { } else {
LOG.warn("No ContainerStatus in containerFinishedEvent"); LOG.warn("No ContainerStatus in containerFinishedEvent");
} }
finishedContainersSentToAM.putIfAbsent(nodeId,
new ArrayList<ContainerStatus>());
appAttempt.finishedContainersSentToAM.get(nodeId).add(
containerFinishedEvent.getContainerStatus());
if (!appAttempt.getSubmissionContext() if (!appAttempt.getSubmissionContext()
.getKeepContainersAcrossApplicationAttempts()) { .getKeepContainersAcrossApplicationAttempts()) {
finishedContainersSentToAM.putIfAbsent(nodeId,
new ArrayList<ContainerStatus>());
appAttempt.finishedContainersSentToAM.get(nodeId).add(
containerFinishedEvent.getContainerStatus());
appAttempt.sendFinishedContainersToNM(); appAttempt.sendFinishedContainersToNM();
} else { } else {
appAttempt.sendFinishedAMContainerToNM(nodeId, appAttempt.sendFinishedAMContainerToNM(nodeId,
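The first RMAppAttemptImpl hunk above states its own reasoning in the added comment: statuses already moved to finishedContainersSentToAM may never have reached an AM that crashed, so on recovery they are folded back into justFinishedContainers and the sent map is cleared. A type-simplified sketch of that move, with String keys and values standing in for NodeId and ContainerStatus:

    import java.util.ArrayList;
    import java.util.List;
    import java.util.Map;

    class FinishedContainerRecovery {
        static void requeueUnacked(Map<String, List<String>> justFinished,
                                   Map<String, List<String>> sentToAM) {
            // Push every possibly-unacknowledged status back into "just finished"
            // so the restarted AM is told about it again, then reset the sent map.
            for (Map.Entry<String, List<String>> e : sentToAM.entrySet()) {
                justFinished.computeIfAbsent(e.getKey(), k -> new ArrayList<>())
                            .addAll(e.getValue());
            }
            sentToAM.clear();
        }
    }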


@ -168,8 +168,4 @@ public interface RMNode {
NodeHeartbeatResponse response); NodeHeartbeatResponse response);
public List<Container> pullNewlyIncreasedContainers(); public List<Container> pullNewlyIncreasedContainers();
long getUntrackedTimeStamp();
void setUntrackedTimeStamp(long timer);
} }


@ -39,7 +39,6 @@ import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.classification.InterfaceStability.Unstable; import org.apache.hadoop.classification.InterfaceStability.Unstable;
import org.apache.hadoop.net.Node; import org.apache.hadoop.net.Node;
import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.util.Time;
import org.apache.hadoop.yarn.api.protocolrecords.SignalContainerRequest; import org.apache.hadoop.yarn.api.protocolrecords.SignalContainerRequest;
import org.apache.hadoop.yarn.api.records.ApplicationId; import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.Container; import org.apache.hadoop.yarn.api.records.Container;
@ -121,7 +120,6 @@ public class RMNodeImpl implements RMNode, EventHandler<RMNodeEvent> {
private long lastHealthReportTime; private long lastHealthReportTime;
private String nodeManagerVersion; private String nodeManagerVersion;
private long timeStamp;
/* Aggregated resource utilization for the containers. */ /* Aggregated resource utilization for the containers. */
private ResourceUtilization containersUtilization; private ResourceUtilization containersUtilization;
/* Resource utilization for the node. */ /* Resource utilization for the node. */
@ -261,9 +259,6 @@ public class RMNodeImpl implements RMNode, EventHandler<RMNodeEvent> {
.addTransition(NodeState.DECOMMISSIONING, NodeState.DECOMMISSIONING, .addTransition(NodeState.DECOMMISSIONING, NodeState.DECOMMISSIONING,
RMNodeEventType.CLEANUP_APP, new CleanUpAppTransition()) RMNodeEventType.CLEANUP_APP, new CleanUpAppTransition())
.addTransition(NodeState.DECOMMISSIONING, NodeState.SHUTDOWN,
RMNodeEventType.SHUTDOWN,
new DeactivateNodeTransition(NodeState.SHUTDOWN))
// TODO (in YARN-3223) update resource when container finished. // TODO (in YARN-3223) update resource when container finished.
.addTransition(NodeState.DECOMMISSIONING, NodeState.DECOMMISSIONING, .addTransition(NodeState.DECOMMISSIONING, NodeState.DECOMMISSIONING,
@ -351,7 +346,6 @@ public class RMNodeImpl implements RMNode, EventHandler<RMNodeEvent> {
this.healthReport = "Healthy"; this.healthReport = "Healthy";
this.lastHealthReportTime = System.currentTimeMillis(); this.lastHealthReportTime = System.currentTimeMillis();
this.nodeManagerVersion = nodeManagerVersion; this.nodeManagerVersion = nodeManagerVersion;
this.timeStamp = 0;
this.latestNodeHeartBeatResponse.setResponseId(0); this.latestNodeHeartBeatResponse.setResponseId(0);
@ -1017,7 +1011,7 @@ public class RMNodeImpl implements RMNode, EventHandler<RMNodeEvent> {
} }
/** /**
* Put a node in deactivated (decommissioned or shutdown) status. * Put a node in deactivated (decommissioned) status.
* @param rmNode * @param rmNode
* @param finalState * @param finalState
*/ */
@ -1034,10 +1028,6 @@ public class RMNodeImpl implements RMNode, EventHandler<RMNodeEvent> {
LOG.info("Deactivating Node " + rmNode.nodeId + " as it is now " LOG.info("Deactivating Node " + rmNode.nodeId + " as it is now "
+ finalState); + finalState);
rmNode.context.getInactiveRMNodes().put(rmNode.nodeId, rmNode); rmNode.context.getInactiveRMNodes().put(rmNode.nodeId, rmNode);
if (finalState == NodeState.SHUTDOWN &&
rmNode.context.getNodesListManager().isUntrackedNode(rmNode.hostName)) {
rmNode.setUntrackedTimeStamp(Time.monotonicNow());
}
} }
/** /**
@ -1393,14 +1383,4 @@ public class RMNodeImpl implements RMNode, EventHandler<RMNodeEvent> {
public Resource getOriginalTotalCapability() { public Resource getOriginalTotalCapability() {
return this.originalTotalCapability; return this.originalTotalCapability;
} }
@Override
public long getUntrackedTimeStamp() {
return this.timeStamp;
}
@Override
public void setUntrackedTimeStamp(long ts) {
this.timeStamp = ts;
}
} }


@ -29,8 +29,8 @@ import java.util.Set;
import java.util.TreeMap; import java.util.TreeMap;
import java.util.TreeSet; import java.util.TreeSet;
import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.atomic.AtomicLong;
import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicLong;
import org.apache.commons.logging.Log; import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory; import org.apache.commons.logging.LogFactory;
@ -46,6 +46,7 @@ import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.api.records.ResourceRequest; import org.apache.hadoop.yarn.api.records.ResourceRequest;
import org.apache.hadoop.yarn.exceptions.YarnException; import org.apache.hadoop.yarn.exceptions.YarnException;
import org.apache.hadoop.yarn.server.resourcemanager.RMServerUtils; import org.apache.hadoop.yarn.server.resourcemanager.RMServerUtils;
import org.apache.hadoop.yarn.server.resourcemanager.nodelabels.RMNodeLabelsManager;
import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainer; import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainer;
import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainerState; import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainerState;
import org.apache.hadoop.yarn.util.resource.Resources; import org.apache.hadoop.yarn.util.resource.Resources;
@ -75,6 +76,7 @@ public class AppSchedulingInfo {
private AtomicBoolean userBlacklistChanged = new AtomicBoolean(false); private AtomicBoolean userBlacklistChanged = new AtomicBoolean(false);
private final Set<String> amBlacklist = new HashSet<>(); private final Set<String> amBlacklist = new HashSet<>();
private Set<String> userBlacklist = new HashSet<>(); private Set<String> userBlacklist = new HashSet<>();
private Set<String> requestedPartitions = new HashSet<>();
final Set<Priority> priorities = new TreeSet<>(COMPARATOR); final Set<Priority> priorities = new TreeSet<>(COMPARATOR);
final Map<Priority, Map<String, ResourceRequest>> resourceRequestMap = final Map<Priority, Map<String, ResourceRequest>> resourceRequestMap =
@ -119,6 +121,10 @@ public class AppSchedulingInfo {
return pending; return pending;
} }
public Set<String> getRequestedPartitions() {
return requestedPartitions;
}
/** /**
* Clear any pending requests from this application. * Clear any pending requests from this application.
*/ */
@ -340,6 +346,10 @@ public class AppSchedulingInfo {
asks.put(resourceName, request); asks.put(resourceName, request);
if (resourceName.equals(ResourceRequest.ANY)) { if (resourceName.equals(ResourceRequest.ANY)) {
//update the applications requested labels set
requestedPartitions.add(request.getNodeLabelExpression() == null
? RMNodeLabelsManager.NO_LABEL : request.getNodeLabelExpression());
anyResourcesUpdated = true; anyResourcesUpdated = true;
// Activate application. Metrics activation is done here. // Activate application. Metrics activation is done here.


@ -439,9 +439,8 @@ public abstract class AbstractCSQueue implements CSQueue {
* limit-set-by-parent) * limit-set-by-parent)
*/ */
Resource queueMaxResource = Resource queueMaxResource =
Resources.multiplyAndNormalizeDown(resourceCalculator, getQueueMaxResource(nodePartition, clusterResource);
labelManager.getResourceByLabel(nodePartition, clusterResource),
queueCapacities.getAbsoluteMaximumCapacity(nodePartition), minimumAllocation);
return Resources.min(resourceCalculator, clusterResource, return Resources.min(resourceCalculator, clusterResource,
queueMaxResource, currentResourceLimits.getLimit()); queueMaxResource, currentResourceLimits.getLimit());
} else if (schedulingMode == SchedulingMode.IGNORE_PARTITION_EXCLUSIVITY) { } else if (schedulingMode == SchedulingMode.IGNORE_PARTITION_EXCLUSIVITY) {
@ -453,6 +452,13 @@ public abstract class AbstractCSQueue implements CSQueue {
return Resources.none(); return Resources.none();
} }
Resource getQueueMaxResource(String nodePartition, Resource clusterResource) {
return Resources.multiplyAndNormalizeDown(resourceCalculator,
labelManager.getResourceByLabel(nodePartition, clusterResource),
queueCapacities.getAbsoluteMaximumCapacity(nodePartition),
minimumAllocation);
}
synchronized boolean canAssignToThisQueue(Resource clusterResource, synchronized boolean canAssignToThisQueue(Resource clusterResource,
String nodePartition, ResourceLimits currentResourceLimits, String nodePartition, ResourceLimits currentResourceLimits,
Resource resourceCouldBeUnreserved, SchedulingMode schedulingMode) { Resource resourceCouldBeUnreserved, SchedulingMode schedulingMode) {


@ -17,8 +17,12 @@
*/ */
package org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity; package org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerApp; import java.util.Set;
import org.apache.hadoop.yarn.api.records.Resource; import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.server.resourcemanager.nodelabels.RMNodeLabelsManager;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerApp;
import org.apache.hadoop.yarn.util.resource.Resources;
public class CapacityHeadroomProvider { public class CapacityHeadroomProvider {
@ -45,15 +49,25 @@ public class CapacityHeadroomProvider {
queueCurrentLimit = queueResourceLimitsInfo.getQueueCurrentLimit(); queueCurrentLimit = queueResourceLimitsInfo.getQueueCurrentLimit();
clusterResource = queueResourceLimitsInfo.getClusterResource(); clusterResource = queueResourceLimitsInfo.getClusterResource();
} }
Resource headroom = queue.getHeadroom(user, queueCurrentLimit, Set<String> requestedPartitions =
clusterResource, application); application.getAppSchedulingInfo().getRequestedPartitions();
Resource headroom;
if (requestedPartitions.isEmpty() || (requestedPartitions.size() == 1
&& requestedPartitions.contains(RMNodeLabelsManager.NO_LABEL))) {
headroom = queue.getHeadroom(user, queueCurrentLimit, clusterResource,
application);
} else {
headroom = Resource.newInstance(0, 0);
for (String partition : requestedPartitions) {
Resource partitionHeadRoom = queue.getHeadroom(user, queueCurrentLimit,
clusterResource, application, partition);
Resources.addTo(headroom, partitionHeadRoom);
}
}
// Corner case to deal with applications being slightly over-limit // Corner case to deal with applications being slightly over-limit
if (headroom.getMemory() < 0) { if (headroom.getMemory() < 0) {
headroom.setMemory(0); headroom.setMemory(0);
} }
return headroom; return headroom;
} }
} }
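The CapacityHeadroomProvider hunk ties the two scheduler changes above together: the partitions recorded in AppSchedulingInfo decide whether headroom is the single default-partition value or a sum over every requested partition, with the slightly-over-limit corner case clamped to zero. A numbers-only sketch of that decision, using long memory values and a stand-in lookup in place of Resource and the queue API:

    import java.util.Set;
    import java.util.function.ToLongFunction;

    class HeadroomSketch {
        // "" stands in for the default (empty) node-label partition.
        static long headroomMemory(Set<String> requestedPartitions,
                                   ToLongFunction<String> headroomFor) {
            long headroom;
            if (requestedPartitions.isEmpty()
                || (requestedPartitions.size() == 1 && requestedPartitions.contains(""))) {
                // Only the default partition was requested: keep the old single lookup.
                headroom = headroomFor.applyAsLong("");
            } else {
                // Otherwise add up the per-partition headrooms.
                headroom = 0;
                for (String partition : requestedPartitions) {
                    headroom += headroomFor.applyAsLong(partition);
                }
            }
            // Corner case from the hunk: applications slightly over-limit
            // should see zero headroom, not a negative number.
            return Math.max(0, headroom);
        }
    }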

Some files were not shown because too many files have changed in this diff.