Merge remote-tracking branch 'apache-commit/trunk' into HDDS-48
commit 418cff4820
@@ -617,7 +617,7 @@ OTHER DEALINGS IN THE SOFTWARE.
|
|||
|
||||
The binary distribution of this product bundles these dependencies under the
|
||||
following license:
|
||||
hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/static/bootstrap-3.0.2
|
||||
hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/static/bootstrap-3.3.7
|
||||
hadoop-tools/hadoop-sls/src/main/html/js/thirdparty/bootstrap.min.js
|
||||
hadoop-tools/hadoop-sls/src/main/html/css/bootstrap.min.css
|
||||
hadoop-tools/hadoop-sls/src/main/html/css/bootstrap-responsive.min.css
|
||||
|
@@ -761,7 +761,7 @@ THE SOFTWARE.
|
|||
|
||||
|
||||
For:
|
||||
hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/static/jquery-1.10.2.min.js
|
||||
hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/static/jquery-3.3.1.min.js
|
||||
hadoop-tools/hadoop-sls/src/main/html/js/thirdparty/jquery.js
|
||||
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/jquery
|
||||
Apache HBase - Server which contains JQuery minified javascript library version 1.8.3
|
||||
|
|
|
@@ -18,4 +18,5 @@
|
|||
|
||||
<suppressions>
|
||||
<suppress checks="JavadocPackage" files="[\\/]src[\\/]test[\\/].*"/>
|
||||
<suppress checks="*" files="[\\/]hadoop-ozone[\\/]docs[\\/]themes[\\/]ozonedoc[\\/].*"/>
|
||||
</suppressions>
|
||||
|
|
|
@@ -83,13 +83,6 @@
|
|||
<artifactId>hadoop-minicluster</artifactId>
|
||||
<optional>true</optional>
|
||||
<exclusions>
|
||||
<!-- Exclude the in-development timeline service and
|
||||
add it as an optional runtime dependency
|
||||
-->
|
||||
<exclusion>
|
||||
<groupId>org.apache.hadoop</groupId>
|
||||
<artifactId>hadoop-yarn-server-timelineservice</artifactId>
|
||||
</exclusion>
|
||||
<!-- exclude everything that comes in via the shaded runtime and api TODO remove once we have a filter for "is in these artifacts" -->
|
||||
<!-- Skip jersey, since we need it again here. -->
|
||||
<exclusion>
|
||||
|
@@ -149,14 +142,6 @@
|
|||
<groupId>org.apache.hadoop</groupId>
|
||||
<artifactId>hadoop-yarn-common</artifactId>
|
||||
</exclusion>
|
||||
<exclusion>
|
||||
<groupId>org.apache.hadoop</groupId>
|
||||
<artifactId>hadoop-yarn-server-common</artifactId>
|
||||
</exclusion>
|
||||
<exclusion>
|
||||
<groupId>org.apache.zookeeper</groupId>
|
||||
<artifactId>zookeeper</artifactId>
|
||||
</exclusion>
|
||||
<exclusion>
|
||||
<groupId>org.fusesource.leveldbjni</groupId>
|
||||
<artifactId>leveldbjni-all</artifactId>
|
||||
|
@@ -225,6 +210,110 @@
|
|||
<groupId>javax.servlet</groupId>
|
||||
<artifactId>javax.servlet-api</artifactId>
|
||||
</exclusion>
|
||||
|
||||
<!-- removing dependency jars from yarn-server-common, which are
|
||||
already included in hadoop-client-runtime and hadoop-client-api
|
||||
-->
|
||||
<exclusion>
|
||||
<groupId>org.apache.hadoop</groupId>
|
||||
<artifactId>hadoop-auth</artifactId>
|
||||
</exclusion>
|
||||
<exclusion>
|
||||
<groupId>org.apache.httpcomponents</groupId>
|
||||
<artifactId>httpclient</artifactId>
|
||||
</exclusion>
|
||||
<exclusion>
|
||||
<groupId>org.apache.httpcomponents</groupId>
|
||||
<artifactId>httpcore</artifactId>
|
||||
</exclusion>
|
||||
<exclusion>
|
||||
<groupId>com.nimbusds</groupId>
|
||||
<artifactId>nimbus-jose-jwt</artifactId>
|
||||
</exclusion>
|
||||
<exclusion>
|
||||
<groupId>net.minidev</groupId>
|
||||
<artifactId>json-smart</artifactId>
|
||||
</exclusion>
|
||||
<exclusion>
|
||||
<groupId>net.minidev</groupId>
|
||||
<artifactId>accessors-smart</artifactId>
|
||||
</exclusion>
|
||||
<exclusion>
|
||||
<groupId>org.apache.kerby</groupId>
|
||||
<artifactId>kerb-simplekdc</artifactId>
|
||||
</exclusion>
|
||||
<exclusion>
|
||||
<groupId>org.apache.kerby</groupId>
|
||||
<artifactId>kerb-util</artifactId>
|
||||
</exclusion>
|
||||
<exclusion>
|
||||
<groupId>org.apache.kerby</groupId>
|
||||
<artifactId>token-provider</artifactId>
|
||||
</exclusion>
|
||||
<exclusion>
|
||||
<groupId>org.apache.kerby</groupId>
|
||||
<artifactId>kerb-common</artifactId>
|
||||
</exclusion>
|
||||
<exclusion>
|
||||
<groupId>org.apache.kerby</groupId>
|
||||
<artifactId>kerb-crypto</artifactId>
|
||||
</exclusion>
|
||||
<exclusion>
|
||||
<groupId>org.apache.kerby</groupId>
|
||||
<artifactId>kerby-util</artifactId>
|
||||
</exclusion>
|
||||
<exclusion>
|
||||
<groupId>org.apache.kerby</groupId>
|
||||
<artifactId>kerb-common</artifactId>
|
||||
</exclusion>
|
||||
<exclusion>
|
||||
<groupId>org.apache.kerby</groupId>
|
||||
<artifactId>kerby-pkix</artifactId>
|
||||
</exclusion>
|
||||
<exclusion>
|
||||
<groupId>org.apache.kerby</groupId>
|
||||
<artifactId>kerby-asn1</artifactId>
|
||||
</exclusion>
|
||||
<exclusion>
|
||||
<groupId>org.apache.kerby</groupId>
|
||||
<artifactId>kerb-core</artifactId>
|
||||
</exclusion>
|
||||
<exclusion>
|
||||
<groupId>org.apache.kerby</groupId>
|
||||
<artifactId>kerby-config</artifactId>
|
||||
</exclusion>
|
||||
<exclusion>
|
||||
<groupId>org.apache.kerby</groupId>
|
||||
<artifactId>kerby-xdr</artifactId>
|
||||
</exclusion>
|
||||
<exclusion>
|
||||
<groupId>org.apache.kerby</groupId>
|
||||
<artifactId>kerb-identity</artifactId>
|
||||
</exclusion>
|
||||
<exclusion>
|
||||
<groupId>org.apache.kerby</groupId>
|
||||
<artifactId>kerb-server</artifactId>
|
||||
</exclusion>
|
||||
<exclusion>
|
||||
<groupId>org.apache.kerby</groupId>
|
||||
<artifactId>kerb-identity</artifactId>
|
||||
</exclusion>
|
||||
<exclusion>
|
||||
<groupId>org.apache.kerby</groupId>
|
||||
<artifactId>kerb-admin</artifactId>
|
||||
</exclusion>
|
||||
<exclusion>
|
||||
<groupId>org.apache.curator</groupId>
|
||||
<artifactId>curator-framework</artifactId>
|
||||
</exclusion>
|
||||
<exclusion>
|
||||
<groupId>org.apache.curator</groupId>
|
||||
<artifactId>curator-recipes</artifactId>
|
||||
</exclusion>
|
||||
<exclusion>
|
||||
<groupId>commons-net</groupId>
|
||||
<artifactId>commons-net</artifactId>
|
||||
</exclusion>
|
||||
</exclusions>
|
||||
</dependency>
|
||||
<!-- Add optional runtime dependency on the in-development timeline server module
|
||||
|
@@ -568,8 +657,6 @@
|
|||
<exclude>commons-logging:commons-logging</exclude>
|
||||
<exclude>junit:junit</exclude>
|
||||
<exclude>com.google.code.findbugs:jsr305</exclude>
|
||||
<!-- Keep optional runtime deps out of the shading -->
|
||||
<exclude>org.apache.hadoop:hadoop-yarn-server-timelineservice</exclude>
|
||||
<exclude>log4j:log4j</exclude>
|
||||
<!-- We need a filter that matches just those things that are included in the above artifacts -->
|
||||
</excludes>
|
||||
|
@@ -670,10 +757,43 @@
|
|||
<!-- keep optional runtime configuration out of the jar; downstream can provide -->
|
||||
<exclude>capacity-scheduler.xml</exclude>
|
||||
<exclude>krb5.conf</exclude>
|
||||
<exclude>.keep</exclude>
|
||||
</excludes>
|
||||
</filter>
|
||||
|
||||
<!-- remove .xsd from ehcache -->
|
||||
<filter>
|
||||
<artifact>org.ehcache</artifact>
|
||||
<excludes>
|
||||
<exclude>ehcache-107ext.xsd</exclude>
|
||||
<exclude>ehcache-core.xsd</exclude>
|
||||
</excludes>
|
||||
</filter>
|
||||
|
||||
<!-- remove utility classes which are not required from
|
||||
dnsjava -->
|
||||
<filter>
|
||||
<artifact>dnsjava:dnsjava</artifact>
|
||||
<excludes>
|
||||
<exclude>dig*</exclude>
|
||||
<exclude>jnamed*</exclude>
|
||||
<exclude>lookup*</exclude>
|
||||
<exclude>update*</exclude>
|
||||
</excludes>
|
||||
</filter>
|
||||
|
||||
</filters>
|
||||
|
||||
<!-- relocate classes from mssql-jdbc -->
|
||||
<relocations>
|
||||
<relocation>
|
||||
<pattern>microsoft/</pattern>
|
||||
<shadedPattern>${shaded.dependency.prefix}.microsoft.</shadedPattern>
|
||||
<excludes>
|
||||
<exclude>**/pom.xml</exclude>
|
||||
</excludes>
|
||||
</relocation>
|
||||
|
||||
<relocation>
|
||||
<pattern>org/</pattern>
|
||||
<shadedPattern>${shaded.dependency.prefix}.org.</shadedPattern>
|
||||
|
|
|
@@ -2040,6 +2040,35 @@ function hadoop_start_secure_daemon_wrapper
|
|||
return 0
|
||||
}
|
||||
|
||||
## @description Wait until the process dies or the timeout expires
|
||||
## @audience private
|
||||
## @stability evolving
|
||||
## @param pid
|
||||
## @param timeout
|
||||
function wait_process_to_die_or_timeout
|
||||
{
|
||||
local pid=$1
|
||||
local timeout=$2
|
||||
|
||||
# Normalize timeout
|
||||
# Round up or down
|
||||
timeout=$(printf "%.0f\n" "${timeout}")
|
||||
if [[ ${timeout} -lt 1 ]]; then
|
||||
# minimum 1 second
|
||||
timeout=1
|
||||
fi
|
||||
|
||||
# Wait to see if it's still alive
|
||||
for (( i=0; i < "${timeout}"; i++ ))
|
||||
do
|
||||
if kill -0 "${pid}" > /dev/null 2>&1; then
|
||||
sleep 1
|
||||
else
|
||||
break
|
||||
fi
|
||||
done
|
||||
}
|
||||
|
||||
## @description Stop the non-privileged `command` daemon that
## @description is running at `pidfile`.
|
||||
## @audience public
|
||||
|
@@ -2060,11 +2089,14 @@ function hadoop_stop_daemon
|
|||
pid=$(cat "$pidfile")
|
||||
|
||||
kill "${pid}" >/dev/null 2>&1
|
||||
sleep "${HADOOP_STOP_TIMEOUT}"
|
||||
|
||||
wait_process_to_die_or_timeout "${pid}" "${HADOOP_STOP_TIMEOUT}"
|
||||
|
||||
if kill -0 "${pid}" > /dev/null 2>&1; then
|
||||
hadoop_error "WARNING: ${cmd} did not stop gracefully after ${HADOOP_STOP_TIMEOUT} seconds: Trying to kill with kill -9"
|
||||
kill -9 "${pid}" >/dev/null 2>&1
|
||||
fi
|
||||
wait_process_to_die_or_timeout "${pid}" "${HADOOP_STOP_TIMEOUT}"
|
||||
if ps -p "${pid}" > /dev/null 2>&1; then
|
||||
hadoop_error "ERROR: Unable to kill ${pid}"
|
||||
else
|
||||
|
|
|
@@ -3605,6 +3605,7 @@ public abstract class FileSystem extends Configured implements Closeable {
|
|||
private volatile long bytesReadDistanceOfOneOrTwo;
|
||||
private volatile long bytesReadDistanceOfThreeOrFour;
|
||||
private volatile long bytesReadDistanceOfFiveOrLarger;
|
||||
private volatile long bytesReadErasureCoded;
|
||||
|
||||
/**
|
||||
* Add another StatisticsData object to this one.
|
||||
|
@@ -3621,6 +3622,7 @@ public abstract class FileSystem extends Configured implements Closeable {
|
|||
other.bytesReadDistanceOfThreeOrFour;
|
||||
this.bytesReadDistanceOfFiveOrLarger +=
|
||||
other.bytesReadDistanceOfFiveOrLarger;
|
||||
this.bytesReadErasureCoded += other.bytesReadErasureCoded;
|
||||
}
|
||||
|
||||
/**
|
||||
|
@@ -3638,6 +3640,7 @@ public abstract class FileSystem extends Configured implements Closeable {
|
|||
-this.bytesReadDistanceOfThreeOrFour;
|
||||
this.bytesReadDistanceOfFiveOrLarger =
|
||||
-this.bytesReadDistanceOfFiveOrLarger;
|
||||
this.bytesReadErasureCoded = -this.bytesReadErasureCoded;
|
||||
}
|
||||
|
||||
@Override
|
||||
|
@@ -3682,6 +3685,10 @@ public abstract class FileSystem extends Configured implements Closeable {
|
|||
public long getBytesReadDistanceOfFiveOrLarger() {
|
||||
return bytesReadDistanceOfFiveOrLarger;
|
||||
}
|
||||
|
||||
public long getBytesReadErasureCoded() {
|
||||
return bytesReadErasureCoded;
|
||||
}
|
||||
}
|
||||
|
||||
private interface StatisticsAggregator<T> {
|
||||
|
@@ -3873,6 +3880,14 @@ public abstract class FileSystem extends Configured implements Closeable {
|
|||
getThreadStatistics().writeOps += count;
|
||||
}
|
||||
|
||||
/**
|
||||
* Increment the bytes read on erasure-coded files in the statistics.
|
||||
* @param newBytes the additional bytes read
|
||||
*/
|
||||
public void incrementBytesReadErasureCoded(long newBytes) {
|
||||
getThreadStatistics().bytesReadErasureCoded += newBytes;
|
||||
}
|
||||
|
||||
/**
|
||||
* Increment the bytes read by the network distance in the statistics
|
||||
* In the common network topology setup, distance value should be an even
|
||||
|
@@ -4067,6 +4082,25 @@ public abstract class FileSystem extends Configured implements Closeable {
|
|||
});
|
||||
}
|
||||
|
||||
/**
|
||||
* Get the total number of bytes read on erasure-coded files.
|
||||
* @return the number of bytes
|
||||
*/
|
||||
public long getBytesReadErasureCoded() {
|
||||
return visitAll(new StatisticsAggregator<Long>() {
|
||||
private long bytesReadErasureCoded = 0;
|
||||
|
||||
@Override
|
||||
public void accept(StatisticsData data) {
|
||||
bytesReadErasureCoded += data.bytesReadErasureCoded;
|
||||
}
|
||||
|
||||
public Long aggregate() {
|
||||
return bytesReadErasureCoded;
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
@Override
|
||||
public String toString() {
|
||||
return visitAll(new StatisticsAggregator<String>() {
|
||||
|
|
|
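The statistics hunks above add a per-scheme counter for bytes read from erasure-coded files. As a hedged illustration only (not part of this patch), the snippet below shows how a client could read the new counter through the existing FileSystem.getAllStatistics()/getScheme() APIs once this change is in place:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;

    public class EcReadStatsExample {
      public static void main(String[] args) throws Exception {
        // Obtain a FileSystem so its Statistics object is registered.
        FileSystem fs = FileSystem.get(new Configuration());
        // After reads against erasure-coded files, the new counter is visible here.
        for (FileSystem.Statistics stats : FileSystem.getAllStatistics()) {
          // getBytesReadErasureCoded() is the accessor added by this hunk.
          System.out.println(stats.getScheme() + ": EC bytes read = "
              + stats.getBytesReadErasureCoded());
        }
        fs.close();
      }
    }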
@@ -46,7 +46,8 @@ public class FileSystemStorageStatistics extends StorageStatistics {
|
|||
"bytesReadLocalHost",
|
||||
"bytesReadDistanceOfOneOrTwo",
|
||||
"bytesReadDistanceOfThreeOrFour",
|
||||
"bytesReadDistanceOfFiveOrLarger"
|
||||
"bytesReadDistanceOfFiveOrLarger",
|
||||
"bytesReadErasureCoded"
|
||||
};
|
||||
|
||||
private static class LongStatisticIterator
|
||||
|
@@ -104,6 +105,8 @@ public class FileSystemStorageStatistics extends StorageStatistics {
|
|||
return data.getBytesReadDistanceOfThreeOrFour();
|
||||
case "bytesReadDistanceOfFiveOrLarger":
|
||||
return data.getBytesReadDistanceOfFiveOrLarger();
|
||||
case "bytesReadErasureCoded":
|
||||
return data.getBytesReadErasureCoded();
|
||||
default:
|
||||
return null;
|
||||
}
|
||||
|
|
|
@@ -198,6 +198,12 @@ public class FileUtil {
|
|||
* use getCanonicalPath in File to get the target of the symlink but that
|
||||
* does not indicate if the given path refers to a symlink.
|
||||
*/
|
||||
|
||||
if (f == null) {
|
||||
LOG.warn("Can not read a null symLink");
|
||||
return "";
|
||||
}
|
||||
|
||||
try {
|
||||
return Shell.execCommand(
|
||||
Shell.getReadlinkCommand(f.toString())).trim();
|
||||
|
@@ -1033,6 +1039,13 @@ public class FileUtil {
|
|||
* @return 0 on success
|
||||
*/
|
||||
public static int symLink(String target, String linkname) throws IOException{
|
||||
|
||||
if (target == null || linkname == null) {
|
||||
LOG.warn("Can not create a symLink with a target = " + target
|
||||
+ " and link =" + linkname);
|
||||
return 1;
|
||||
}
|
||||
|
||||
// Run the input paths through Java's File so that they are converted to the
|
||||
// native OS form
|
||||
File targetFile = new File(
|
||||
|
|
|
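For context only, here is a minimal hypothetical call against the guarded FileUtil APIs above (the paths are made up); with the new null checks, readLink returns an empty string and symLink returns 1 instead of failing on null arguments:

    import java.io.File;
    import org.apache.hadoop.fs.FileUtil;

    public class SymLinkGuardExample {
      public static void main(String[] args) throws Exception {
        // Normal case: create a symlink and read its target back.
        int rc = FileUtil.symLink("/tmp/data", "/tmp/data-link");   // hypothetical paths
        String target = FileUtil.readLink(new File("/tmp/data-link"));
        System.out.println("rc=" + rc + ", target=" + target);

        // With this patch, null arguments are logged and handled gracefully.
        System.out.println(FileUtil.symLink(null, "/tmp/broken"));  // returns 1
        System.out.println(FileUtil.readLink(null));                // returns ""
      }
    }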
@@ -57,7 +57,7 @@ class FsUrlConnection extends URLConnection {
|
|||
try {
|
||||
LOG.debug("Connecting to {}", url);
|
||||
FileSystem fs = FileSystem.get(url.toURI(), conf);
|
||||
is = fs.open(new Path(url.getPath()));
|
||||
is = fs.open(new Path(url.toURI()));
|
||||
} catch (URISyntaxException e) {
|
||||
throw new IOException(e.toString());
|
||||
}
|
||||
|
|
|
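The FsUrlConnection hunk switches from url.getPath() to url.toURI() when opening the path. As an illustrative sketch only: URL-based access to a Hadoop file system is normally enabled by installing FsUrlStreamHandlerFactory once per JVM; the URL below is an assumed example, not taken from the patch.

    import java.io.BufferedReader;
    import java.io.InputStreamReader;
    import java.net.URL;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FsUrlStreamHandlerFactory;

    public class FsUrlExample {
      public static void main(String[] args) throws Exception {
        // May only be called once per JVM; afterwards java.net.URL understands hdfs:// URLs.
        URL.setURLStreamHandlerFactory(new FsUrlStreamHandlerFactory(new Configuration()));

        URL url = new URL("hdfs://namenode:8020/user/example/file.txt");  // hypothetical URL
        try (BufferedReader in = new BufferedReader(
            new InputStreamReader(url.openStream()))) {
          System.out.println(in.readLine());
        }
      }
    }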
@@ -1420,8 +1420,11 @@ public final class HttpServer2 implements FilterContainer {
|
|||
|
||||
if (servletContext.getAttribute(ADMINS_ACL) != null &&
|
||||
!userHasAdministratorAccess(servletContext, remoteUser)) {
|
||||
response.sendError(HttpServletResponse.SC_FORBIDDEN, "User "
|
||||
+ remoteUser + " is unauthorized to access this page.");
|
||||
response.sendError(HttpServletResponse.SC_FORBIDDEN,
|
||||
"Unauthenticated users are not " +
|
||||
"authorized to access this page.");
|
||||
LOG.warn("User " + remoteUser + " is unauthorized to access the page "
|
||||
+ request.getRequestURI() + ".");
|
||||
return false;
|
||||
}
|
||||
|
||||
|
|
|
@@ -25,6 +25,7 @@ import org.slf4j.LoggerFactory;
|
|||
|
||||
import java.io.IOException;
|
||||
import java.nio.ByteBuffer;
|
||||
import java.util.concurrent.locks.ReentrantReadWriteLock;
|
||||
|
||||
/**
|
||||
* Abstract native raw decoder for all native coders to extend with.
|
||||
|
@@ -34,13 +35,20 @@ abstract class AbstractNativeRawDecoder extends RawErasureDecoder {
|
|||
public static Logger LOG =
|
||||
LoggerFactory.getLogger(AbstractNativeRawDecoder.class);
|
||||
|
||||
// Protect ISA-L coder data structure in native layer from being accessed and
|
||||
// updated concurrently by the init, release and decode functions.
|
||||
protected final ReentrantReadWriteLock decoderLock =
|
||||
new ReentrantReadWriteLock();
|
||||
|
||||
public AbstractNativeRawDecoder(ErasureCoderOptions coderOptions) {
|
||||
super(coderOptions);
|
||||
}
|
||||
|
||||
@Override
|
||||
protected synchronized void doDecode(ByteBufferDecodingState decodingState)
|
||||
protected void doDecode(ByteBufferDecodingState decodingState)
|
||||
throws IOException {
|
||||
decoderLock.readLock().lock();
|
||||
try {
|
||||
if (nativeCoder == 0) {
|
||||
throw new IOException(String.format("%s closed",
|
||||
getClass().getSimpleName()));
|
||||
|
@@ -64,6 +72,9 @@ abstract class AbstractNativeRawDecoder extends RawErasureDecoder {
|
|||
performDecodeImpl(decodingState.inputs, inputOffsets,
|
||||
decodingState.decodeLength, decodingState.erasedIndexes,
|
||||
decodingState.outputs, outputOffsets);
|
||||
} finally {
|
||||
decoderLock.readLock().unlock();
|
||||
}
|
||||
}
|
||||
|
||||
protected abstract void performDecodeImpl(ByteBuffer[] inputs,
|
||||
|
|
|
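The change above replaces method-level synchronized with a ReentrantReadWriteLock so that decode calls can proceed concurrently while init and release get exclusive access to the native coder handle. A minimal, self-contained sketch of that pattern (illustrative names, not the Hadoop classes):

    import java.io.IOException;
    import java.util.concurrent.locks.ReentrantReadWriteLock;

    // Decode/encode calls take the read lock so they can run in parallel; init and
    // release take the write lock so the native handle is never destroyed mid-call.
    class NativeHandleGuard {
      private final ReentrantReadWriteLock lock = new ReentrantReadWriteLock();
      private long nativeHandle;   // 0 means "released"

      void init(long handle) {
        lock.writeLock().lock();
        try {
          nativeHandle = handle;
        } finally {
          lock.writeLock().unlock();
        }
      }

      void use() throws IOException {
        lock.readLock().lock();
        try {
          if (nativeHandle == 0) {
            throw new IOException("coder closed");
          }
          // ... call into the native library with nativeHandle here ...
        } finally {
          lock.readLock().unlock();
        }
      }

      void release() {
        lock.writeLock().lock();
        try {
          nativeHandle = 0;        // destroyImpl() in the real coders
        } finally {
          lock.writeLock().unlock();
        }
      }
    }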
@@ -25,6 +25,7 @@ import org.slf4j.LoggerFactory;
|
|||
|
||||
import java.io.IOException;
|
||||
import java.nio.ByteBuffer;
|
||||
import java.util.concurrent.locks.ReentrantReadWriteLock;
|
||||
|
||||
/**
|
||||
* Abstract native raw encoder for all native coders to extend with.
|
||||
|
@@ -34,13 +35,20 @@ abstract class AbstractNativeRawEncoder extends RawErasureEncoder {
|
|||
public static Logger LOG =
|
||||
LoggerFactory.getLogger(AbstractNativeRawEncoder.class);
|
||||
|
||||
// Protect ISA-L coder data structure in native layer from being accessed and
|
||||
// updated concurrently by the init, release and encode functions.
|
||||
protected final ReentrantReadWriteLock encoderLock =
|
||||
new ReentrantReadWriteLock();
|
||||
|
||||
public AbstractNativeRawEncoder(ErasureCoderOptions coderOptions) {
|
||||
super(coderOptions);
|
||||
}
|
||||
|
||||
@Override
|
||||
protected synchronized void doEncode(ByteBufferEncodingState encodingState)
|
||||
protected void doEncode(ByteBufferEncodingState encodingState)
|
||||
throws IOException {
|
||||
encoderLock.readLock().lock();
|
||||
try {
|
||||
if (nativeCoder == 0) {
|
||||
throw new IOException(String.format("%s closed",
|
||||
getClass().getSimpleName()));
|
||||
|
@@ -62,6 +70,9 @@ abstract class AbstractNativeRawEncoder extends RawErasureEncoder {
|
|||
|
||||
performEncodeImpl(encodingState.inputs, inputOffsets, dataLen,
|
||||
encodingState.outputs, outputOffsets);
|
||||
} finally {
|
||||
encoderLock.readLock().unlock();
|
||||
}
|
||||
}
|
||||
|
||||
protected abstract void performEncodeImpl(
|
||||
|
|
|
@@ -36,19 +36,30 @@ public class NativeRSRawDecoder extends AbstractNativeRawDecoder {
|
|||
|
||||
public NativeRSRawDecoder(ErasureCoderOptions coderOptions) {
|
||||
super(coderOptions);
|
||||
initImpl(coderOptions.getNumDataUnits(), coderOptions.getNumParityUnits());
|
||||
decoderLock.writeLock().lock();
|
||||
try {
|
||||
initImpl(coderOptions.getNumDataUnits(),
|
||||
coderOptions.getNumParityUnits());
|
||||
} finally {
|
||||
decoderLock.writeLock().unlock();
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
protected synchronized void performDecodeImpl(
|
||||
protected void performDecodeImpl(
|
||||
ByteBuffer[] inputs, int[] inputOffsets, int dataLen, int[] erased,
|
||||
ByteBuffer[] outputs, int[] outputOffsets) throws IOException {
|
||||
decodeImpl(inputs, inputOffsets, dataLen, erased, outputs, outputOffsets);
|
||||
}
|
||||
|
||||
@Override
|
||||
public synchronized void release() {
|
||||
public void release() {
|
||||
decoderLock.writeLock().lock();
|
||||
try {
|
||||
destroyImpl();
|
||||
} finally {
|
||||
decoderLock.writeLock().unlock();
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
|
|
|
@@ -36,19 +36,30 @@ public class NativeRSRawEncoder extends AbstractNativeRawEncoder {
|
|||
|
||||
public NativeRSRawEncoder(ErasureCoderOptions coderOptions) {
|
||||
super(coderOptions);
|
||||
initImpl(coderOptions.getNumDataUnits(), coderOptions.getNumParityUnits());
|
||||
encoderLock.writeLock().lock();
|
||||
try {
|
||||
initImpl(coderOptions.getNumDataUnits(),
|
||||
coderOptions.getNumParityUnits());
|
||||
} finally {
|
||||
encoderLock.writeLock().unlock();
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
protected synchronized void performEncodeImpl(
|
||||
protected void performEncodeImpl(
|
||||
ByteBuffer[] inputs, int[] inputOffsets, int dataLen,
|
||||
ByteBuffer[] outputs, int[] outputOffsets) throws IOException {
|
||||
encodeImpl(inputs, inputOffsets, dataLen, outputs, outputOffsets);
|
||||
}
|
||||
|
||||
@Override
|
||||
public synchronized void release() {
|
||||
public void release() {
|
||||
encoderLock.writeLock().lock();
|
||||
try {
|
||||
destroyImpl();
|
||||
} finally {
|
||||
encoderLock.writeLock().unlock();
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
|
|
|
@@ -36,19 +36,30 @@ public class NativeXORRawDecoder extends AbstractNativeRawDecoder {
|
|||
|
||||
public NativeXORRawDecoder(ErasureCoderOptions coderOptions) {
|
||||
super(coderOptions);
|
||||
initImpl(coderOptions.getNumDataUnits(), coderOptions.getNumParityUnits());
|
||||
decoderLock.writeLock().lock();
|
||||
try {
|
||||
initImpl(coderOptions.getNumDataUnits(),
|
||||
coderOptions.getNumParityUnits());
|
||||
} finally {
|
||||
decoderLock.writeLock().unlock();
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
protected synchronized void performDecodeImpl(
|
||||
protected void performDecodeImpl(
|
||||
ByteBuffer[] inputs, int[] inputOffsets, int dataLen, int[] erased,
|
||||
ByteBuffer[] outputs, int[] outputOffsets) throws IOException {
|
||||
decodeImpl(inputs, inputOffsets, dataLen, erased, outputs, outputOffsets);
|
||||
}
|
||||
|
||||
@Override
|
||||
public synchronized void release() {
|
||||
public void release() {
|
||||
decoderLock.writeLock().lock();
|
||||
try {
|
||||
destroyImpl();
|
||||
} finally {
|
||||
decoderLock.writeLock().unlock();
|
||||
}
|
||||
}
|
||||
|
||||
private native void initImpl(int numDataUnits, int numParityUnits);
|
||||
|
|
|
@@ -36,19 +36,30 @@ public class NativeXORRawEncoder extends AbstractNativeRawEncoder {
|
|||
|
||||
public NativeXORRawEncoder(ErasureCoderOptions coderOptions) {
|
||||
super(coderOptions);
|
||||
initImpl(coderOptions.getNumDataUnits(), coderOptions.getNumParityUnits());
|
||||
encoderLock.writeLock().lock();
|
||||
try {
|
||||
initImpl(coderOptions.getNumDataUnits(),
|
||||
coderOptions.getNumParityUnits());
|
||||
} finally {
|
||||
encoderLock.writeLock().unlock();
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
protected synchronized void performEncodeImpl(
|
||||
protected void performEncodeImpl(
|
||||
ByteBuffer[] inputs, int[] inputOffsets, int dataLen,
|
||||
ByteBuffer[] outputs, int[] outputOffsets) throws IOException {
|
||||
encodeImpl(inputs, inputOffsets, dataLen, outputs, outputOffsets);
|
||||
}
|
||||
|
||||
@Override
|
||||
public synchronized void release() {
|
||||
public void release() {
|
||||
encoderLock.writeLock().lock();
|
||||
try {
|
||||
destroyImpl();
|
||||
} finally {
|
||||
encoderLock.writeLock().unlock();
|
||||
}
|
||||
}
|
||||
|
||||
private native void initImpl(int numDataUnits, int numParityUnits);
|
||||
|
|
|
@@ -0,0 +1,100 @@
|
|||
/**
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
* regarding copyright ownership. The ASF licenses this file
|
||||
* to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
package org.apache.hadoop.security;
|
||||
|
||||
import com.google.common.annotations.VisibleForTesting;
|
||||
import java.net.InetAddress;
|
||||
import java.util.Collection;
|
||||
import java.util.HashMap;
|
||||
import java.util.Map;
|
||||
import org.apache.hadoop.conf.Configuration;
|
||||
import org.slf4j.Logger;
|
||||
import org.slf4j.LoggerFactory;
|
||||
|
||||
/**
|
||||
* An implementation of SaslPropertiesResolver. Used on server side,
|
||||
* returns SASL properties based on the port the client is connecting
|
||||
* to. This should be used when the server is configured to listen on multiple ports.
|
||||
* TODO: when NN multiple listener is enabled, automatically use this
|
||||
* resolver without having to set in config.
|
||||
*
|
||||
* For example, suppose the server runs on two ports, 9000 and 9001, and we
* want port 9000 to use auth-conf and port 9001 to use auth.
|
||||
*
|
||||
* We need to set the following configuration properties:
|
||||
* ingress.port.sasl.configured.ports=9000,9001
|
||||
* ingress.port.sasl.prop.9000=privacy
|
||||
* ingress.port.sasl.prop.9001=authentication
|
||||
*
|
||||
* Note that if a port (say, 9002) is listed in
* ingress.port.sasl.configured.ports but its SASL prop is not set, a
* default QOP of privacy (auth-conf) is used. Likewise, if a port is not
* listed in ingress.port.sasl.configured.ports at all but is passed to
* getServerProperties(), the default SASL prop is returned. Both cases are
* considered misconfiguration.
|
||||
*/
|
||||
public class IngressPortBasedResolver extends SaslPropertiesResolver {
|
||||
|
||||
public static final Logger LOG =
|
||||
LoggerFactory.getLogger(IngressPortBasedResolver.class.getName());
|
||||
|
||||
static final String INGRESS_PORT_SASL_PROP_PREFIX = "ingress.port.sasl.prop";
|
||||
|
||||
static final String INGRESS_PORT_SASL_CONFIGURED_PORTS =
|
||||
"ingress.port.sasl.configured.ports";
|
||||
|
||||
// No need for a concurrent map: after setConf() the mapping never changes
// and is only read.
|
||||
private HashMap<Integer, Map<String, String>> portPropMapping;
|
||||
|
||||
@Override
|
||||
public void setConf(Configuration conf) {
|
||||
super.setConf(conf);
|
||||
portPropMapping = new HashMap<>();
|
||||
Collection<String> portStrings =
|
||||
conf.getTrimmedStringCollection(INGRESS_PORT_SASL_CONFIGURED_PORTS);
|
||||
for (String portString : portStrings) {
|
||||
int port = Integer.parseInt(portString);
|
||||
String configKey = INGRESS_PORT_SASL_PROP_PREFIX + "." + portString;
|
||||
Map<String, String> props = getSaslProperties(conf, configKey,
|
||||
SaslRpcServer.QualityOfProtection.PRIVACY);
|
||||
portPropMapping.put(port, props);
|
||||
}
|
||||
LOG.debug("Configured with port to QOP mapping as:" + portPropMapping);
|
||||
}
|
||||
|
||||
/**
|
||||
* Identify the Sasl Properties to be used for a connection with a client.
|
||||
* @param clientAddress client's address
|
||||
* @param ingressPort the port that the client is connecting to
|
||||
* @return the sasl properties to be used for the connection.
|
||||
*/
|
||||
@Override
|
||||
@VisibleForTesting
|
||||
public Map<String, String> getServerProperties(InetAddress clientAddress,
|
||||
int ingressPort) {
|
||||
LOG.debug("Resolving SASL properties for " + clientAddress + " "
|
||||
+ ingressPort);
|
||||
if (!portPropMapping.containsKey(ingressPort)) {
|
||||
LOG.warn("An un-configured port is being requested " + ingressPort
|
||||
+ " using default");
|
||||
return getDefaultProperties();
|
||||
}
|
||||
return portPropMapping.get(ingressPort);
|
||||
}
|
||||
}
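A hedged configuration sketch for the new resolver: the ingress.port.sasl.* keys come from the class above, while the hadoop.security.saslproperties.resolver.class key is the standard SaslPropertiesResolver hook and is an assumption here rather than something introduced by this patch.

    import org.apache.hadoop.conf.Configuration;

    public class IngressPortResolverConfigExample {
      public static Configuration buildConf() {
        Configuration conf = new Configuration();
        // Assumed standard hook for plugging in a SaslPropertiesResolver implementation.
        conf.set("hadoop.security.saslproperties.resolver.class",
            "org.apache.hadoop.security.IngressPortBasedResolver");
        // Ports 9000/9001 mirror the example in the class javadoc above.
        conf.set("ingress.port.sasl.configured.ports", "9000,9001");
        conf.set("ingress.port.sasl.prop.9000", "privacy");        // auth-conf
        conf.set("ingress.port.sasl.prop.9001", "authentication"); // auth
        return conf;
      }
    }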
|
|
@@ -18,7 +18,6 @@
|
|||
package org.apache.hadoop.security;
|
||||
|
||||
import java.net.InetAddress;
|
||||
import java.util.Locale;
|
||||
import java.util.Map;
|
||||
import java.util.TreeMap;
|
||||
|
||||
|
@@ -95,6 +94,17 @@ public class SaslPropertiesResolver implements Configurable{
|
|||
return properties;
|
||||
}
|
||||
|
||||
/**
|
||||
* Identify the Sasl Properties to be used for a connection with a client.
|
||||
* @param clientAddress client's address
|
||||
* @param ingressPort the port that the client is connecting to
|
||||
* @return the sasl properties to be used for the connection.
|
||||
*/
|
||||
public Map<String, String> getServerProperties(InetAddress clientAddress,
|
||||
int ingressPort){
|
||||
return properties;
|
||||
}
|
||||
|
||||
/**
|
||||
* Identify the Sasl Properties to be used for a connection with a server.
|
||||
* @param serverAddress server's address
|
||||
|
@@ -103,4 +113,39 @@ public class SaslPropertiesResolver implements Configurable{
|
|||
public Map<String, String> getClientProperties(InetAddress serverAddress){
|
||||
return properties;
|
||||
}
|
||||
|
||||
/**
|
||||
* Identify the Sasl Properties to be used for a connection with a server.
|
||||
* @param serverAddress server's address
|
||||
* @param ingressPort the port that is used to connect to server
|
||||
* @return the sasl properties to be used for the connection.
|
||||
*/
|
||||
public Map<String, String> getClientProperties(InetAddress serverAddress,
|
||||
int ingressPort) {
|
||||
return properties;
|
||||
}
|
||||
|
||||
/**
|
||||
* A util function to retrieve specific additional sasl property from config.
|
||||
* Used by subclasses to read sasl properties used by themselves.
|
||||
* @param conf the configuration
|
||||
* @param configKey the config key to look for
|
||||
* @param defaultQOP the default QOP if the key is missing
|
||||
* @return sasl property associated with the given key
|
||||
*/
|
||||
static Map<String, String> getSaslProperties(Configuration conf,
|
||||
String configKey, QualityOfProtection defaultQOP) {
|
||||
Map<String, String> saslProps = new TreeMap<>();
|
||||
String[] qop = conf.getStrings(configKey, defaultQOP.toString());
|
||||
|
||||
for (int i=0; i < qop.length; i++) {
|
||||
qop[i] = QualityOfProtection.valueOf(
|
||||
StringUtils.toUpperCase(qop[i])).getSaslQop();
|
||||
}
|
||||
|
||||
saslProps.put(Sasl.QOP, StringUtils.join(",", qop));
|
||||
saslProps.put(Sasl.SERVER_AUTH, "true");
|
||||
|
||||
return saslProps;
|
||||
}
|
||||
}
|
||||
|
|
|
@@ -20,15 +20,10 @@ package org.apache.hadoop.security;
|
|||
import java.net.InetAddress;
|
||||
import java.net.UnknownHostException;
|
||||
import java.util.Map;
|
||||
import java.util.TreeMap;
|
||||
|
||||
import javax.security.sasl.Sasl;
|
||||
|
||||
import org.apache.hadoop.conf.Configuration;
|
||||
import org.apache.hadoop.security.SaslPropertiesResolver;
|
||||
import org.apache.hadoop.security.SaslRpcServer.QualityOfProtection;
|
||||
import org.apache.hadoop.util.CombinedIPWhiteList;
|
||||
import org.apache.hadoop.util.StringUtils;
|
||||
import org.slf4j.Logger;
|
||||
import org.slf4j.LoggerFactory;
|
||||
|
||||
|
@@ -134,18 +129,7 @@ public class WhitelistBasedResolver extends SaslPropertiesResolver {
|
|||
}
|
||||
|
||||
static Map<String, String> getSaslProperties(Configuration conf) {
|
||||
Map<String, String> saslProps =new TreeMap<String, String>();
|
||||
String[] qop = conf.getStrings(HADOOP_RPC_PROTECTION_NON_WHITELIST,
|
||||
QualityOfProtection.PRIVACY.toString());
|
||||
|
||||
for (int i=0; i < qop.length; i++) {
|
||||
qop[i] = QualityOfProtection.valueOf(
|
||||
StringUtils.toUpperCase(qop[i])).getSaslQop();
|
||||
}
|
||||
|
||||
saslProps.put(Sasl.QOP, StringUtils.join(",", qop));
|
||||
saslProps.put(Sasl.SERVER_AUTH, "true");
|
||||
|
||||
return saslProps;
|
||||
return getSaslProperties(conf, HADOOP_RPC_PROTECTION_NON_WHITELIST,
|
||||
QualityOfProtection.PRIVACY);
|
||||
}
|
||||
}
|
||||
|
|
|
@@ -22,7 +22,6 @@ import java.io.File;
|
|||
import java.io.FileNotFoundException;
|
||||
import java.io.IOException;
|
||||
import java.io.InputStreamReader;
|
||||
import java.io.InputStream;
|
||||
import java.io.InterruptedIOException;
|
||||
import java.nio.charset.Charset;
|
||||
import java.util.Arrays;
|
||||
|
@@ -1018,17 +1017,7 @@ public abstract class Shell {
|
|||
}
|
||||
// close the input stream
|
||||
try {
|
||||
// JDK 7 tries to automatically drain the input streams for us
|
||||
// when the process exits, but since close is not synchronized,
|
||||
// it creates a race if we close the stream first and the same
|
||||
// fd is recycled. the stream draining thread will attempt to
|
||||
// drain that fd!! it may block, OOM, or cause bizarre behavior
|
||||
// see: https://bugs.openjdk.java.net/browse/JDK-8024521
|
||||
// issue is fixed in build 7u60
|
||||
InputStream stdout = process.getInputStream();
|
||||
synchronized (stdout) {
|
||||
inReader.close();
|
||||
}
|
||||
} catch (IOException ioe) {
|
||||
LOG.warn("Error while closing the input stream", ioe);
|
||||
}
|
||||
|
@@ -1037,10 +1026,7 @@ public abstract class Shell {
|
|||
joinThread(errThread);
|
||||
}
|
||||
try {
|
||||
InputStream stderr = process.getErrorStream();
|
||||
synchronized (stderr) {
|
||||
errReader.close();
|
||||
}
|
||||
} catch (IOException ioe) {
|
||||
LOG.warn("Error while closing the error stream", ioe);
|
||||
}
|
||||
|
|
|
@@ -290,6 +290,18 @@ public final class ZKCuratorManager {
|
|||
* @throws Exception If it cannot create the file.
|
||||
*/
|
||||
public void createRootDirRecursively(String path) throws Exception {
|
||||
createRootDirRecursively(path, null);
|
||||
}
|
||||
|
||||
/**
|
||||
* Utility function to ensure that the configured base znode exists.
|
||||
* This recursively creates the znode as well as all of its parents.
|
||||
* @param path Path of the znode to create.
|
||||
* @param zkAcl ACLs for ZooKeeper.
|
||||
* @throws Exception If it cannot create the file.
|
||||
*/
|
||||
public void createRootDirRecursively(String path, List<ACL> zkAcl)
|
||||
throws Exception {
|
||||
String[] pathParts = path.split("/");
|
||||
Preconditions.checkArgument(
|
||||
pathParts.length >= 1 && pathParts[0].isEmpty(),
|
||||
|
@@ -298,7 +310,7 @@ public final class ZKCuratorManager {
|
|||
|
||||
for (int i = 1; i < pathParts.length; i++) {
|
||||
sb.append("/").append(pathParts[i]);
|
||||
create(sb.toString());
|
||||
create(sb.toString(), zkAcl);
|
||||
}
|
||||
}
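For illustration only, a sketch of how the new ACL-aware overload might be called; the znode path and the choice of ZooDefs.Ids.CREATOR_ALL_ACL are assumptions for the example, not taken from the patch.

    import java.util.List;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.util.curator.ZKCuratorManager;
    import org.apache.zookeeper.ZooDefs;
    import org.apache.zookeeper.data.ACL;

    public class ZkRootDirExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        ZKCuratorManager zkManager = new ZKCuratorManager(conf);
        zkManager.start();
        // Restrict the created parents to the authenticated creator (assumed ACL choice).
        List<ACL> acls = ZooDefs.Ids.CREATOR_ALL_ACL;
        zkManager.createRootDirRecursively("/hypothetical/app/root", acls);
        zkManager.close();
      }
    }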
|
||||
|
||||
|
|
|
@@ -181,6 +181,18 @@ Each metrics record contains tags such as ProcessName, SessionId, and Hostname a
|
|||
| `WarmUpEDEKTimeAvgTime` | Average time of warming up EDEK in milliseconds |
|
||||
| `ResourceCheckTime`*num*`s(50/75/90/95/99)thPercentileLatency` | The 50/75/90/95/99th percentile of NameNode resource check latency in milliseconds. Percentile measurement is off by default, by watching no intervals. The intervals are specified by `dfs.metrics.percentiles.intervals`. |
|
||||
| `StorageBlockReport`*num*`s(50/75/90/95/99)thPercentileLatency` | The 50/75/90/95/99th percentile of storage block report latency in milliseconds. Percentile measurement is off by default, by watching no intervals. The intervals are specified by `dfs.metrics.percentiles.intervals`. |
|
||||
| `EditLogTailTimeNumOps` | Total number of times the standby NameNode tailed the edit log |
|
||||
| `EditLogTailTimeAvgTime` | Average time (in milliseconds) spent by standby NameNode in tailing edit log |
|
||||
| `EditLogTailTime`*num*`s(50/75/90/95/99)thPercentileLatency` | The 50/75/90/95/99th percentile of time spent in tailing edit logs by standby NameNode, in milliseconds. Percentile measurement is off by default, by watching no intervals. The intervals are specified by `dfs.metrics.percentiles.intervals`. |
|
||||
| `EditLogFetchTimeNumOps` | Total number of times the standby NameNode fetched remote edit streams from journal nodes |
|
||||
| `EditLogFetchTimeAvgTime` | Average time (in milliseconds) spent by standby NameNode in fetching remote edit streams from journal nodes |
|
||||
| `EditLogFetchTime`*num*`s(50/75/90/95/99)thPercentileLatency` | The 50/75/90/95/99th percentile of time spent in fetching edit streams from journal nodes by standby NameNode, in milliseconds. Percentile measurement is off by default, by watching no intervals. The intervals are specified by `dfs.metrics.percentiles.intervals`. |
|
||||
| `NumEditLogLoadedNumOps` | Total number of times edits were loaded by standby NameNode |
|
||||
| `NumEditLogLoadedAvgCount` | Average number of edits loaded by standby NameNode in each edit log tailing |
|
||||
| `NumEditLogLoaded`*num*`s(50/75/90/95/99)thPercentileCount` | The 50/75/90/95/99th percentile of number of edits loaded by standby NameNode in each edit log tailing. Percentile measurement is off by default, by watching no intervals. The intervals are specified by `dfs.metrics.percentiles.intervals`. |
|
||||
| `EditLogTailIntervalNumOps` | Total number of intervals between edit log tailings by standby NameNode |
|
||||
| `EditLogTailIntervalAvgTime` | Average time of intervals between edit log tailings by standby NameNode in milliseconds |
|
||||
| `EditLogTailInterval`*num*`s(50/75/90/95/99)thPercentileLatency` | The 50/75/90/95/99th percentile of time between edit log tailings by standby NameNode, in milliseconds. Percentile measurement is off by default, by watching no intervals. The intervals are specified by `dfs.metrics.percentiles.intervals`. |
|
||||
|
||||
FSNamesystem
|
||||
------------
|
||||
|
|
|
@@ -0,0 +1,309 @@
|
|||
|
||||
<!---
|
||||
# Licensed to the Apache Software Foundation (ASF) under one
|
||||
# or more contributor license agreements. See the NOTICE file
|
||||
# distributed with this work for additional information
|
||||
# regarding copyright ownership. The ASF licenses this file
|
||||
# to you under the Apache License, Version 2.0 (the
|
||||
# "License"); you may not use this file except in compliance
|
||||
# with the License. You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
-->
|
||||
# Apache Hadoop Changelog
|
||||
|
||||
## Release 3.0.3 - 2018-05-31
|
||||
|
||||
### INCOMPATIBLE CHANGES:
|
||||
|
||||
| JIRA | Summary | Priority | Component | Reporter | Contributor |
|
||||
|:---- |:---- | :--- |:---- |:---- |:---- |
|
||||
| [HDFS-13099](https://issues.apache.org/jira/browse/HDFS-13099) | RBF: Use the ZooKeeper as the default State Store | Minor | documentation | Yiqun Lin | Yiqun Lin |
|
||||
|
||||
|
||||
### NEW FEATURES:
|
||||
|
||||
| JIRA | Summary | Priority | Component | Reporter | Contributor |
|
||||
|:---- |:---- | :--- |:---- |:---- |:---- |
|
||||
| [HDFS-13283](https://issues.apache.org/jira/browse/HDFS-13283) | Percentage based Reserved Space Calculation for DataNode | Major | datanode, hdfs | Lukas Majercak | Lukas Majercak |
|
||||
|
||||
|
||||
### IMPROVEMENTS:
|
||||
|
||||
| JIRA | Summary | Priority | Component | Reporter | Contributor |
|
||||
|:---- |:---- | :--- |:---- |:---- |:---- |
|
||||
| [HDFS-12455](https://issues.apache.org/jira/browse/HDFS-12455) | WebHDFS - Adding "snapshot enabled" status to ListStatus query result. | Major | snapshots, webhdfs | Ajay Kumar | Ajay Kumar |
|
||||
| [HDFS-13062](https://issues.apache.org/jira/browse/HDFS-13062) | Provide support for JN to use separate journal disk per namespace | Major | federation, journal-node | Bharat Viswanadham | Bharat Viswanadham |
|
||||
| [HDFS-12933](https://issues.apache.org/jira/browse/HDFS-12933) | Improve logging when DFSStripedOutputStream failed to write some blocks | Minor | erasure-coding | Xiao Chen | chencan |
|
||||
| [HADOOP-13972](https://issues.apache.org/jira/browse/HADOOP-13972) | ADLS to support per-store configuration | Major | fs/adl | John Zhuge | Sharad Sonker |
|
||||
| [YARN-7813](https://issues.apache.org/jira/browse/YARN-7813) | Capacity Scheduler Intra-queue Preemption should be configurable for each queue | Major | capacity scheduler, scheduler preemption | Eric Payne | Eric Payne |
|
||||
| [HDFS-13175](https://issues.apache.org/jira/browse/HDFS-13175) | Add more information for checking argument in DiskBalancerVolume | Minor | diskbalancer | Lei (Eddy) Xu | Lei (Eddy) Xu |
|
||||
| [HDFS-11187](https://issues.apache.org/jira/browse/HDFS-11187) | Optimize disk access for last partial chunk checksum of Finalized replica | Major | datanode | Wei-Chiu Chuang | Gabor Bota |
|
||||
| [MAPREDUCE-7061](https://issues.apache.org/jira/browse/MAPREDUCE-7061) | SingleCluster setup document needs to be updated | Major | . | Bharat Viswanadham | Bharat Viswanadham |
|
||||
| [HADOOP-15263](https://issues.apache.org/jira/browse/HADOOP-15263) | hadoop cloud-storage module to mark hadoop-common as provided; add azure-datalake | Minor | build | Steve Loughran | Steve Loughran |
|
||||
| [MAPREDUCE-7060](https://issues.apache.org/jira/browse/MAPREDUCE-7060) | Cherry Pick PathOutputCommitter class/factory to branch-3.0 | Minor | . | Steve Loughran | Steve Loughran |
|
||||
| [HADOOP-15279](https://issues.apache.org/jira/browse/HADOOP-15279) | increase maven heap size recommendations | Minor | build, documentation, test | Allen Wittenauer | Allen Wittenauer |
|
||||
| [HDFS-13170](https://issues.apache.org/jira/browse/HDFS-13170) | Port webhdfs unmaskedpermission parameter to HTTPFS | Major | . | Stephen O'Donnell | Stephen O'Donnell |
|
||||
| [HDFS-13225](https://issues.apache.org/jira/browse/HDFS-13225) | StripeReader#checkMissingBlocks() 's IOException info is incomplete | Major | erasure-coding, hdfs-client | lufei | lufei |
|
||||
| [HDFS-11394](https://issues.apache.org/jira/browse/HDFS-11394) | Support for getting erasure coding policy through WebHDFS#FileStatus | Major | erasure-coding, namenode | Kai Sasaki | Kai Sasaki |
|
||||
| [HADOOP-15311](https://issues.apache.org/jira/browse/HADOOP-15311) | HttpServer2 needs a way to configure the acceptor/selector count | Major | common | Erik Krogen | Erik Krogen |
|
||||
| [HDFS-11600](https://issues.apache.org/jira/browse/HDFS-11600) | Refactor TestDFSStripedOutputStreamWithFailure test classes | Minor | erasure-coding, test | Andrew Wang | SammiChen |
|
||||
| [HDFS-12884](https://issues.apache.org/jira/browse/HDFS-12884) | BlockUnderConstructionFeature.truncateBlock should be of type BlockInfo | Major | namenode | Konstantin Shvachko | chencan |
|
||||
| [HADOOP-15334](https://issues.apache.org/jira/browse/HADOOP-15334) | Upgrade Maven surefire plugin | Major | build | Arpit Agarwal | Arpit Agarwal |
|
||||
| [HADOOP-15312](https://issues.apache.org/jira/browse/HADOOP-15312) | Undocumented KeyProvider configuration keys | Major | . | Wei-Chiu Chuang | LiXin Ge |
|
||||
| [YARN-7623](https://issues.apache.org/jira/browse/YARN-7623) | Fix the CapacityScheduler Queue configuration documentation | Major | . | Arun Suresh | Jonathan Hung |
|
||||
| [HDFS-13314](https://issues.apache.org/jira/browse/HDFS-13314) | NameNode should optionally exit if it detects FsImage corruption | Major | namenode | Arpit Agarwal | Arpit Agarwal |
|
||||
| [HADOOP-15342](https://issues.apache.org/jira/browse/HADOOP-15342) | Update ADLS connector to use the current SDK version (2.2.7) | Major | fs/adl | Atul Sikaria | Atul Sikaria |
|
||||
| [HDFS-13462](https://issues.apache.org/jira/browse/HDFS-13462) | Add BIND\_HOST configuration for JournalNode's HTTP and RPC Servers | Major | hdfs, journal-node | Lukas Majercak | Lukas Majercak |
|
||||
| [HADOOP-14841](https://issues.apache.org/jira/browse/HADOOP-14841) | Kms client should disconnect if unable to get output stream from connection. | Major | kms | Xiao Chen | Rushabh S Shah |
|
||||
| [HDFS-12981](https://issues.apache.org/jira/browse/HDFS-12981) | renameSnapshot a Non-Existent snapshot to itself should throw error | Minor | hdfs | Sailesh Patel | Kitti Nanasi |
|
||||
| [YARN-8201](https://issues.apache.org/jira/browse/YARN-8201) | Skip stacktrace of few exception from ClientRMService | Minor | . | Bibin A Chundatt | Bilwa S T |
|
||||
| [HADOOP-15441](https://issues.apache.org/jira/browse/HADOOP-15441) | Log kms url and token service at debug level. | Minor | . | Wei-Chiu Chuang | Gabor Bota |
|
||||
| [HDFS-13544](https://issues.apache.org/jira/browse/HDFS-13544) | Improve logging for JournalNode in federated cluster | Major | federation, hdfs | Hanisha Koneru | Hanisha Koneru |
|
||||
| [HADOOP-15486](https://issues.apache.org/jira/browse/HADOOP-15486) | Make NetworkTopology#netLock fair | Major | net | Nanda kumar | Nanda kumar |
|
||||
| [HDFS-13493](https://issues.apache.org/jira/browse/HDFS-13493) | Reduce the HttpServer2 thread count on DataNodes | Major | datanode | Erik Krogen | Erik Krogen |
|
||||
| [HADOOP-15449](https://issues.apache.org/jira/browse/HADOOP-15449) | Increase default timeout of ZK session to avoid frequent NameNode failover | Critical | common | Karthik Palanisamy | Karthik Palanisamy |
|
||||
|
||||
|
||||
### BUG FIXES:
|
||||
|
||||
| JIRA | Summary | Priority | Component | Reporter | Contributor |
|
||||
|:---- |:---- | :--- |:---- |:---- |:---- |
|
||||
| [HDFS-11968](https://issues.apache.org/jira/browse/HDFS-11968) | ViewFS: StoragePolicies commands fail with HDFS federation | Major | hdfs | Mukul Kumar Singh | Mukul Kumar Singh |
|
||||
| [HDFS-12813](https://issues.apache.org/jira/browse/HDFS-12813) | RequestHedgingProxyProvider can hide Exception thrown from the Namenode for proxy size of 1 | Major | ha | Mukul Kumar Singh | Mukul Kumar Singh |
|
||||
| [HDFS-13048](https://issues.apache.org/jira/browse/HDFS-13048) | LowRedundancyReplicatedBlocks metric can be negative | Major | metrics | Akira Ajisaka | Akira Ajisaka |
|
||||
| [HDFS-13115](https://issues.apache.org/jira/browse/HDFS-13115) | In getNumUnderConstructionBlocks(), ignore the inodeIds for which the inodes have been deleted | Major | . | Yongjun Zhang | Yongjun Zhang |
|
||||
| [HDFS-10453](https://issues.apache.org/jira/browse/HDFS-10453) | ReplicationMonitor thread could stuck for long time due to the race between replication and delete of same file in a large cluster. | Major | namenode | He Xiaoqiao | He Xiaoqiao |
|
||||
| [MAPREDUCE-7053](https://issues.apache.org/jira/browse/MAPREDUCE-7053) | Timed out tasks can fail to produce thread dump | Major | . | Jason Lowe | Jason Lowe |
|
||||
| [HADOOP-15206](https://issues.apache.org/jira/browse/HADOOP-15206) | BZip2 drops and duplicates records when input split size is small | Major | . | Aki Tanaka | Aki Tanaka |
|
||||
| [YARN-7937](https://issues.apache.org/jira/browse/YARN-7937) | Fix http method name in Cluster Application Timeout Update API example request | Minor | docs, documentation | Charan Hebri | Charan Hebri |
|
||||
| [YARN-7947](https://issues.apache.org/jira/browse/YARN-7947) | Capacity Scheduler intra-queue preemption can NPE for non-schedulable apps | Major | capacity scheduler, scheduler preemption | Eric Payne | Eric Payne |
|
||||
| [HADOOP-10571](https://issues.apache.org/jira/browse/HADOOP-10571) | Use Log.\*(Object, Throwable) overload to log exceptions | Major | . | Arpit Agarwal | Andras Bokor |
|
||||
| [HDFS-12781](https://issues.apache.org/jira/browse/HDFS-12781) | After Datanode down, In Namenode UI Datanode tab is throwing warning message. | Major | datanode | Harshakiran Reddy | Brahma Reddy Battula |
|
||||
| [HDFS-12070](https://issues.apache.org/jira/browse/HDFS-12070) | Failed block recovery leaves files open indefinitely and at risk for data loss | Major | . | Daryn Sharp | Kihwal Lee |
|
||||
| [HDFS-13145](https://issues.apache.org/jira/browse/HDFS-13145) | SBN crash when transition to ANN with in-progress edit tailing enabled | Major | ha, namenode | Chao Sun | Chao Sun |
|
||||
| [HDFS-13114](https://issues.apache.org/jira/browse/HDFS-13114) | CryptoAdmin#ReencryptZoneCommand should resolve Namespace info from path | Major | encryption, hdfs | Hanisha Koneru | Hanisha Koneru |
|
||||
| [HDFS-13081](https://issues.apache.org/jira/browse/HDFS-13081) | Datanode#checkSecureConfig should allow SASL and privileged HTTP | Major | datanode, security | Xiaoyu Yao | Ajay Kumar |
|
||||
| [MAPREDUCE-7059](https://issues.apache.org/jira/browse/MAPREDUCE-7059) | Downward Compatibility issue: MR job fails because of unknown setErasureCodingPolicy method from 3.x client to HDFS 2.x cluster | Critical | job submission | Jiandan Yang | Jiandan Yang |
|
||||
| [HADOOP-15275](https://issues.apache.org/jira/browse/HADOOP-15275) | Incorrect javadoc for return type of RetryPolicy#shouldRetry | Minor | documentation | Nanda kumar | Nanda kumar |
|
||||
| [YARN-7511](https://issues.apache.org/jira/browse/YARN-7511) | NPE in ContainerLocalizer when localization failed for running container | Major | nodemanager | Tao Yang | Tao Yang |
|
||||
| [MAPREDUCE-7023](https://issues.apache.org/jira/browse/MAPREDUCE-7023) | TestHadoopArchiveLogs.testCheckFilesAndSeedApps fails on rerun | Minor | test | Gergely Novák | Gergely Novák |
|
||||
| [HDFS-13040](https://issues.apache.org/jira/browse/HDFS-13040) | Kerberized inotify client fails despite kinit properly | Major | namenode | Wei-Chiu Chuang | Xiao Chen |
|
||||
| [YARN-7736](https://issues.apache.org/jira/browse/YARN-7736) | Fix itemization in YARN federation document | Minor | documentation | Akira Ajisaka | Sen Zhao |
|
||||
| [HDFS-13164](https://issues.apache.org/jira/browse/HDFS-13164) | File not closed if streamer fail with DSQuotaExceededException | Major | hdfs-client | Xiao Chen | Xiao Chen |
|
||||
| [HADOOP-15289](https://issues.apache.org/jira/browse/HADOOP-15289) | FileStatus.readFields() assertion incorrect | Critical | . | Steve Loughran | Steve Loughran |
|
||||
| [HDFS-13109](https://issues.apache.org/jira/browse/HDFS-13109) | Support fully qualified hdfs path in EZ commands | Major | hdfs | Hanisha Koneru | Hanisha Koneru |
|
||||
| [HADOOP-15296](https://issues.apache.org/jira/browse/HADOOP-15296) | Fix a wrong link for RBF in the top page | Minor | documentation | Takanobu Asanuma | Takanobu Asanuma |
|
||||
| [HADOOP-15273](https://issues.apache.org/jira/browse/HADOOP-15273) | distcp can't handle remote stores with different checksum algorithms | Critical | tools/distcp | Steve Loughran | Steve Loughran |
|
||||
| [MAPREDUCE-6930](https://issues.apache.org/jira/browse/MAPREDUCE-6930) | mapreduce.map.cpu.vcores and mapreduce.reduce.cpu.vcores are both present twice in mapred-default.xml | Major | mrv2 | Daniel Templeton | Sen Zhao |
|
||||
| [HDFS-13190](https://issues.apache.org/jira/browse/HDFS-13190) | Document WebHDFS support for snapshot diff | Major | documentation, webhdfs | Xiaoyu Yao | Lokesh Jain |
|
||||
| [HDFS-13244](https://issues.apache.org/jira/browse/HDFS-13244) | Add stack, conf, metrics links to utilities dropdown in NN webUI | Major | . | Bharat Viswanadham | Bharat Viswanadham |
|
||||
| [HDFS-12156](https://issues.apache.org/jira/browse/HDFS-12156) | TestFSImage fails without -Pnative | Major | test | Akira Ajisaka | Akira Ajisaka |
|
||||
| [HDFS-13239](https://issues.apache.org/jira/browse/HDFS-13239) | Fix non-empty dir warning message when setting default EC policy | Minor | . | Hanisha Koneru | Bharat Viswanadham |
|
||||
| [YARN-8022](https://issues.apache.org/jira/browse/YARN-8022) | ResourceManager UI cluster/app/\<app-id\> page fails to render | Blocker | webapp | Tarun Parimi | Tarun Parimi |
|
||||
| [MAPREDUCE-7064](https://issues.apache.org/jira/browse/MAPREDUCE-7064) | Flaky test TestTaskAttempt#testReducerCustomResourceTypes | Major | client, test | Peter Bacsko | Peter Bacsko |
|
||||
| [HDFS-12723](https://issues.apache.org/jira/browse/HDFS-12723) | TestReadStripedFileWithMissingBlocks#testReadFileWithMissingBlocks failing consistently. | Major | . | Rushabh S Shah | Ajay Kumar |
|
||||
| [YARN-7636](https://issues.apache.org/jira/browse/YARN-7636) | Re-reservation count may overflow when cluster resource exhausted for a long time | Major | capacityscheduler | Tao Yang | Tao Yang |
|
||||
| [HDFS-12886](https://issues.apache.org/jira/browse/HDFS-12886) | Ignore minReplication for block recovery | Major | hdfs, namenode | Lukas Majercak | Lukas Majercak |
|
||||
| [HDFS-13296](https://issues.apache.org/jira/browse/HDFS-13296) | GenericTestUtils generates paths with drive letter in Windows and fail webhdfs related test cases | Major | . | Xiao Liang | Xiao Liang |
|
||||
| [HDFS-13268](https://issues.apache.org/jira/browse/HDFS-13268) | TestWebHdfsFileContextMainOperations fails on Windows | Major | . | Íñigo Goiri | Xiao Liang |
|
||||
| [YARN-8054](https://issues.apache.org/jira/browse/YARN-8054) | Improve robustness of the LocalDirsHandlerService MonitoringTimerTask thread | Major | . | Jonathan Eagles | Jonathan Eagles |
|
||||
| [HDFS-13195](https://issues.apache.org/jira/browse/HDFS-13195) | DataNode conf page cannot display the current value after reconfig | Minor | datanode | maobaolong | maobaolong |
|
||||
| [YARN-8063](https://issues.apache.org/jira/browse/YARN-8063) | DistributedShellTimelinePlugin wrongly check for entityId instead of entityType | Major | . | Rohith Sharma K S | Rohith Sharma K S |
|
||||
| [YARN-8062](https://issues.apache.org/jira/browse/YARN-8062) | yarn rmadmin -getGroups returns group from which the user has been removed | Critical | . | Sumana Sathish | Sunil Govindan |
|
||||
| [YARN-8068](https://issues.apache.org/jira/browse/YARN-8068) | Application Priority field causes NPE in app timeline publish when Hadoop 2.7 based clients to 2.8+ | Blocker | yarn | Sunil Govindan | Sunil Govindan |
|
||||
| [YARN-7734](https://issues.apache.org/jira/browse/YARN-7734) | YARN-5418 breaks TestContainerLogsPage.testContainerLogPageAccess | Major | . | Miklos Szegedi | Tao Yang |
|
||||
| [HDFS-13087](https://issues.apache.org/jira/browse/HDFS-13087) | Snapshotted encryption zone information should be immutable | Major | encryption | LiXin Ge | LiXin Ge |
|
||||
| [HADOOP-12862](https://issues.apache.org/jira/browse/HADOOP-12862) | LDAP Group Mapping over SSL can not specify trust store | Major | . | Wei-Chiu Chuang | Wei-Chiu Chuang |
|
||||
| [HDFS-13349](https://issues.apache.org/jira/browse/HDFS-13349) | Unresolved merge conflict in ViewFs.md | Blocker | documentation | Gera Shegalov | Yiqun Lin |
|
||||
| [HADOOP-15317](https://issues.apache.org/jira/browse/HADOOP-15317) | Improve NetworkTopology chooseRandom's loop | Major | . | Xiao Chen | Xiao Chen |
|
||||
| [HADOOP-15355](https://issues.apache.org/jira/browse/HADOOP-15355) | TestCommonConfigurationFields is broken by HADOOP-15312 | Major | test | Konstantin Shvachko | LiXin Ge |
|
||||
| [HDFS-13350](https://issues.apache.org/jira/browse/HDFS-13350) | Negative legacy block ID will confuse Erasure Coding to be considered as striped block | Major | erasure-coding | Lei (Eddy) Xu | Lei (Eddy) Xu |
|
||||
| [YARN-7905](https://issues.apache.org/jira/browse/YARN-7905) | Parent directory permission incorrect during public localization | Critical | . | Bibin A Chundatt | Bilwa S T |
|
||||
| [HDFS-13420](https://issues.apache.org/jira/browse/HDFS-13420) | License header is displayed in ArchivalStorage/MemoryStorage html pages | Minor | documentation | Akira Ajisaka | Akira Ajisaka |
|
||||
| [HDFS-13328](https://issues.apache.org/jira/browse/HDFS-13328) | Abstract ReencryptionHandler recursive logic in separate class. | Major | namenode | Surendra Singh Lilhore | Surendra Singh Lilhore |
|
||||
| [HADOOP-15357](https://issues.apache.org/jira/browse/HADOOP-15357) | Configuration.getPropsWithPrefix no longer does variable substitution | Major | . | Jim Brennan | Jim Brennan |
|
||||
| [MAPREDUCE-7062](https://issues.apache.org/jira/browse/MAPREDUCE-7062) | Update mapreduce.job.tags description for making use for ATSv2 purpose. | Major | . | Charan Hebri | Charan Hebri |
|
||||
| [YARN-8073](https://issues.apache.org/jira/browse/YARN-8073) | TimelineClientImpl doesn't honor yarn.timeline-service.versions configuration | Major | . | Rohith Sharma K S | Rohith Sharma K S |
|
||||
| [HDFS-13427](https://issues.apache.org/jira/browse/HDFS-13427) | Fix the section titles of transparent encryption document | Minor | documentation | Akira Ajisaka | Akira Ajisaka |
|
||||
| [YARN-7527](https://issues.apache.org/jira/browse/YARN-7527) | Over-allocate node resource in async-scheduling mode of CapacityScheduler | Major | capacityscheduler | Tao Yang | Tao Yang |
|
||||
| [YARN-8120](https://issues.apache.org/jira/browse/YARN-8120) | JVM can crash with SIGSEGV when exiting due to custom leveldb logger | Major | nodemanager, resourcemanager | Jason Lowe | Jason Lowe |
|
||||
| [YARN-8147](https://issues.apache.org/jira/browse/YARN-8147) | TestClientRMService#testGetApplications sporadically fails | Major | test | Jason Lowe | Jason Lowe |
|
||||
| [HDFS-13436](https://issues.apache.org/jira/browse/HDFS-13436) | Fix javadoc of package-info.java | Major | documentation | Akira Ajisaka | Akira Ajisaka |
|
||||
| [HADOOP-14970](https://issues.apache.org/jira/browse/HADOOP-14970) | MiniHadoopClusterManager doesn't respect lack of format option | Minor | . | Erik Krogen | Erik Krogen |
|
||||
| [HDFS-13330](https://issues.apache.org/jira/browse/HDFS-13330) | ShortCircuitCache#fetchOrCreate never retries | Major | . | Wei-Chiu Chuang | Gabor Bota |
|
||||
| [YARN-8156](https://issues.apache.org/jira/browse/YARN-8156) | Increase the default value of yarn.timeline-service.app-collector.linger-period.ms | Major | . | Rohith Sharma K S | Charan Hebri |
|
||||
| [YARN-8165](https://issues.apache.org/jira/browse/YARN-8165) | Incorrect queue name logging in AbstractContainerAllocator | Trivial | capacityscheduler | Weiwei Yang | Weiwei Yang |
|
||||
| [HDFS-12828](https://issues.apache.org/jira/browse/HDFS-12828) | OIV ReverseXML Processor fails with escaped characters | Critical | hdfs | Erik Krogen | Erik Krogen |
|
||||
| [HADOOP-15396](https://issues.apache.org/jira/browse/HADOOP-15396) | Some java source files are executable | Minor | . | Akira Ajisaka | Shashikant Banerjee |
|
||||
| [YARN-6827](https://issues.apache.org/jira/browse/YARN-6827) | [ATS1/1.5] NPE exception while publishing recovering applications into ATS during RM restart. | Major | resourcemanager | Rohith Sharma K S | Rohith Sharma K S |
|
||||
| [YARN-7786](https://issues.apache.org/jira/browse/YARN-7786) | NullPointerException while launching ApplicationMaster | Major | . | lujie | lujie |
|
||||
| [HDFS-10183](https://issues.apache.org/jira/browse/HDFS-10183) | Prevent race condition during class initialization | Minor | fs | Pavel Avgustinov | Pavel Avgustinov |
|
||||
| [HDFS-13388](https://issues.apache.org/jira/browse/HDFS-13388) | RequestHedgingProxyProvider calls multiple configured NNs all the time | Major | hdfs-client | Jinglun | Jinglun |
|
||||
| [HDFS-13433](https://issues.apache.org/jira/browse/HDFS-13433) | webhdfs requests can be routed incorrectly in federated cluster | Critical | webhdfs | Arpit Agarwal | Arpit Agarwal |
|
||||
| [HDFS-13408](https://issues.apache.org/jira/browse/HDFS-13408) | MiniDFSCluster to support being built on randomized base directory | Major | test | Xiao Liang | Xiao Liang |
|
||||
| [HADOOP-15390](https://issues.apache.org/jira/browse/HADOOP-15390) | Yarn RM logs flooded by DelegationTokenRenewer trying to renew KMS tokens | Critical | . | Xiao Chen | Xiao Chen |
|
||||
| [HDFS-13336](https://issues.apache.org/jira/browse/HDFS-13336) | Test cases of TestWriteToReplica failed in windows | Major | . | Xiao Liang | Xiao Liang |
|
||||
| [YARN-7598](https://issues.apache.org/jira/browse/YARN-7598) | Document how to use classpath isolation for aux-services in YARN | Major | . | Xuan Gong | Xuan Gong |
|
||||
| [YARN-8183](https://issues.apache.org/jira/browse/YARN-8183) | Fix ConcurrentModificationException inside RMAppAttemptMetrics#convertAtomicLongMaptoLongMap | Critical | yarn | Sumana Sathish | Suma Shivaprasad |
|
||||
| [HADOOP-15411](https://issues.apache.org/jira/browse/HADOOP-15411) | AuthenticationFilter should use Configuration.getPropsWithPrefix instead of iterator | Critical | . | Suma Shivaprasad | Suma Shivaprasad |
|
||||
| [MAPREDUCE-7042](https://issues.apache.org/jira/browse/MAPREDUCE-7042) | Killed MR job data does not move to mapreduce.jobhistory.done-dir when ATS v2 is enabled | Major | . | Yesha Vora | Xuan Gong |
|
||||
| [YARN-8205](https://issues.apache.org/jira/browse/YARN-8205) | Application State is not updated to ATS if AM launching is delayed. | Critical | . | Sumana Sathish | Rohith Sharma K S |
|
||||
| [YARN-8004](https://issues.apache.org/jira/browse/YARN-8004) | Add unit tests for inter queue preemption for dominant resource calculator | Critical | yarn | Sumana Sathish | Zian Chen |
|
||||
| [YARN-8221](https://issues.apache.org/jira/browse/YARN-8221) | RMWebServices also need to honor yarn.resourcemanager.display.per-user-apps | Major | webapp | Sunil Govindan | Sunil Govindan |
|
||||
| [YARN-8210](https://issues.apache.org/jira/browse/YARN-8210) | AMRMClient logging on every heartbeat to track updation of AM RM token causes too many log lines to be generated in AM logs | Major | yarn | Suma Shivaprasad | Suma Shivaprasad |
|
||||
| [HDFS-13509](https://issues.apache.org/jira/browse/HDFS-13509) | Bug fix for breakHardlinks() of ReplicaInfo/LocalReplica, and fix TestFileAppend failures on Windows | Major | . | Xiao Liang | Xiao Liang |
|
||||
| [MAPREDUCE-7073](https://issues.apache.org/jira/browse/MAPREDUCE-7073) | Optimize TokenCache#obtainTokensForNamenodesInternal | Major | . | Bibin A Chundatt | Bibin A Chundatt |
|
||||
| [HADOOP-15406](https://issues.apache.org/jira/browse/HADOOP-15406) | hadoop-nfs dependencies for mockito and junit are not test scope | Major | nfs | Jason Lowe | Jason Lowe |
|
||||
| [YARN-6385](https://issues.apache.org/jira/browse/YARN-6385) | Fix checkstyle warnings in TestFileSystemApplicationHistoryStore | Minor | . | Yiqun Lin | Yiqun Lin |
|
||||
| [YARN-8222](https://issues.apache.org/jira/browse/YARN-8222) | Fix potential NPE when gets RMApp from RM context | Critical | . | Tao Yang | Tao Yang |
|
||||
| [HDFS-13481](https://issues.apache.org/jira/browse/HDFS-13481) | TestRollingFileSystemSinkWithHdfs#testFlushThread: test failed intermittently | Major | hdfs | Gabor Bota | Gabor Bota |
|
||||
| [YARN-8217](https://issues.apache.org/jira/browse/YARN-8217) | RmAuthenticationFilterInitializer /TimelineAuthenticationFilterInitializer should use Configuration.getPropsWithPrefix instead of iterator | Major | . | Suma Shivaprasad | Suma Shivaprasad |
|
||||
| [YARN-8025](https://issues.apache.org/jira/browse/YARN-8025) | UsersManangers#getComputedResourceLimitForActiveUsers throws NPE due to preComputedActiveUserLimit is empty | Major | yarn | Jiandan Yang | Tao Yang |
|
||||
| [YARN-8232](https://issues.apache.org/jira/browse/YARN-8232) | RMContainer lost queue name when RM HA happens | Major | resourcemanager | Hu Ziqian | Hu Ziqian |
|
||||
| [HDFS-13136](https://issues.apache.org/jira/browse/HDFS-13136) | Avoid taking FSN lock while doing group member lookup for FSD permission check | Major | namenode | Xiaoyu Yao | Xiaoyu Yao |
|
||||
| [HDFS-13537](https://issues.apache.org/jira/browse/HDFS-13537) | TestHdfsHelper does not generate jceks path properly for relative path in Windows | Major | . | Xiao Liang | Xiao Liang |
|
||||
| [YARN-7003](https://issues.apache.org/jira/browse/YARN-7003) | DRAINING state of queues is not recovered after RM restart | Major | capacityscheduler | Tao Yang | Tao Yang |
|
||||
| [YARN-8244](https://issues.apache.org/jira/browse/YARN-8244) | TestContainerSchedulerQueuing.testStartMultipleContainers failed | Major | . | Miklos Szegedi | Jim Brennan |
|
||||
| [YARN-8288](https://issues.apache.org/jira/browse/YARN-8288) | Fix wrong number of table columns in Resource Model doc | Major | . | Weiwei Yang | Weiwei Yang |
|
||||
| [HDFS-13539](https://issues.apache.org/jira/browse/HDFS-13539) | DFSStripedInputStream NPE when reportCheckSumFailure | Major | . | Xiao Chen | Xiao Chen |
|
||||
| [YARN-8278](https://issues.apache.org/jira/browse/YARN-8278) | DistributedScheduling is not working in HA | Blocker | . | Bibin A Chundatt | Bibin A Chundatt |
|
||||
| [HDFS-13581](https://issues.apache.org/jira/browse/HDFS-13581) | DN UI logs link is broken when https is enabled | Minor | datanode | Namit Maheshwari | Shashikant Banerjee |
|
||||
| [HDFS-13586](https://issues.apache.org/jira/browse/HDFS-13586) | Fsync fails on directories on Windows | Critical | datanode, hdfs | Lukas Majercak | Lukas Majercak |
|
||||
| [YARN-8179](https://issues.apache.org/jira/browse/YARN-8179) | Preemption does not happen due to natural\_termination\_factor when DRF is used | Major | . | kyungwan nam | kyungwan nam |
|
||||
| [HADOOP-15450](https://issues.apache.org/jira/browse/HADOOP-15450) | Avoid fsync storm triggered by DiskChecker and handle disk full situation | Blocker | . | Kihwal Lee | Arpit Agarwal |
|
||||
| [HDFS-13601](https://issues.apache.org/jira/browse/HDFS-13601) | Optimize ByteString conversions in PBHelper | Major | . | Andrew Wang | Andrew Wang |
|
||||
| [HDFS-13540](https://issues.apache.org/jira/browse/HDFS-13540) | DFSStripedInputStream should only allocate new buffers when reading | Major | . | Xiao Chen | Xiao Chen |
|
||||
| [HDFS-13588](https://issues.apache.org/jira/browse/HDFS-13588) | Fix TestFsDatasetImpl test failures on Windows | Major | . | Xiao Liang | Xiao Liang |
|
||||
| [YARN-8310](https://issues.apache.org/jira/browse/YARN-8310) | Handle old NMTokenIdentifier, AMRMTokenIdentifier, and ContainerTokenIdentifier formats | Major | . | Robert Kanter | Robert Kanter |
|
||||
| [YARN-8344](https://issues.apache.org/jira/browse/YARN-8344) | Missing nm.stop() in TestNodeManagerResync to fix testKillContainersOnResync | Major | . | Giovanni Matteo Fumarola | Giovanni Matteo Fumarola |
|
||||
| [YARN-8327](https://issues.apache.org/jira/browse/YARN-8327) | Fix TestAggregatedLogFormat#testReadAcontainerLogs1 on Windows | Major | log-aggregation | Giovanni Matteo Fumarola | Giovanni Matteo Fumarola |
|
||||
| [YARN-8346](https://issues.apache.org/jira/browse/YARN-8346) | Upgrading to 3.1 kills running containers with error "Opportunistic container queue is full" | Blocker | . | Rohith Sharma K S | Jason Lowe |
|
||||
| [HDFS-13611](https://issues.apache.org/jira/browse/HDFS-13611) | Unsafe use of Text as a ConcurrentHashMap key in PBHelperClient | Major | . | Andrew Wang | Andrew Wang |
|
||||
| [HDFS-13618](https://issues.apache.org/jira/browse/HDFS-13618) | Fix TestDataNodeFaultInjector test failures on Windows | Major | test | Xiao Liang | Xiao Liang |
|
||||
| [HADOOP-15473](https://issues.apache.org/jira/browse/HADOOP-15473) | Configure serialFilter in KeyProvider to avoid UnrecoverableKeyException caused by JDK-8189997 | Critical | kms | Gabor Bota | Gabor Bota |
|
||||
| [YARN-8338](https://issues.apache.org/jira/browse/YARN-8338) | TimelineService V1.5 doesn't come up after HADOOP-15406 | Critical | . | Vinod Kumar Vavilapalli | Vinod Kumar Vavilapalli |


### TESTS:

| JIRA | Summary | Priority | Component | Reporter | Contributor |
|:---- |:---- | :--- |:---- |:---- |:---- |
| [HADOOP-15313](https://issues.apache.org/jira/browse/HADOOP-15313) | TestKMS should close providers | Major | kms, test | Xiao Chen | Xiao Chen |
| [HDFS-13503](https://issues.apache.org/jira/browse/HDFS-13503) | Fix TestFsck test failures on Windows | Major | hdfs | Xiao Liang | Xiao Liang |
| [HDFS-13542](https://issues.apache.org/jira/browse/HDFS-13542) | TestBlockManager#testNeededReplicationWhileAppending fails due to improper cluster shutdown in TestBlockManager#testBlockManagerMachinesArray on Windows | Minor | . | Anbang Hu | Anbang Hu |
| [HDFS-13551](https://issues.apache.org/jira/browse/HDFS-13551) | TestMiniDFSCluster#testClusterSetStorageCapacity does not shut down cluster | Minor | . | Anbang Hu | Anbang Hu |
| [HDFS-11700](https://issues.apache.org/jira/browse/HDFS-11700) | TestHDFSServerPorts#testBackupNodePorts doesn't pass on Windows | Minor | . | Anbang Hu | Anbang Hu |
| [HDFS-13548](https://issues.apache.org/jira/browse/HDFS-13548) | TestResolveHdfsSymlink#testFcResolveAfs fails on Windows | Minor | . | Anbang Hu | Anbang Hu |
| [HDFS-13567](https://issues.apache.org/jira/browse/HDFS-13567) | TestNameNodeMetrics#testGenerateEDEKTime,TestNameNodeMetrics#testResourceCheck should use a different cluster basedir | Minor | . | Anbang Hu | Anbang Hu |
| [HDFS-13557](https://issues.apache.org/jira/browse/HDFS-13557) | TestDFSAdmin#testListOpenFiles fails on Windows | Minor | . | Anbang Hu | Anbang Hu |
| [HDFS-13550](https://issues.apache.org/jira/browse/HDFS-13550) | TestDebugAdmin#testComputeMetaCommand fails on Windows | Minor | . | Anbang Hu | Anbang Hu |
| [HDFS-13559](https://issues.apache.org/jira/browse/HDFS-13559) | TestBlockScanner does not close TestContext properly | Minor | . | Anbang Hu | Anbang Hu |
| [HDFS-13570](https://issues.apache.org/jira/browse/HDFS-13570) | TestQuotaByStorageType,TestQuota,TestDFSOutputStream fail on Windows | Minor | . | Anbang Hu | Anbang Hu |
| [HDFS-13558](https://issues.apache.org/jira/browse/HDFS-13558) | TestDatanodeHttpXFrame does not shut down cluster | Minor | . | Anbang Hu | Anbang Hu |
| [HDFS-13554](https://issues.apache.org/jira/browse/HDFS-13554) | TestDatanodeRegistration#testForcedRegistration does not shut down cluster | Minor | . | Anbang Hu | Anbang Hu |
| [HDFS-13556](https://issues.apache.org/jira/browse/HDFS-13556) | TestNestedEncryptionZones does not shut down cluster | Minor | . | Anbang Hu | Anbang Hu |
| [HDFS-13560](https://issues.apache.org/jira/browse/HDFS-13560) | Insufficient system resources exist to complete the requested service for some tests on Windows | Minor | . | Anbang Hu | Anbang Hu |
| [HDFS-13592](https://issues.apache.org/jira/browse/HDFS-13592) | TestNameNodePrunesMissingStorages#testNameNodePrunesUnreportedStorages does not shut down cluster properly | Minor | . | Anbang Hu | Anbang Hu |
| [HDFS-13593](https://issues.apache.org/jira/browse/HDFS-13593) | TestBlockReaderLocalLegacy#testBlockReaderLocalLegacyWithAppend fails on Windows | Minor | test | Anbang Hu | Anbang Hu |
| [HDFS-13587](https://issues.apache.org/jira/browse/HDFS-13587) | TestQuorumJournalManager fails on Windows | Minor | . | Anbang Hu | Anbang Hu |
| [HDFS-13619](https://issues.apache.org/jira/browse/HDFS-13619) | TestAuditLoggerWithCommands fails on Windows | Minor | test | Anbang Hu | Anbang Hu |
| [HDFS-13620](https://issues.apache.org/jira/browse/HDFS-13620) | Randomize the test directory path for TestHDFSFileSystemContract | Minor | . | Anbang Hu | Anbang Hu |
| [HDFS-13591](https://issues.apache.org/jira/browse/HDFS-13591) | TestDFSShell#testSetrepLow fails on Windows | Minor | . | Anbang Hu | Anbang Hu |
| [HDFS-13632](https://issues.apache.org/jira/browse/HDFS-13632) | Randomize baseDir for MiniJournalCluster in MiniQJMHACluster for TestDFSAdminWithHA | Minor | . | Anbang Hu | Anbang Hu |


### SUB-TASKS:

| JIRA | Summary | Priority | Component | Reporter | Contributor |
|:---- |:---- | :--- |:---- |:---- |:---- |
| [HDFS-13068](https://issues.apache.org/jira/browse/HDFS-13068) | RBF: Add router admin option to manage safe mode | Major | . | Íñigo Goiri | Yiqun Lin |
|
||||
| [HADOOP-15040](https://issues.apache.org/jira/browse/HADOOP-15040) | Upgrade AWS SDK to 1.11.271: NPE bug spams logs w/ Yarn Log Aggregation | Blocker | fs/s3 | Aaron Fabbri | Aaron Fabbri |
|
||||
| [HDFS-13119](https://issues.apache.org/jira/browse/HDFS-13119) | RBF: Manage unavailable clusters | Major | . | Íñigo Goiri | Yiqun Lin |
|
||||
| [HADOOP-15247](https://issues.apache.org/jira/browse/HADOOP-15247) | Move commons-net up to 3.6 | Minor | fs | Steve Loughran | Steve Loughran |
|
||||
| [HDFS-13187](https://issues.apache.org/jira/browse/HDFS-13187) | RBF: Fix Routers information shown in the web UI | Minor | . | Wei Yan | Wei Yan |
|
||||
| [HDFS-13184](https://issues.apache.org/jira/browse/HDFS-13184) | RBF: Improve the unit test TestRouterRPCClientRetries | Minor | test | Yiqun Lin | Yiqun Lin |
|
||||
| [HDFS-13199](https://issues.apache.org/jira/browse/HDFS-13199) | RBF: Fix the hdfs router page missing label icon issue | Major | federation, hdfs | maobaolong | maobaolong |
|
||||
| [HADOOP-15264](https://issues.apache.org/jira/browse/HADOOP-15264) | AWS "shaded" SDK 1.11.271 is pulling in netty 4.1.17 | Blocker | fs/s3 | Steve Loughran | Steve Loughran |
|
||||
| [HADOOP-15090](https://issues.apache.org/jira/browse/HADOOP-15090) | Add ADL troubleshooting doc | Major | documentation, fs/adl | Steve Loughran | Steve Loughran |
|
||||
| [HDFS-13214](https://issues.apache.org/jira/browse/HDFS-13214) | RBF: Complete document of Router configuration | Major | . | Tao Jie | Yiqun Lin |
|
||||
| [HADOOP-15267](https://issues.apache.org/jira/browse/HADOOP-15267) | S3A multipart upload fails when SSE-C encryption is enabled | Critical | fs/s3 | Anis Elleuch | Anis Elleuch |
|
||||
| [HDFS-13230](https://issues.apache.org/jira/browse/HDFS-13230) | RBF: ConnectionManager's cleanup task will compare each pool's own active conns with its total conns | Minor | . | Wei Yan | Chao Sun |
|
||||
| [HDFS-13233](https://issues.apache.org/jira/browse/HDFS-13233) | RBF: MountTableResolver doesn't return the correct mount point of the given path | Major | hdfs | wangzhiyuan | wangzhiyuan |
|
||||
| [HADOOP-15277](https://issues.apache.org/jira/browse/HADOOP-15277) | remove .FluentPropertyBeanIntrospector from CLI operation log output | Minor | conf | Steve Loughran | Steve Loughran |
|
||||
| [HDFS-13212](https://issues.apache.org/jira/browse/HDFS-13212) | RBF: Fix router location cache issue | Major | federation, hdfs | Weiwei Wu | Weiwei Wu |
|
||||
| [HDFS-13232](https://issues.apache.org/jira/browse/HDFS-13232) | RBF: ConnectionPool should return first usable connection | Minor | . | Wei Yan | Ekanth S |
|
||||
| [HDFS-13240](https://issues.apache.org/jira/browse/HDFS-13240) | RBF: Update some inaccurate document descriptions | Minor | . | Yiqun Lin | Yiqun Lin |
|
||||
| [HDFS-11399](https://issues.apache.org/jira/browse/HDFS-11399) | Many tests fails in Windows due to injecting disk failures | Major | . | Yiqun Lin | Yiqun Lin |
|
||||
| [HDFS-13241](https://issues.apache.org/jira/browse/HDFS-13241) | RBF: TestRouterSafemode failed if the port 8888 is in use | Major | hdfs, test | maobaolong | maobaolong |
|
||||
| [HDFS-13253](https://issues.apache.org/jira/browse/HDFS-13253) | RBF: Quota management incorrect parent-child relationship judgement | Major | . | Yiqun Lin | Yiqun Lin |
|
||||
| [HDFS-13226](https://issues.apache.org/jira/browse/HDFS-13226) | RBF: Throw the exception if mount table entry validated failed | Major | hdfs | maobaolong | maobaolong |
|
||||
| [HDFS-12505](https://issues.apache.org/jira/browse/HDFS-12505) | Extend TestFileStatusWithECPolicy with a random EC policy | Major | erasure-coding, test | Takanobu Asanuma | Takanobu Asanuma |
|
||||
| [HDFS-12587](https://issues.apache.org/jira/browse/HDFS-12587) | Use Parameterized tests in TestBlockInfoStriped and TestLowRedundancyBlockQueues to test all EC policies | Major | erasure-coding, test | Takanobu Asanuma | Takanobu Asanuma |
|
||||
| [HADOOP-15308](https://issues.apache.org/jira/browse/HADOOP-15308) | TestConfiguration fails on Windows because of paths | Major | test | Íñigo Goiri | Xiao Liang |
|
||||
| [HDFS-12773](https://issues.apache.org/jira/browse/HDFS-12773) | RBF: Improve State Store FS implementation | Major | . | Íñigo Goiri | Íñigo Goiri |
|
||||
| [HDFS-13198](https://issues.apache.org/jira/browse/HDFS-13198) | RBF: RouterHeartbeatService throws out CachedStateStore related exceptions when starting router | Minor | . | Wei Yan | Wei Yan |
|
||||
| [HDFS-13224](https://issues.apache.org/jira/browse/HDFS-13224) | RBF: Resolvers to support mount points across multiple subclusters | Major | . | Íñigo Goiri | Íñigo Goiri |
|
||||
| [HADOOP-15262](https://issues.apache.org/jira/browse/HADOOP-15262) | AliyunOSS: move files under a directory in parallel when rename a directory | Major | fs/oss | wujinhu | wujinhu |
|
||||
| [HDFS-13215](https://issues.apache.org/jira/browse/HDFS-13215) | RBF: Move Router to its own module | Major | . | Íñigo Goiri | Wei Yan |
|
||||
| [HDFS-13250](https://issues.apache.org/jira/browse/HDFS-13250) | RBF: Router to manage requests across multiple subclusters | Major | . | Íñigo Goiri | Íñigo Goiri |
|
||||
| [HDFS-13318](https://issues.apache.org/jira/browse/HDFS-13318) | RBF: Fix FindBugs in hadoop-hdfs-rbf | Minor | . | Íñigo Goiri | Ekanth S |
|
||||
| [HDFS-12792](https://issues.apache.org/jira/browse/HDFS-12792) | RBF: Test Router-based federation using HDFSContract | Major | . | Íñigo Goiri | Íñigo Goiri |
|
||||
| [YARN-7986](https://issues.apache.org/jira/browse/YARN-7986) | ATSv2 REST API queries do not return results for uppercase application tags | Critical | . | Charan Hebri | Charan Hebri |
|
||||
| [HDFS-12512](https://issues.apache.org/jira/browse/HDFS-12512) | RBF: Add WebHDFS | Major | fs | Íñigo Goiri | Wei Yan |
|
||||
| [HDFS-13291](https://issues.apache.org/jira/browse/HDFS-13291) | RBF: Implement available space based OrderResolver | Major | . | Yiqun Lin | Yiqun Lin |
|
||||
| [HDFS-13204](https://issues.apache.org/jira/browse/HDFS-13204) | RBF: Optimize name service safe mode icon | Minor | . | liuhongtong | liuhongtong |
|
||||
| [HDFS-13352](https://issues.apache.org/jira/browse/HDFS-13352) | RBF: Add xsl stylesheet for hdfs-rbf-default.xml | Major | documentation | Takanobu Asanuma | Takanobu Asanuma |
|
||||
| [HDFS-13347](https://issues.apache.org/jira/browse/HDFS-13347) | RBF: Cache datanode reports | Minor | . | Íñigo Goiri | Íñigo Goiri |
|
||||
| [HDFS-13289](https://issues.apache.org/jira/browse/HDFS-13289) | RBF: TestConnectionManager#testCleanup() test case need correction | Minor | . | Dibyendu Karmakar | Dibyendu Karmakar |
|
||||
| [HDFS-13364](https://issues.apache.org/jira/browse/HDFS-13364) | RBF: Support NamenodeProtocol in the Router | Major | . | Íñigo Goiri | Íñigo Goiri |
|
||||
| [HADOOP-14651](https://issues.apache.org/jira/browse/HADOOP-14651) | Update okhttp version to 2.7.5 | Major | fs/adl | Ray Chiang | Ray Chiang |
|
||||
| [YARN-6936](https://issues.apache.org/jira/browse/YARN-6936) | [Atsv2] Retrospect storing entities into sub application table from client perspective | Major | . | Rohith Sharma K S | Rohith Sharma K S |
|
||||
| [HDFS-13353](https://issues.apache.org/jira/browse/HDFS-13353) | RBF: TestRouterWebHDFSContractCreate failed | Major | test | Takanobu Asanuma | Takanobu Asanuma |
|
||||
| [YARN-8107](https://issues.apache.org/jira/browse/YARN-8107) | Give an informative message when incorrect format is used in ATSv2 filter attributes | Major | ATSv2 | Charan Hebri | Rohith Sharma K S |
|
||||
| [HDFS-13402](https://issues.apache.org/jira/browse/HDFS-13402) | RBF: Fix java doc for StateStoreFileSystemImpl | Minor | hdfs | Yiran Wu | Yiran Wu |
|
||||
| [HDFS-13410](https://issues.apache.org/jira/browse/HDFS-13410) | RBF: Support federation with no subclusters | Minor | . | Íñigo Goiri | Íñigo Goiri |
|
||||
| [HDFS-13384](https://issues.apache.org/jira/browse/HDFS-13384) | RBF: Improve timeout RPC call mechanism | Minor | . | Íñigo Goiri | Íñigo Goiri |
|
||||
| [HDFS-13045](https://issues.apache.org/jira/browse/HDFS-13045) | RBF: Improve error message returned from subcluster | Minor | . | Wei Yan | Íñigo Goiri |
|
||||
| [HDFS-13428](https://issues.apache.org/jira/browse/HDFS-13428) | RBF: Remove LinkedList From StateStoreFileImpl.java | Trivial | federation | BELUGA BEHR | BELUGA BEHR |
|
||||
| [HDFS-13386](https://issues.apache.org/jira/browse/HDFS-13386) | RBF: Wrong date information in list file(-ls) result | Minor | . | Dibyendu Karmakar | Dibyendu Karmakar |
|
||||
| [YARN-8027](https://issues.apache.org/jira/browse/YARN-8027) | Setting hostname of docker container breaks for --net=host in docker 1.13 | Major | yarn | Jim Brennan | Jim Brennan |
|
||||
| [HADOOP-14999](https://issues.apache.org/jira/browse/HADOOP-14999) | AliyunOSS: provide one asynchronous multi-part based uploading mechanism | Major | fs/oss | Genmao Yu | Genmao Yu |
|
||||
| [YARN-7810](https://issues.apache.org/jira/browse/YARN-7810) | TestDockerContainerRuntime test failures due to UID lookup of a non-existent user | Major | . | Shane Kumpf | Shane Kumpf |
|
||||
| [HDFS-13435](https://issues.apache.org/jira/browse/HDFS-13435) | RBF: Improve the error loggings for printing the stack trace | Major | . | Yiqun Lin | Yiqun Lin |
|
||||
| [YARN-7189](https://issues.apache.org/jira/browse/YARN-7189) | Container-executor doesn't remove Docker containers that error out early | Major | yarn | Eric Badger | Eric Badger |
|
||||
| [HDFS-13466](https://issues.apache.org/jira/browse/HDFS-13466) | RBF: Add more router-related information to the UI | Minor | . | Wei Yan | Wei Yan |
|
||||
| [HDFS-13453](https://issues.apache.org/jira/browse/HDFS-13453) | RBF: getMountPointDates should fetch latest subdir time/date when parent dir is not present but /parent/child dirs are present in mount table | Major | . | Dibyendu Karmakar | Dibyendu Karmakar |
|
||||
| [HDFS-13478](https://issues.apache.org/jira/browse/HDFS-13478) | RBF: Disabled Nameservice store API | Major | . | Íñigo Goiri | Íñigo Goiri |
|
||||
| [HDFS-13490](https://issues.apache.org/jira/browse/HDFS-13490) | RBF: Fix setSafeMode in the Router | Major | . | Íñigo Goiri | Íñigo Goiri |
|
||||
| [HDFS-13484](https://issues.apache.org/jira/browse/HDFS-13484) | RBF: Disable Nameservices from the federation | Major | . | Íñigo Goiri | Íñigo Goiri |
|
||||
| [HDFS-13326](https://issues.apache.org/jira/browse/HDFS-13326) | RBF: Improve the interfaces to modify and view mount tables | Minor | . | Wei Yan | Gang Li |
|
||||
| [HDFS-13499](https://issues.apache.org/jira/browse/HDFS-13499) | RBF: Show disabled name services in the UI | Minor | . | Íñigo Goiri | Íñigo Goiri |
|
||||
| [YARN-8215](https://issues.apache.org/jira/browse/YARN-8215) | ATS v2 returns invalid YARN\_CONTAINER\_ALLOCATED\_HOST\_HTTP\_ADDRESS from NM | Critical | ATSv2 | Yesha Vora | Rohith Sharma K S |
|
||||
| [HDFS-13508](https://issues.apache.org/jira/browse/HDFS-13508) | RBF: Normalize paths (automatically) when adding, updating, removing or listing mount table entries | Minor | . | Ekanth S | Ekanth S |
|
||||
| [HDFS-13434](https://issues.apache.org/jira/browse/HDFS-13434) | RBF: Fix dead links in RBF document | Major | documentation | Akira Ajisaka | Chetna Chaudhari |
|
||||
| [YARN-8212](https://issues.apache.org/jira/browse/YARN-8212) | Pending backlog for async allocation threads should be configurable | Major | . | Weiwei Yang | Tao Yang |
|
||||
| [HDFS-13488](https://issues.apache.org/jira/browse/HDFS-13488) | RBF: Reject requests when a Router is overloaded | Major | . | Íñigo Goiri | Íñigo Goiri |
|
||||
| [HDFS-13525](https://issues.apache.org/jira/browse/HDFS-13525) | RBF: Add unit test TestStateStoreDisabledNameservice | Major | . | Yiqun Lin | Yiqun Lin |
|
||||
| [YARN-8253](https://issues.apache.org/jira/browse/YARN-8253) | HTTPS Ats v2 api call fails with "bad HTTP parsed" | Critical | ATSv2 | Yesha Vora | Charan Hebri |
|
||||
| [HADOOP-15454](https://issues.apache.org/jira/browse/HADOOP-15454) | TestRollingFileSystemSinkWithLocal fails on Windows | Major | test | Xiao Liang | Xiao Liang |
|
||||
| [YARN-8247](https://issues.apache.org/jira/browse/YARN-8247) | Incorrect HTTP status code returned by ATSv2 for non-whitelisted users | Critical | ATSv2 | Charan Hebri | Rohith Sharma K S |
|
||||
| [YARN-8130](https://issues.apache.org/jira/browse/YARN-8130) | Race condition when container events are published for KILLED applications | Major | ATSv2 | Charan Hebri | Rohith Sharma K S |
|
||||
| [HADOOP-15498](https://issues.apache.org/jira/browse/HADOOP-15498) | TestHadoopArchiveLogs (#testGenerateScript, #testPrepareWorkingDir) fails on Windows | Minor | . | Anbang Hu | Anbang Hu |
|
||||
| [HADOOP-15497](https://issues.apache.org/jira/browse/HADOOP-15497) | TestTrash should use proper test path to avoid failing on Windows | Minor | . | Anbang Hu | Anbang Hu |


### OTHER:

| JIRA | Summary | Priority | Component | Reporter | Contributor |
|:---- |:---- | :--- |:---- |:---- |:---- |
| [HDFS-13052](https://issues.apache.org/jira/browse/HDFS-13052) | WebHDFS: Add support for snapshot diff | Major | . | Lokesh Jain | Lokesh Jain |
| [HADOOP-14742](https://issues.apache.org/jira/browse/HADOOP-14742) | Document multi-URI replication Inode for ViewFS | Major | documentation, viewfs | Chris Douglas | Gera Shegalov |

@ -0,0 +1,31 @@
<!---
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
-->
# Apache Hadoop 3.0.3 Release Notes

These release notes cover new developer and user-facing incompatibilities, important issues, features, and major improvements.


---

* [HDFS-13099](https://issues.apache.org/jira/browse/HDFS-13099) | *Minor* | **RBF: Use the ZooKeeper as the default State Store**

Change the default State Store from a local file to ZooKeeper. This will require an additional ZooKeeper address to be configured.
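For illustration, a minimal sketch of the Router-side configuration this new default implies; the key names `dfs.federation.router.store.driver.class` and `hadoop.zk.address`, and the host names, are assumptions drawn from the RBF defaults rather than part of this patch:

    import org.apache.hadoop.conf.Configuration;

    public class RouterZkStateStoreExample {
      public static Configuration routerConf() {
        Configuration conf = new Configuration();
        // Assumed key: selects the ZooKeeper-backed State Store driver (the new default).
        conf.set("dfs.federation.router.store.driver.class",
            "org.apache.hadoop.hdfs.server.federation.store.driver.impl.StateStoreZooKeeperImpl");
        // Assumed key: ZooKeeper ensemble the Routers use for the State Store.
        conf.set("hadoop.zk.address", "zk1:2181,zk2:2181,zk3:2181");
        return conf;
      }
    }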
@ -51,7 +51,8 @@ public class TestFileSystemStorageStatistics {
|
|||
"bytesReadLocalHost",
|
||||
"bytesReadDistanceOfOneOrTwo",
|
||||
"bytesReadDistanceOfThreeOrFour",
|
||||
"bytesReadDistanceOfFiveOrLarger"
|
||||
"bytesReadDistanceOfFiveOrLarger",
|
||||
"bytesReadErasureCoded"
|
||||
};
|
||||
|
||||
private FileSystem.Statistics statistics =
|
||||
|
@ -74,6 +75,7 @@ public class TestFileSystemStorageStatistics {
|
|||
statistics.incrementBytesReadByDistance(0, RandomUtils.nextInt(100));
|
||||
statistics.incrementBytesReadByDistance(1, RandomUtils.nextInt(100));
|
||||
statistics.incrementBytesReadByDistance(3, RandomUtils.nextInt(100));
|
||||
statistics.incrementBytesReadErasureCoded(RandomUtils.nextInt(100));
|
||||
}
|
||||
|
||||
@Test
|
||||
|
@ -126,6 +128,8 @@ public class TestFileSystemStorageStatistics {
|
|||
return statistics.getBytesReadByDistance(3);
|
||||
case "bytesReadDistanceOfFiveOrLarger":
|
||||
return statistics.getBytesReadByDistance(5);
|
||||
case "bytesReadErasureCoded":
|
||||
return statistics.getBytesReadErasureCoded();
|
||||
default:
|
||||
return 0;
|
||||
}
|
||||
|
|
|
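The hunk above wires the new "bytesReadErasureCoded" counter into the statistics test. As a hedged sketch of how the statistic can be read from client code (the scheme lookup and null handling are illustrative assumptions, not taken from this patch):

    import java.util.Map;
    import org.apache.hadoop.fs.FileSystem;

    public class EcReadStatsExample {
      public static long erasureCodedBytesRead() {
        // Per-scheme statistics are aggregated across FileSystem instances of that scheme.
        Map<String, FileSystem.Statistics> all = FileSystem.getStatistics();
        FileSystem.Statistics hdfsStats = all.get("hdfs");
        return hdfsStats == null ? 0L : hdfsStats.getBytesReadErasureCoded();
      }
    }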
@ -968,6 +968,160 @@ public class TestFileUtil {
|
|||
Assert.assertFalse(link.exists());
|
||||
}
|
||||
|
||||
/**
|
||||
* This test validates the correctness of
|
||||
* {@link FileUtil#symLink(String, String)} in case of null pointer inputs.
|
||||
*
|
||||
* @throws IOException
|
||||
*/
|
||||
@Test
|
||||
public void testSymlinkWithNullInput() throws IOException {
|
||||
Assert.assertFalse(del.exists());
|
||||
del.mkdirs();
|
||||
|
||||
File file = new File(del, FILE);
|
||||
File link = new File(del, "_link");
|
||||
|
||||
// Create the same symbolic link
|
||||
// The operation should fail and returns 1
|
||||
int result = FileUtil.symLink(null, null);
|
||||
Assert.assertEquals(1, result);
|
||||
|
||||
// Create the same symbolic link
|
||||
// The operation should fail and returns 1
|
||||
result = FileUtil.symLink(file.getAbsolutePath(), null);
|
||||
Assert.assertEquals(1, result);
|
||||
|
||||
// Create the same symbolic link
|
||||
// The operation should fail and returns 1
|
||||
result = FileUtil.symLink(null, link.getAbsolutePath());
|
||||
Assert.assertEquals(1, result);
|
||||
|
||||
file.delete();
|
||||
link.delete();
|
||||
}
|
||||
|
||||
/**
|
||||
* This test validates the correctness of
|
||||
* {@link FileUtil#symLink(String, String)} in case the file already exists.
|
||||
*
|
||||
* @throws IOException
|
||||
*/
|
||||
@Test
|
||||
public void testSymlinkFileAlreadyExists() throws IOException {
|
||||
Assert.assertFalse(del.exists());
|
||||
del.mkdirs();
|
||||
|
||||
File file = new File(del, FILE);
|
||||
File link = new File(del, "_link");
|
||||
|
||||
// Create a symbolic link
|
||||
// The operation should succeed
|
||||
int result1 =
|
||||
FileUtil.symLink(file.getAbsolutePath(), link.getAbsolutePath());
|
||||
|
||||
Assert.assertEquals(0, result1);
|
||||
|
||||
// Create the same symbolic link
|
||||
// The operation should fail and returns 1
|
||||
result1 = FileUtil.symLink(file.getAbsolutePath(), link.getAbsolutePath());
|
||||
|
||||
Assert.assertEquals(1, result1);
|
||||
|
||||
file.delete();
|
||||
link.delete();
|
||||
}
|
||||
|
||||
/**
|
||||
* This test validates the correctness of
|
||||
* {@link FileUtil#symLink(String, String)} in case the file and the link are
|
||||
* the same file.
|
||||
*
|
||||
* @throws IOException
|
||||
*/
|
||||
@Test
|
||||
public void testSymlinkSameFile() throws IOException {
|
||||
Assert.assertFalse(del.exists());
|
||||
del.mkdirs();
|
||||
|
||||
File file = new File(del, FILE);
|
||||
|
||||
// Create a symbolic link
|
||||
// The operation should succeed
|
||||
int result =
|
||||
FileUtil.symLink(file.getAbsolutePath(), file.getAbsolutePath());
|
||||
|
||||
Assert.assertEquals(0, result);
|
||||
|
||||
file.delete();
|
||||
}
|
||||
|
||||
/**
|
||||
* This test validates the correctness of
|
||||
* {@link FileUtil#symLink(String, String)} in case we want to use a link for
|
||||
* 2 different files.
|
||||
*
|
||||
* @throws IOException
|
||||
*/
|
||||
@Test
|
||||
public void testSymlink2DifferentFile() throws IOException {
|
||||
Assert.assertFalse(del.exists());
|
||||
del.mkdirs();
|
||||
File file = new File(del, FILE);
|
||||
File fileSecond = new File(del, FILE + "_1");
|
||||
File link = new File(del, "_link");
|
||||
|
||||
// Create a symbolic link
|
||||
// The operation should succeed
|
||||
int result =
|
||||
FileUtil.symLink(file.getAbsolutePath(), link.getAbsolutePath());
|
||||
|
||||
Assert.assertEquals(0, result);
|
||||
|
||||
// The operation should fail and returns 1
|
||||
result =
|
||||
FileUtil.symLink(fileSecond.getAbsolutePath(), link.getAbsolutePath());
|
||||
|
||||
Assert.assertEquals(1, result);
|
||||
|
||||
file.delete();
|
||||
fileSecond.delete();
|
||||
link.delete();
|
||||
}
|
||||
|
||||
/**
|
||||
* This test validates the correctness of
|
||||
* {@link FileUtil#symLink(String, String)} in case we want to use a 2
|
||||
* different links for the same file.
|
||||
*
|
||||
* @throws IOException
|
||||
*/
|
||||
@Test
|
||||
public void testSymlink2DifferentLinks() throws IOException {
|
||||
Assert.assertFalse(del.exists());
|
||||
del.mkdirs();
|
||||
File file = new File(del, FILE);
|
||||
File link = new File(del, "_link");
|
||||
File linkSecond = new File(del, "_link_1");
|
||||
|
||||
// Create a symbolic link
|
||||
// The operation should succeed
|
||||
int result =
|
||||
FileUtil.symLink(file.getAbsolutePath(), link.getAbsolutePath());
|
||||
|
||||
Assert.assertEquals(0, result);
|
||||
|
||||
// The operation should succeed
|
||||
result =
|
||||
FileUtil.symLink(file.getAbsolutePath(), linkSecond.getAbsolutePath());
|
||||
|
||||
Assert.assertEquals(0, result);
|
||||
|
||||
file.delete();
|
||||
link.delete();
|
||||
linkSecond.delete();
|
||||
}
|
||||
|
||||
private void doUntarAndVerify(File tarFile, File untarDir)
|
||||
throws IOException {
|
||||
if (untarDir.exists() && !FileUtil.fullyDelete(untarDir)) {
|
||||
|
@ -1287,4 +1441,56 @@ public class TestFileUtil {
|
|||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* This test validates the correctness of {@link FileUtil#readLink(File)} in
|
||||
* case of null pointer inputs.
|
||||
*/
|
||||
@Test
|
||||
public void testReadSymlinkWithNullInput() {
|
||||
String result = FileUtil.readLink(null);
|
||||
Assert.assertEquals("", result);
|
||||
}
|
||||
|
||||
/**
|
||||
* This test validates the correctness of {@link FileUtil#readLink(File)}.
|
||||
*
|
||||
* @throws IOException
|
||||
*/
|
||||
@Test
|
||||
public void testReadSymlink() throws IOException {
|
||||
Assert.assertFalse(del.exists());
|
||||
del.mkdirs();
|
||||
|
||||
File file = new File(del, FILE);
|
||||
File link = new File(del, "_link");
|
||||
|
||||
// Create a symbolic link
|
||||
FileUtil.symLink(file.getAbsolutePath(), link.getAbsolutePath());
|
||||
|
||||
String result = FileUtil.readLink(link);
|
||||
Assert.assertEquals(file.getAbsolutePath(), result);
|
||||
|
||||
file.delete();
|
||||
link.delete();
|
||||
}
|
||||
|
||||
/**
|
||||
* This test validates the correctness of {@link FileUtil#readLink(File)} when
|
||||
* it gets a file in input.
|
||||
*
|
||||
* @throws IOException
|
||||
*/
|
||||
@Test
|
||||
public void testReadSymlinkWithAFileAsInput() throws IOException {
|
||||
Assert.assertFalse(del.exists());
|
||||
del.mkdirs();
|
||||
|
||||
File file = new File(del, FILE);
|
||||
|
||||
String result = FileUtil.readLink(file);
|
||||
Assert.assertEquals("", result);
|
||||
|
||||
file.delete();
|
||||
}
|
||||
|
||||
}
|
||||
|
|
|
@ -230,6 +230,12 @@ public final class RawErasureCoderBenchmark {
|
|||
throw e;
|
||||
} finally {
|
||||
executor.shutdown();
|
||||
if (encoder != null) {
|
||||
encoder.release();
|
||||
}
|
||||
if (decoder != null) {
|
||||
decoder.release();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -0,0 +1,59 @@
|
|||
/**
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
* regarding copyright ownership. The ASF licenses this file
|
||||
* to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
package org.apache.hadoop.security;
|
||||
|
||||
import javax.security.sasl.Sasl;
|
||||
import org.apache.hadoop.conf.Configuration;
|
||||
import org.junit.Test;
|
||||
|
||||
import static org.junit.Assert.*;
|
||||
|
||||
|
||||
/**
|
||||
* Test class for IngressPortBasedResolver.
|
||||
*/
|
||||
public class TestIngressPortBasedResolver {
|
||||
|
||||
/**
|
||||
* A simple test to test that for the configured ports, the resolver
|
||||
* can return the current SASL properties.
|
||||
*/
|
||||
@Test
|
||||
public void testResolver() {
|
||||
Configuration conf = new Configuration();
|
||||
conf.set("ingress.port.sasl.configured.ports", "444,555,666,777");
|
||||
conf.set("ingress.port.sasl.prop.444", "authentication");
|
||||
conf.set("ingress.port.sasl.prop.555", "authentication,privacy");
|
||||
conf.set("ingress.port.sasl.prop.666", "privacy");
|
||||
|
||||
IngressPortBasedResolver resolver = new IngressPortBasedResolver();
|
||||
resolver.setConf(conf);
|
||||
|
||||
// the client address does not matter, give it a null
|
||||
assertEquals("auth",
|
||||
resolver.getServerProperties(null, 444).get(Sasl.QOP));
|
||||
assertEquals("auth,auth-conf",
|
||||
resolver.getServerProperties(null, 555).get(Sasl.QOP));
|
||||
assertEquals("auth-conf",
|
||||
resolver.getServerProperties(null, 666).get(Sasl.QOP));
|
||||
assertEquals("auth-conf",
|
||||
resolver.getServerProperties(null, 777).get(Sasl.QOP));
|
||||
assertEquals("auth",
|
||||
resolver.getServerProperties(null, 888).get(Sasl.QOP));
|
||||
}
|
||||
}
|
|
@ -367,17 +367,31 @@ public class MetricsAsserts {
|
|||
}
|
||||
|
||||
/**
|
||||
* Asserts that the NumOps and quantiles for a metric have been changed at
|
||||
* some point to a non-zero value.
|
||||
* Asserts that the NumOps and quantiles for a metric with value name
|
||||
* "Latency" have been changed at some point to a non-zero value.
|
||||
*
|
||||
* @param prefix of the metric
|
||||
* @param rb MetricsRecordBuilder with the metric
|
||||
*/
|
||||
public static void assertQuantileGauges(String prefix,
|
||||
MetricsRecordBuilder rb) {
|
||||
assertQuantileGauges(prefix, rb, "Latency");
|
||||
}
|
||||
|
||||
/**
|
||||
* Asserts that the NumOps and quantiles for a metric have been changed at
|
||||
* some point to a non-zero value, for the specified value name of the
|
||||
* metrics (e.g., "Latency", "Count").
|
||||
*
|
||||
* @param prefix of the metric
|
||||
* @param rb MetricsRecordBuilder with the metric
|
||||
* @param valueName the value name for the metric
|
||||
*/
|
||||
public static void assertQuantileGauges(String prefix,
|
||||
MetricsRecordBuilder rb, String valueName) {
|
||||
verify(rb).addGauge(eqName(info(prefix + "NumOps", "")), geq(0l));
|
||||
for (Quantile q : MutableQuantiles.quantiles) {
|
||||
String nameTemplate = prefix + "%dthPercentileLatency";
|
||||
String nameTemplate = prefix + "%dthPercentile" + valueName;
|
||||
int percentile = (int) (100 * q.quantile);
|
||||
verify(rb).addGauge(
|
||||
eqName(info(String.format(nameTemplate, percentile), "")),
|
||||
|
|
|
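The three-argument overload above lets callers assert quantile gauges whose value name is not "Latency". A hedged usage sketch; the metrics record name and gauge prefixes below are made-up examples, not taken from this change:

    import static org.apache.hadoop.test.MetricsAsserts.assertQuantileGauges;
    import static org.apache.hadoop.test.MetricsAsserts.getMetrics;

    import org.apache.hadoop.metrics2.MetricsRecordBuilder;
    import org.junit.Test;

    public class QuantileGaugeAssertExample {
      @Test
      public void checksQuantiles() {
        MetricsRecordBuilder rb = getMetrics("NameNodeActivity");
        // Two-argument form: verifies the "<prefix>NumOps" counter and the
        // "<prefix><N>thPercentileLatency" gauges, as before.
        assertQuantileGauges("Syncs60s", rb);
        // New form: verifies "<prefix><N>thPercentileCount" gauges instead.
        assertQuantileGauges("PendingEvents60s", rb, "Count");
      }
    }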
@ -0,0 +1,91 @@
|
|||
/**
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
* regarding copyright ownership. The ASF licenses this file
|
||||
* to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package org.apache.hadoop.util;
|
||||
|
||||
import java.nio.channels.ClosedChannelException;
|
||||
|
||||
import org.junit.Test;
|
||||
|
||||
import org.apache.hadoop.test.HadoopTestBase;
|
||||
|
||||
import static org.junit.Assert.assertEquals;
|
||||
import static org.junit.Assert.assertFalse;
|
||||
import static org.junit.Assert.assertTrue;
|
||||
|
||||
public class TestCloseableReferenceCount extends HadoopTestBase {
|
||||
@Test
|
||||
public void testReference() throws ClosedChannelException {
|
||||
CloseableReferenceCount clr = new CloseableReferenceCount();
|
||||
clr.reference();
|
||||
assertEquals("Incorrect reference count", 1, clr.getReferenceCount());
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testUnreference() throws ClosedChannelException {
|
||||
CloseableReferenceCount clr = new CloseableReferenceCount();
|
||||
clr.reference();
|
||||
clr.reference();
|
||||
assertFalse("New reference count should not equal STATUS_CLOSED_MASK",
|
||||
clr.unreference());
|
||||
assertEquals("Incorrect reference count", 1, clr.getReferenceCount());
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testUnreferenceCheckClosed() throws ClosedChannelException {
|
||||
CloseableReferenceCount clr = new CloseableReferenceCount();
|
||||
clr.reference();
|
||||
clr.reference();
|
||||
clr.unreferenceCheckClosed();
|
||||
assertEquals("Incorrect reference count", 1, clr.getReferenceCount());
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testSetClosed() throws ClosedChannelException {
|
||||
CloseableReferenceCount clr = new CloseableReferenceCount();
|
||||
assertTrue("Reference count should be open", clr.isOpen());
|
||||
clr.setClosed();
|
||||
assertFalse("Reference count should be closed", clr.isOpen());
|
||||
}
|
||||
|
||||
@Test(expected = ClosedChannelException.class)
|
||||
public void testReferenceClosedReference() throws ClosedChannelException {
|
||||
CloseableReferenceCount clr = new CloseableReferenceCount();
|
||||
clr.setClosed();
|
||||
assertFalse("Reference count should be closed", clr.isOpen());
|
||||
clr.reference();
|
||||
}
|
||||
|
||||
@Test(expected = ClosedChannelException.class)
|
||||
public void testUnreferenceClosedReference() throws ClosedChannelException {
|
||||
CloseableReferenceCount clr = new CloseableReferenceCount();
|
||||
clr.reference();
|
||||
clr.setClosed();
|
||||
assertFalse("Reference count should be closed", clr.isOpen());
|
||||
clr.unreferenceCheckClosed();
|
||||
}
|
||||
|
||||
@Test(expected = ClosedChannelException.class)
|
||||
public void testDoubleClose() throws ClosedChannelException {
|
||||
CloseableReferenceCount clr = new CloseableReferenceCount();
|
||||
assertTrue("Reference count should be open", clr.isOpen());
|
||||
clr.setClosed();
|
||||
assertFalse("Reference count should be closed", clr.isOpen());
|
||||
clr.setClosed();
|
||||
}
|
||||
}
|
|
@ -137,7 +137,8 @@ public class TestDiskChecker {
|
|||
* @throws java.io.IOException if any
|
||||
*/
|
||||
protected File createTempFile() throws java.io.IOException {
|
||||
File testDir = new File(System.getProperty("test.build.data"));
|
||||
File testDir =
|
||||
new File(System.getProperty("test.build.data", "target/test-dir"));
|
||||
return Files.createTempFile(testDir.toPath(), "test", "tmp").toFile();
|
||||
}
|
||||
|
||||
|
@ -147,7 +148,8 @@ public class TestDiskChecker {
|
|||
* @throws java.io.IOException if any
|
||||
*/
|
||||
protected File createTempDir() throws java.io.IOException {
|
||||
File testDir = new File(System.getProperty("test.build.data"));
|
||||
File testDir =
|
||||
new File(System.getProperty("test.build.data", "target/test-dir"));
|
||||
return Files.createTempDirectory(testDir.toPath(), "test").toFile();
|
||||
}
|
||||
|
||||
|
|
|
@ -0,0 +1,193 @@
|
|||
/**
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
* regarding copyright ownership. The ASF licenses this file
|
||||
* to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
/**
|
||||
* <pre>
|
||||
* Story 1
|
||||
* As a software developer,
|
||||
* I want to use the IntrusiveCollection class;
|
||||
* So that I can save on memory usage during execution.
|
||||
* </pre>
|
||||
*/
|
||||
package org.apache.hadoop.util;
|
||||
|
||||
import java.util.HashMap;
|
||||
import java.util.Iterator;
|
||||
import java.util.Map;
|
||||
|
||||
import org.junit.Test;
|
||||
|
||||
import org.apache.hadoop.test.HadoopTestBase;
|
||||
import org.apache.hadoop.util.IntrusiveCollection.Element;
|
||||
|
||||
import static org.junit.Assert.assertEquals;
|
||||
import static org.junit.Assert.assertFalse;
|
||||
import static org.junit.Assert.assertTrue;
|
||||
|
||||
public class TestIntrusiveCollection extends HadoopTestBase {
|
||||
static class SimpleElement implements IntrusiveCollection.Element {
|
||||
private Map<IntrusiveCollection<? extends Element>, Element>
|
||||
prevMap, nextMap;
|
||||
private Map<IntrusiveCollection<? extends Element>, Boolean> isMemberMap;
|
||||
|
||||
public SimpleElement() {
|
||||
prevMap = new HashMap<>();
|
||||
nextMap = new HashMap<>();
|
||||
isMemberMap = new HashMap<>();
|
||||
}
|
||||
|
||||
@Override
|
||||
public void insertInternal(IntrusiveCollection<? extends Element> list,
|
||||
Element prev, Element next) {
|
||||
isMemberMap.put(list, true);
|
||||
prevMap.put(list, prev);
|
||||
nextMap.put(list, next);
|
||||
}
|
||||
|
||||
@Override
|
||||
public void setPrev(IntrusiveCollection<? extends Element> list,
|
||||
Element prev) {
|
||||
prevMap.put(list, prev);
|
||||
}
|
||||
|
||||
@Override
|
||||
public void setNext(IntrusiveCollection<? extends Element> list,
|
||||
Element next) {
|
||||
nextMap.put(list, next);
|
||||
}
|
||||
|
||||
@Override
|
||||
public void removeInternal(IntrusiveCollection<? extends Element> list) {
|
||||
prevMap.remove(list);
|
||||
nextMap.remove(list);
|
||||
isMemberMap.remove(list);
|
||||
}
|
||||
|
||||
@Override
|
||||
public Element getPrev(IntrusiveCollection<? extends Element> list) {
|
||||
return prevMap.getOrDefault(list, null);
|
||||
}
|
||||
|
||||
@Override
|
||||
public Element getNext(IntrusiveCollection<? extends Element> list) {
|
||||
return nextMap.getOrDefault(list, null);
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean isInList(IntrusiveCollection<? extends Element> list) {
|
||||
return isMemberMap.getOrDefault(list, false);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* <pre>
|
||||
* Scenario S1.1: Adding an element
|
||||
* Given an IntrusiveCollection has been created
|
||||
* and the IntrusiveCollection is empty
|
||||
* When I insert an element
|
||||
* Then the IntrusiveCollection contains the newly added element.
|
||||
* </pre>
|
||||
*/
|
||||
@Test
|
||||
public void testShouldAddElement() {
|
||||
IntrusiveCollection<SimpleElement> intrusiveCollection =
|
||||
new IntrusiveCollection<>();
|
||||
|
||||
SimpleElement element = new SimpleElement();
|
||||
intrusiveCollection.add(element);
|
||||
|
||||
assertFalse("Collection should not be empty",
|
||||
intrusiveCollection.isEmpty());
|
||||
assertTrue("Collection should contain added element",
|
||||
intrusiveCollection.contains(element));
|
||||
}
|
||||
|
||||
/**
|
||||
* <pre>
|
||||
* Scenario S1.2: Removing an element
|
||||
* Given an IntrusiveCollection has been created
|
||||
* and the InstrusiveCollection contains a single element
|
||||
* When I remove the element
|
||||
* Then the IntrusiveCollection is empty.
|
||||
* </pre>
|
||||
*/
|
||||
@Test
|
||||
public void testShouldRemoveElement() {
|
||||
IntrusiveCollection<SimpleElement> intrusiveCollection =
|
||||
new IntrusiveCollection<>();
|
||||
SimpleElement element = new SimpleElement();
|
||||
intrusiveCollection.add(element);
|
||||
|
||||
intrusiveCollection.remove(element);
|
||||
|
||||
assertTrue("Collection should be empty", intrusiveCollection.isEmpty());
|
||||
assertFalse("Collection should not contain removed element",
|
||||
intrusiveCollection.contains(element));
|
||||
}
|
||||
|
||||
/**
|
||||
* <pre>
|
||||
* Scenario S1.3: Removing all elements
|
||||
* Given an IntrusiveCollection has been created
|
||||
* and the IntrusiveCollection contains multiple elements
|
||||
* When I remove all elements
|
||||
* Then the IntrusiveCollection is empty.
|
||||
* </pre>
|
||||
*/
|
||||
@Test
|
||||
public void testShouldRemoveAllElements() {
|
||||
IntrusiveCollection<SimpleElement> intrusiveCollection =
|
||||
new IntrusiveCollection<>();
|
||||
intrusiveCollection.add(new SimpleElement());
|
||||
intrusiveCollection.add(new SimpleElement());
|
||||
intrusiveCollection.add(new SimpleElement());
|
||||
|
||||
intrusiveCollection.clear();
|
||||
|
||||
assertTrue("Collection should be empty", intrusiveCollection.isEmpty());
|
||||
}
|
||||
|
||||
/**
|
||||
* <pre>
|
||||
* Scenario S1.4: Iterating through elements
|
||||
* Given an IntrusiveCollection has been created
|
||||
* and the IntrusiveCollection contains multiple elements
|
||||
* When I iterate through the IntrusiveCollection
|
||||
* Then I get each element in the collection, successively.
|
||||
* </pre>
|
||||
*/
|
||||
@Test
|
||||
public void testIterateShouldReturnAllElements() {
|
||||
IntrusiveCollection<SimpleElement> intrusiveCollection =
|
||||
new IntrusiveCollection<>();
|
||||
SimpleElement elem1 = new SimpleElement();
|
||||
SimpleElement elem2 = new SimpleElement();
|
||||
SimpleElement elem3 = new SimpleElement();
|
||||
intrusiveCollection.add(elem1);
|
||||
intrusiveCollection.add(elem2);
|
||||
intrusiveCollection.add(elem3);
|
||||
|
||||
Iterator<SimpleElement> iterator = intrusiveCollection.iterator();
|
||||
|
||||
assertEquals("First element returned is incorrect", elem1, iterator.next());
|
||||
assertEquals("Second element returned is incorrect", elem2,
|
||||
iterator.next());
|
||||
assertEquals("Third element returned is incorrect", elem3, iterator.next());
|
||||
assertFalse("Iterator should not have next element", iterator.hasNext());
|
||||
}
|
||||
}
|
|
@ -0,0 +1,74 @@
|
|||
/**
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
* regarding copyright ownership. The ASF licenses this file
|
||||
* to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package org.apache.hadoop.util;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.io.InputStream;
|
||||
import java.util.Random;
|
||||
|
||||
import org.junit.Test;
|
||||
|
||||
import org.apache.hadoop.test.HadoopTestBase;
|
||||
|
||||
import static org.junit.Assert.assertArrayEquals;
|
||||
import static org.junit.Assert.assertEquals;
|
||||
|
||||
public class TestLimitInputStream extends HadoopTestBase {
|
||||
static class RandomInputStream extends InputStream {
|
||||
private Random rn = new Random(0);
|
||||
|
||||
@Override
|
||||
public int read() { return rn.nextInt(); }
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testRead() throws IOException {
|
||||
try (LimitInputStream limitInputStream =
|
||||
new LimitInputStream(new RandomInputStream(), 0)) {
|
||||
assertEquals("Reading byte after reaching limit should return -1", -1,
|
||||
limitInputStream.read());
|
||||
}
|
||||
try (LimitInputStream limitInputStream =
|
||||
new LimitInputStream(new RandomInputStream(), 4)) {
|
||||
assertEquals("Incorrect byte returned", new Random(0).nextInt(),
|
||||
limitInputStream.read());
|
||||
}
|
||||
}
|
||||
|
||||
@Test(expected = IOException.class)
|
||||
public void testResetWithoutMark() throws IOException {
|
||||
try (LimitInputStream limitInputStream =
|
||||
new LimitInputStream(new RandomInputStream(), 128)) {
|
||||
limitInputStream.reset();
|
||||
}
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testReadBytes() throws IOException {
|
||||
try (LimitInputStream limitInputStream =
|
||||
new LimitInputStream(new RandomInputStream(), 128)) {
|
||||
Random r = new Random(0);
|
||||
byte[] data = new byte[4];
|
||||
byte[] expected = { (byte) r.nextInt(), (byte) r.nextInt(),
|
||||
(byte) r.nextInt(), (byte) r.nextInt() };
|
||||
limitInputStream.read(data, 0, 4);
|
||||
assertArrayEquals("Incorrect bytes returned", expected, data);
|
||||
}
|
||||
}
|
||||
}
|
|
@ -27,6 +27,7 @@ import java.io.File;
|
|||
import java.io.FileNotFoundException;
|
||||
import java.io.FileOutputStream;
|
||||
import java.io.IOException;
|
||||
import java.io.InterruptedIOException;
|
||||
import java.io.PrintWriter;
|
||||
import java.lang.management.ManagementFactory;
|
||||
import java.lang.management.ThreadInfo;
|
||||
|
@ -38,6 +39,8 @@ import org.apache.hadoop.fs.FileUtil;
|
|||
import org.apache.hadoop.test.GenericTestUtils;
|
||||
|
||||
import static org.apache.hadoop.util.Shell.*;
|
||||
import static org.junit.Assume.assumeTrue;
|
||||
|
||||
import org.junit.Assume;
|
||||
import org.junit.Before;
|
||||
import org.junit.Rule;
|
||||
|
@ -528,4 +531,9 @@ public class TestShell extends Assert {
|
|||
public void testIsJavaVersionAtLeast() {
|
||||
assertTrue(Shell.isJavaVersionAtLeast(8));
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testIsBashSupported() throws InterruptedIOException {
|
||||
assumeTrue("Bash is not supported", Shell.checkIsBashSupported());
|
||||
}
|
||||
}
|
||||
|
|
|
@ -25,6 +25,7 @@ import static org.junit.Assert.assertArrayEquals;
|
|||
import static org.junit.Assert.assertEquals;
|
||||
import static org.junit.Assert.assertNotEquals;
|
||||
import static org.junit.Assert.assertTrue;
|
||||
import static org.junit.Assert.assertFalse;
|
||||
import static org.junit.Assert.fail;
|
||||
|
||||
import java.util.ArrayList;
|
||||
|
@ -476,6 +477,32 @@ public class TestStringUtils extends UnitTestcaseTimeLimit {
|
|||
executorService.awaitTermination(50, TimeUnit.SECONDS);
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testFormatTimeSortable() {
|
||||
long timeDiff = 523452311;
|
||||
String timeDiffStr = "99hrs, 59mins, 59sec";
|
||||
|
||||
assertEquals("Incorrect time diff string returned", timeDiffStr,
|
||||
StringUtils.formatTimeSortable(timeDiff));
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testIsAlpha() {
|
||||
assertTrue("Reported hello as non-alpha string",
|
||||
StringUtils.isAlpha("hello"));
|
||||
assertFalse("Reported hello1 as alpha string",
|
||||
StringUtils.isAlpha("hello1"));
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testEscapeHTML() {
|
||||
String htmlStr = "<p>Hello. How are you?</p>";
|
||||
String escapedStr = "<p>Hello. How are you?</p>";
|
||||
|
||||
assertEquals("Incorrect escaped HTML string returned",
|
||||
escapedStr, StringUtils.escapeHTML(htmlStr));
|
||||
}
|
||||
|
||||
// Benchmark for StringUtils split
|
||||
public static void main(String []args) {
|
||||
final String TO_SPLIT = "foo,bar,baz,blah,blah";
|
||||
|
|
|
@ -0,0 +1,57 @@
|
|||
/**
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
* regarding copyright ownership. The ASF licenses this file
|
||||
* to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package org.apache.hadoop.util;
|
||||
|
||||
import org.junit.Test;
|
||||
|
||||
import org.apache.hadoop.test.HadoopTestBase;
|
||||
|
||||
import static org.junit.Assert.assertEquals;
|
||||
|
||||
public class TestUTF8ByteArrayUtils extends HadoopTestBase {
|
||||
@Test
|
||||
public void testFindByte() {
|
||||
byte[] data = "Hello, world!".getBytes();
|
||||
assertEquals("Character 'a' does not exist in string", -1,
|
||||
UTF8ByteArrayUtils.findByte(data, 0, data.length, (byte) 'a'));
|
||||
assertEquals("Did not find first occurrence of character 'o'", 4,
|
||||
UTF8ByteArrayUtils.findByte(data, 0, data.length, (byte) 'o'));
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testFindBytes() {
|
||||
byte[] data = "Hello, world!".getBytes();
|
||||
assertEquals("Did not find first occurrence of pattern 'ello'", 1,
|
||||
UTF8ByteArrayUtils.findBytes(data, 0, data.length, "ello".getBytes()));
|
||||
assertEquals(
|
||||
"Substring starting at position 2 does not contain pattern 'ello'", -1,
|
||||
UTF8ByteArrayUtils.findBytes(data, 2, data.length, "ello".getBytes()));
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testFindNthByte() {
|
||||
byte[] data = "Hello, world!".getBytes();
|
||||
assertEquals("Did not find 2nd occurrence of character 'l'", 3,
|
||||
UTF8ByteArrayUtils.findNthByte(data, 0, data.length, (byte) 'l', 2));
|
||||
assertEquals("4th occurrence of character 'l' does not exist", -1,
|
||||
UTF8ByteArrayUtils.findNthByte(data, 0, data.length, (byte) 'l', 4));
|
||||
assertEquals("Did not find 3rd occurrence of character 'l'", 10,
|
||||
UTF8ByteArrayUtils.findNthByte(data, (byte) 'l', 3));
|
||||
}
|
||||
}
|
|
@ -15,7 +15,7 @@
|
|||
|
||||
load hadoop-functions_test_helper
|
||||
|
||||
@test "hadoop_stop_daemon" {
|
||||
@test "hadoop_stop_daemon_changing_pid" {
|
||||
old_pid=12345
|
||||
new_pid=54321
|
||||
HADOOP_STOP_TIMEOUT=3
|
||||
|
@ -29,3 +29,25 @@ load hadoop-functions_test_helper
|
|||
[ -f pidfile ]
|
||||
[ "$(cat pidfile)" = "${new_pid}" ]
|
||||
}
|
||||
|
||||
@test "hadoop_stop_daemon_force_kill" {
|
||||
|
||||
HADOOP_STOP_TIMEOUT=4
|
||||
|
||||
# Run the following in a sub-shell so that its termination doesn't affect the test
|
||||
(sh ${TESTBINDIR}/process_with_sigterm_trap.sh ${TMP}/pidfile &)
|
||||
|
||||
# Wait for the process to go into tight loop
|
||||
sleep 1
|
||||
|
||||
[ -f ${TMP}/pidfile ]
|
||||
pid=$(cat "${TMP}/pidfile")
|
||||
|
||||
run hadoop_stop_daemon my_command ${TMP}/pidfile 2>&1
|
||||
|
||||
# The process should no longer be alive
|
||||
! kill -0 ${pid} > /dev/null 2>&1
|
||||
|
||||
# The PID file should be gone
|
||||
[ ! -f ${TMP}/pidfile ]
|
||||
}
|
||||
|
|
|
@ -259,4 +259,39 @@
|
|||
</description>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>hadoop.kms.key.authorization.enable</name>
|
||||
<value>true</value>
|
||||
<description>Boolean property to enable/disable per-key authorization.</description>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>hadoop.security.kms.encrypted.key.cache.size</name>
|
||||
<value>100</value>
|
||||
<description>The size of the cache. This is the maximum number of EEKs that
|
||||
can be cached under each key name.</description>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>hadoop.security.kms.encrypted.key.cache.low.watermark</name>
|
||||
<value>0.3</value>
|
||||
<description>A low watermark on the cache. For each key name, if after a get call,
|
||||
the number of cached EEKs is less than (size * low watermark),
|
||||
then the cache under this key name will be filled asynchronously.
|
||||
For each key name, only one thread can run the asynchronous filling at a time.</description>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>hadoop.security.kms.encrypted.key.cache.num.fill.threads</name>
|
||||
<value>2</value>
|
||||
<description>The maximum number of asynchronous threads overall, across key names,
|
||||
allowed to fill the queue in a cache.</description>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>hadoop.security.kms.encrypted.key.cache.expiry</name>
|
||||
<value>43200000</value>
|
||||
<description>The cache expiry time, in milliseconds. Internally Guava cache is used as the cache implementation.
|
||||
The expiry approach is expireAfterAccess</description>
|
||||
</property>
|
||||
</configuration>
|
||||
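For reference, a minimal sketch (not part of the patch) of how these cache settings could be read through Hadoop's `Configuration` API. The class name is hypothetical; the defaults mirror the entries above.

```
import org.apache.hadoop.conf.Configuration;

// Hypothetical helper, only to illustrate the kms-default.xml entries above.
public class KmsCacheSettings {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    int cacheSize = conf.getInt(
        "hadoop.security.kms.encrypted.key.cache.size", 100);
    float lowWatermark = conf.getFloat(
        "hadoop.security.kms.encrypted.key.cache.low.watermark", 0.3f);
    int fillThreads = conf.getInt(
        "hadoop.security.kms.encrypted.key.cache.num.fill.threads", 2);
    long expiryMs = conf.getLong(
        "hadoop.security.kms.encrypted.key.cache.expiry", 43200000L);
    // A get() that leaves fewer than (size * low watermark) EEKs cached for a
    // key name triggers an asynchronous refill, bounded by fillThreads overall.
    System.out.printf("size=%d, watermark=%.1f, threads=%d, expiry=%dms%n",
        cacheSize, lowWatermark, fillThreads, expiryMs);
  }
}
```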
|
|
|
@ -41,12 +41,18 @@ public abstract class Verifier extends RpcAuthInfo {
|
|||
public static Verifier readFlavorAndVerifier(XDR xdr) {
|
||||
AuthFlavor flavor = AuthFlavor.fromValue(xdr.readInt());
|
||||
final Verifier verifer;
|
||||
if(flavor == AuthFlavor.AUTH_NONE) {
|
||||
if (flavor == AuthFlavor.AUTH_NONE) {
|
||||
verifer = new VerifierNone();
|
||||
} else if(flavor == AuthFlavor.RPCSEC_GSS) {
|
||||
} else if (flavor == AuthFlavor.AUTH_SYS) {
|
||||
// Added in HADOOP-15307 based on HDFS-5085:
|
||||
// When the auth flavor is AUTH_SYS, the corresponding verifier is
|
||||
// AUTH_NONE. I.e., it is impossible to have a verifier with auth
|
||||
// flavor AUTH_SYS.
|
||||
verifer = new VerifierNone();
|
||||
} else if (flavor == AuthFlavor.RPCSEC_GSS) {
|
||||
verifer = new VerifierGSS();
|
||||
} else {
|
||||
throw new UnsupportedOperationException("Unsupported verifier flavor"
|
||||
throw new UnsupportedOperationException("Unsupported verifier flavor: "
|
||||
+ flavor);
|
||||
}
|
||||
verifer.read(xdr);
|
||||
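A hedged illustration of the mapping described in the comment above: `readFlavorAndVerifier` is taken from this hunk, while the `XDR` write and read-only helpers and the required imports are assumptions.

```
// Sketch only: encode an AUTH_SYS flavor followed by an empty verifier body,
// then decode it again. Per the comment above, the result is a VerifierNone.
XDR xdr = new XDR();
xdr.writeInt(AuthFlavor.AUTH_SYS.getValue()); // auth flavor on the wire
xdr.writeInt(0);                              // zero-length verifier body
Verifier verifier = Verifier.readFlavorAndVerifier(xdr.asReadOnlyWrap());
assert verifier instanceof VerifierNone;
```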
|
|
|
@ -0,0 +1,17 @@
|
|||
# Licensed to the Apache Software Foundation (ASF) under one
|
||||
# or more contributor license agreements. See the NOTICE file
|
||||
# distributed with this work for additional information
|
||||
# regarding copyright ownership. The ASF licenses this file
|
||||
# to you under the Apache License, Version 2.0 (the
|
||||
# "License"); you may not use this file except in compliance
|
||||
# with the License. You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
HDDS_VERSION=${hdds.version}
|
|
@ -0,0 +1,73 @@
|
|||
<!---
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License. See accompanying LICENSE file.
|
||||
-->
|
||||
|
||||
# Compose files for local performance tests
|
||||
|
||||
This directory contains the docker-compose definition for an Ozone cluster where
|
||||
all the metrics are saved to a Prometheus instance.
|
||||
|
||||
Prometheus follows a pull-based approach where the metrics are published
|
||||
on an HTTP endpoint.
|
||||
|
||||
Our current approach:
|
||||
|
||||
1. A Java agent activates a Prometheus metrics endpoint in every JVM instance
|
||||
(use `init.sh` to download the agent)
|
||||
|
||||
2. The Java agent publishes all the JMX parameters in Prometheus format AND
|
||||
registers the endpoint address with Consul.
|
||||
|
||||
3. Prometheus polls all the endpoints which are registered with Consul.
|
||||
|
||||
|
||||
|
||||
## How to use
|
||||
|
||||
First of all, download the required Java agent by running `./init.sh`
|
||||
|
||||
After that you can start the cluster with docker-compose:
|
||||
|
||||
```
|
||||
docker-compose up -d
|
||||
```
|
||||
|
||||
After a while the cluster will be up. You can check the Ozone web UIs:
|
||||
|
||||
https://localhost:9874
|
||||
https://localhost:9876
|
||||
|
||||
You can also scale up the datanodes:
|
||||
|
||||
```
|
||||
docker-compose scale datanode=3
|
||||
```
|
||||
|
||||
Freon (the Ozone test generator tool) is not part of docker-compose by default;
|
||||
you can activate it by using `compose-all.sh` instead of `docker-compose`:
|
||||
|
||||
```
|
||||
compose-all.sh up -d
|
||||
```
|
||||
|
||||
Now Freon is running. Let's try to check the metrics from the local Prometheus:
|
||||
|
||||
http://localhost:9090/graph
|
||||
|
||||
Example queries:
|
||||
|
||||
```
|
||||
Hadoop_KeySpaceManager_NumKeyCommits
|
||||
rate(Hadoop_KeySpaceManager_NumKeyCommits[10m])
|
||||
rate(Hadoop_Ozone_BYTES_WRITTEN[10m])
|
||||
```
|
|
@ -0,0 +1,18 @@
|
|||
#!/usr/bin/env bash
|
||||
# Licensed to the Apache Software Foundation (ASF) under one
|
||||
# or more contributor license agreements. See the NOTICE file
|
||||
# distributed with this work for additional information
|
||||
# regarding copyright ownership. The ASF licenses this file
|
||||
# to you under the Apache License, Version 2.0 (the
|
||||
# "License"); you may not use this file except in compliance
|
||||
# with the License. You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
docker-compose -f docker-compose.yaml -f docker-compose-freon.yaml "$@"
|
|
@ -0,0 +1,26 @@
|
|||
# Licensed to the Apache Software Foundation (ASF) under one
|
||||
# or more contributor license agreements. See the NOTICE file
|
||||
# distributed with this work for additional information
|
||||
# regarding copyright ownership. The ASF licenses this file
|
||||
# to you under the Apache License, Version 2.0 (the
|
||||
# "License"); you may not use this file except in compliance
|
||||
# with the License. You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
version: "3"
|
||||
services:
|
||||
freon:
|
||||
image: apache/hadoop-runner
|
||||
volumes:
|
||||
- ../../ozone:/opt/hadoop
|
||||
- ./jmxpromo.jar:/opt/jmxpromo.jar
|
||||
env_file:
|
||||
- ./docker-config
|
||||
command: ["/opt/hadoop/bin/ozone","freon"]
|
|
@ -0,0 +1,77 @@
|
|||
# Licensed to the Apache Software Foundation (ASF) under one
|
||||
# or more contributor license agreements. See the NOTICE file
|
||||
# distributed with this work for additional information
|
||||
# regarding copyright ownership. The ASF licenses this file
|
||||
# to you under the Apache License, Version 2.0 (the
|
||||
# "License"); you may not use this file except in compliance
|
||||
# with the License. You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
version: "3"
|
||||
services:
|
||||
namenode:
|
||||
image: apache/hadoop-runner
|
||||
hostname: namenode
|
||||
volumes:
|
||||
- ../../ozone:/opt/hadoop
|
||||
- ./jmxpromo.jar:/opt/jmxpromo.jar
|
||||
ports:
|
||||
- 9870:9870
|
||||
environment:
|
||||
ENSURE_NAMENODE_DIR: /data/namenode
|
||||
env_file:
|
||||
- ./docker-config
|
||||
command: ["/opt/hadoop/bin/hdfs","namenode"]
|
||||
datanode:
|
||||
image: apache/hadoop-runner
|
||||
volumes:
|
||||
- ../../ozone:/opt/hadoop
|
||||
- ./jmxpromo.jar:/opt/jmxpromo.jar
|
||||
ports:
|
||||
- 9864
|
||||
command: ["/opt/hadoop/bin/ozone","datanode"]
|
||||
env_file:
|
||||
- ./docker-config
|
||||
ksm:
|
||||
image: apache/hadoop-runner
|
||||
volumes:
|
||||
- ../../ozone:/opt/hadoop
|
||||
- ./jmxpromo.jar:/opt/jmxpromo.jar
|
||||
ports:
|
||||
- 9874:9874
|
||||
environment:
|
||||
ENSURE_KSM_INITIALIZED: /data/metadata/ksm/current/VERSION
|
||||
env_file:
|
||||
- ./docker-config
|
||||
command: ["/opt/hadoop/bin/ozone","ksm"]
|
||||
scm:
|
||||
image: apache/hadoop-runner
|
||||
volumes:
|
||||
- ../../ozone:/opt/hadoop
|
||||
- ./jmxpromo.jar:/opt/jmxpromo.jar
|
||||
ports:
|
||||
- 9876:9876
|
||||
env_file:
|
||||
- ./docker-config
|
||||
environment:
|
||||
ENSURE_SCM_INITIALIZED: /data/metadata/scm/current/VERSION
|
||||
command: ["/opt/hadoop/bin/ozone","scm"]
|
||||
consul:
|
||||
image: consul
|
||||
command: ["agent", "-dev", "-ui", "-client", "0.0.0.0"]
|
||||
ports:
|
||||
- 8500:8500
|
||||
prometheus:
|
||||
image: prom/prometheus
|
||||
volumes:
|
||||
- "./prometheus.yml:/etc/prometheus.yml"
|
||||
command: ["--config.file","/etc/prometheus.yml"]
|
||||
ports:
|
||||
- 9090:9090
|
|
@ -0,0 +1,37 @@
|
|||
# Licensed to the Apache Software Foundation (ASF) under one
|
||||
# or more contributor license agreements. See the NOTICE file
|
||||
# distributed with this work for additional information
|
||||
# regarding copyright ownership. The ASF licenses this file
|
||||
# to you under the Apache License, Version 2.0 (the
|
||||
# "License"); you may not use this file except in compliance
|
||||
# with the License. You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
CORE-SITE.XML_fs.defaultFS=hdfs://namenode:9000
|
||||
OZONE-SITE.XML_ozone.ksm.address=ksm
|
||||
OZONE-SITE.XML_ozone.scm.names=scm
|
||||
OZONE-SITE.XML_ozone.enabled=True
|
||||
OZONE-SITE.XML_ozone.scm.datanode.id=/data/datanode.id
|
||||
OZONE-SITE.XML_ozone.scm.block.client.address=scm
|
||||
OZONE-SITE.XML_ozone.metadata.dirs=/data/metadata
|
||||
OZONE-SITE.XML_ozone.handler.type=distributed
|
||||
OZONE-SITE.XML_ozone.scm.client.address=scm
|
||||
OZONE-SITE.XML_hdds.datanode.plugins=org.apache.hadoop.ozone.web.OzoneHddsDatanodeService
|
||||
HDFS-SITE.XML_dfs.namenode.rpc-address=namenode:9000
|
||||
HDFS-SITE.XML_dfs.namenode.name.dir=/data/namenode
|
||||
HDFS-SITE.XML_rpc.metrics.quantile.enable=true
|
||||
HDFS-SITE.XML_rpc.metrics.percentiles.intervals=60,300
|
||||
HDFS-SITE.XML_dfs.datanode.plugins=org.apache.hadoop.ozone.HddsDatanodeService
|
||||
LOG4J.PROPERTIES_log4j.rootLogger=INFO, stdout
|
||||
LOG4J.PROPERTIES_log4j.appender.stdout=org.apache.log4j.ConsoleAppender
|
||||
LOG4J.PROPERTIES_log4j.appender.stdout.layout=org.apache.log4j.PatternLayout
|
||||
LOG4J.PROPERTIES_log4j.appender.stdout.layout.ConversionPattern=%d{yyyy-MM-dd HH:mm:ss} %-5p %c{1}:%L - %m%n
|
||||
HADOOP_OPTS=-javaagent:/opt/jmxpromo.jar=port=0:consulHost=consul:consulMode=node
|
||||
|
|
@ -0,0 +1,21 @@
|
|||
#!/usr/bin/env bash
|
||||
# Licensed to the Apache Software Foundation (ASF) under one
|
||||
# or more contributor license agreements. See the NOTICE file
|
||||
# distributed with this work for additional information
|
||||
# regarding copyright ownership. The ASF licenses this file
|
||||
# to you under the Apache License, Version 2.0 (the
|
||||
# "License"); you may not use this file except in compliance
|
||||
# with the License. You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
|
||||
EXPORTER_FILE="$DIR/jmxpromo.jar"
|
||||
if [ ! -f "$EXPORTER_FILE" ]; then
|
||||
wget https://github.com/flokkr/jmxpromo/releases/download/0.11/jmx_prometheus_javaagent-0.11.jar -O $EXPORTER_FILE
|
||||
fi
|
|
@ -0,0 +1,24 @@
|
|||
# Licensed to the Apache Software Foundation (ASF) under one
|
||||
# or more contributor license agreements. See the NOTICE file
|
||||
# distributed with this work for additional information
|
||||
# regarding copyright ownership. The ASF licenses this file
|
||||
# to you under the Apache License, Version 2.0 (the
|
||||
# "License"); you may not use this file except in compliance
|
||||
# with the License. You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
global:
|
||||
scrape_interval: 15s # By default, scrape targets every 15 seconds.
|
||||
|
||||
scrape_configs:
|
||||
- job_name: jmxexporter
|
||||
consul_sd_configs:
|
||||
- server: consul:8500
|
||||
services:
|
||||
- jmxexporter
|
|
@ -55,6 +55,7 @@ public class XceiverClientGrpc extends XceiverClientSpi {
|
|||
private XceiverClientMetrics metrics;
|
||||
private ManagedChannel channel;
|
||||
private final Semaphore semaphore;
|
||||
private boolean closed = false;
|
||||
|
||||
/**
|
||||
* Constructs a client that can communicate with the Container framework on
|
||||
|
@ -105,6 +106,7 @@ public class XceiverClientGrpc extends XceiverClientSpi {
|
|||
|
||||
@Override
|
||||
public void close() {
|
||||
closed = true;
|
||||
channel.shutdownNow();
|
||||
try {
|
||||
channel.awaitTermination(60, TimeUnit.MINUTES);
|
||||
|
@ -153,6 +155,14 @@ public class XceiverClientGrpc extends XceiverClientSpi {
|
|||
public CompletableFuture<ContainerCommandResponseProto>
|
||||
sendCommandAsync(ContainerCommandRequestProto request)
|
||||
throws IOException, ExecutionException, InterruptedException {
|
||||
if(closed){
|
||||
throw new IOException("This channel is not connected.");
|
||||
}
|
||||
|
||||
if(channel == null || !isConnected()) {
|
||||
reconnect();
|
||||
}
|
||||
|
||||
final CompletableFuture<ContainerCommandResponseProto> replyFuture =
|
||||
new CompletableFuture<>();
|
||||
semaphore.acquire();
|
||||
|
@ -192,6 +202,19 @@ public class XceiverClientGrpc extends XceiverClientSpi {
|
|||
return replyFuture;
|
||||
}
|
||||
|
||||
private void reconnect() throws IOException {
|
||||
try {
|
||||
connect();
|
||||
} catch (Exception e) {
|
||||
LOG.error("Error while connecting: ", e);
|
||||
throw new IOException(e);
|
||||
}
|
||||
|
||||
if (channel == null || !isConnected()) {
|
||||
throw new IOException("This channel is not connected.");
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Create a pipeline.
|
||||
*
|
||||
|
|
|
@ -21,7 +21,7 @@ import com.google.common.base.Preconditions;
|
|||
import org.apache.ratis.shaded.io.netty.channel.Channel;
|
||||
import org.apache.ratis.shaded.io.netty.channel.ChannelHandlerContext;
|
||||
import org.apache.ratis.shaded.io.netty.channel.SimpleChannelInboundHandler;
|
||||
import org.apache.commons.lang.StringUtils;
|
||||
import org.apache.commons.lang3.StringUtils;
|
||||
import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;
|
||||
import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
|
||||
import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
|
||||
|
|
|
@ -16,7 +16,7 @@
|
|||
*/
|
||||
package org.apache.hadoop.hdds.client;
|
||||
|
||||
import org.apache.commons.lang.builder.ToStringBuilder;
|
||||
import org.apache.commons.lang3.builder.ToStringBuilder;
|
||||
import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
|
||||
import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
|
||||
|
||||
|
|
|
@ -39,6 +39,8 @@ import com.google.common.annotations.VisibleForTesting;
|
|||
import org.slf4j.Logger;
|
||||
import org.slf4j.LoggerFactory;
|
||||
|
||||
import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_TAGS_SYSTEM_KEY;
|
||||
|
||||
/**
|
||||
* A servlet to print out the running configuration data.
|
||||
*/
|
||||
|
@ -154,7 +156,8 @@ public class HddsConfServlet extends HttpServlet {
|
|||
|
||||
switch (cmd) {
|
||||
case "getOzoneTags":
|
||||
out.write(gson.toJson(config.get("ozone.tags.system").split(",")));
|
||||
out.write(gson.toJson(config.get(OZONE_TAGS_SYSTEM_KEY)
|
||||
.split(",")));
|
||||
break;
|
||||
case "getPropertyByTag":
|
||||
String tags = request.getParameter("tags");
|
||||
|
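As a hedged illustration of what the `getOzoneTags` branch above produces; the property value and the standalone `OzoneConfiguration`/`Gson` setup are hypothetical, not part of the servlet.

```
// Sketch: the servlet serializes the comma-separated value of
// ozone.tags.system as a JSON array.
OzoneConfiguration config = new OzoneConfiguration();
config.set(OzoneConfigKeys.OZONE_TAGS_SYSTEM_KEY, "OZONE,HDFS,SCM"); // made-up value
String json = new Gson().toJson(
    config.get(OzoneConfigKeys.OZONE_TAGS_SYSTEM_KEY).split(","));
// json is ["OZONE","HDFS","SCM"]
```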
|
|
@ -32,6 +32,8 @@ import org.apache.hadoop.util.Time;
|
|||
import java.io.IOException;
|
||||
import java.util.Comparator;
|
||||
|
||||
import static java.lang.Math.max;
|
||||
|
||||
/**
|
||||
* Class wraps ozone container info.
|
||||
*/
|
||||
|
@ -60,6 +62,7 @@ public class ContainerInfo
|
|||
private long stateEnterTime;
|
||||
private String owner;
|
||||
private long containerID;
|
||||
private long deleteTransactionId;
|
||||
ContainerInfo(
|
||||
long containerID,
|
||||
HddsProtos.LifeCycleState state,
|
||||
|
@ -68,7 +71,8 @@ public class ContainerInfo
|
|||
long usedBytes,
|
||||
long numberOfKeys,
|
||||
long stateEnterTime,
|
||||
String owner) {
|
||||
String owner,
|
||||
long deleteTransactionId) {
|
||||
this.containerID = containerID;
|
||||
this.pipeline = pipeline;
|
||||
this.allocatedBytes = allocatedBytes;
|
||||
|
@ -78,6 +82,7 @@ public class ContainerInfo
|
|||
this.state = state;
|
||||
this.stateEnterTime = stateEnterTime;
|
||||
this.owner = owner;
|
||||
this.deleteTransactionId = deleteTransactionId;
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -96,6 +101,7 @@ public class ContainerInfo
|
|||
builder.setStateEnterTime(info.getStateEnterTime());
|
||||
builder.setOwner(info.getOwner());
|
||||
builder.setContainerID(info.getContainerID());
|
||||
builder.setDeleteTransactionId(info.getDeleteTransactionId());
|
||||
return builder.build();
|
||||
}
|
||||
|
||||
|
@ -141,6 +147,14 @@ public class ContainerInfo
|
|||
return numberOfKeys;
|
||||
}
|
||||
|
||||
public long getDeleteTransactionId() {
|
||||
return deleteTransactionId;
|
||||
}
|
||||
|
||||
public void updateDeleteTransactionId(long transactionId) {
|
||||
deleteTransactionId = max(transactionId, deleteTransactionId);
|
||||
}
|
||||
|
||||
public ContainerID containerID() {
|
||||
return new ContainerID(getContainerID());
|
||||
}
|
||||
|
@ -174,6 +188,7 @@ public class ContainerInfo
|
|||
builder.setState(state);
|
||||
builder.setStateEnterTime(stateEnterTime);
|
||||
builder.setContainerID(getContainerID());
|
||||
builder.setDeleteTransactionId(deleteTransactionId);
|
||||
|
||||
if (getOwner() != null) {
|
||||
builder.setOwner(getOwner());
|
||||
|
@ -292,6 +307,7 @@ public class ContainerInfo
|
|||
private long stateEnterTime;
|
||||
private String owner;
|
||||
private long containerID;
|
||||
private long deleteTransactionId;
|
||||
|
||||
public Builder setContainerID(long id) {
|
||||
Preconditions.checkState(id >= 0);
|
||||
|
@ -334,10 +350,15 @@ public class ContainerInfo
|
|||
return this;
|
||||
}
|
||||
|
||||
public Builder setDeleteTransactionId(long deleteTransactionId) {
|
||||
this.deleteTransactionId = deleteTransactionId;
|
||||
return this;
|
||||
}
|
||||
|
||||
public ContainerInfo build() {
|
||||
return new
|
||||
ContainerInfo(containerID, state, pipeline,
|
||||
allocated, used, keys, stateEnterTime, owner);
|
||||
ContainerInfo(containerID, state, pipeline, allocated,
|
||||
used, keys, stateEnterTime, owner, deleteTransactionId);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
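For illustration, a hedged sketch of how the extended `Builder` might be used. The values are made up, and setters other than `setContainerID` and `setDeleteTransactionId` are assumed from the surrounding class rather than shown in this hunk.

```
// Sketch only; Builder setters not visible in this hunk are assumptions.
ContainerInfo info = new ContainerInfo.Builder()
    .setContainerID(42L)
    .setState(HddsProtos.LifeCycleState.OPEN)
    .setOwner("ozone")
    .setDeleteTransactionId(7L)
    .build();
info.updateDeleteTransactionId(9L);  // keeps the larger of the old and new ids
// info.getDeleteTransactionId() == 9L
```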
|
|
@ -31,6 +31,8 @@ import org.apache.ratis.util.TimeDuration;
|
|||
@InterfaceAudience.Public
|
||||
@InterfaceStability.Unstable
|
||||
public final class OzoneConfigKeys {
|
||||
public static final String OZONE_TAGS_SYSTEM_KEY =
|
||||
"ozone.tags.system";
|
||||
public static final String DFS_CONTAINER_IPC_PORT =
|
||||
"dfs.container.ipc";
|
||||
public static final int DFS_CONTAINER_IPC_PORT_DEFAULT = 9859;
|
||||
|
|
|
@ -114,6 +114,8 @@ public final class OzoneConsts {
|
|||
public static final String OZONE_HANDLER_LOCAL = "local";
|
||||
|
||||
public static final String DELETING_KEY_PREFIX = "#deleting#";
|
||||
public static final String DELETED_KEY_PREFIX = "#deleted#";
|
||||
public static final String DELETE_TRANSACTION_KEY_PREFIX = "#delTX#";
|
||||
public static final String OPEN_KEY_PREFIX = "#open#";
|
||||
public static final String OPEN_KEY_ID_DELIMINATOR = "#";
|
||||
|
||||
|
|
|
@ -19,19 +19,30 @@ package org.apache.hadoop.utils;
|
|||
|
||||
import com.google.common.base.Preconditions;
|
||||
import com.google.common.base.Strings;
|
||||
import org.apache.hadoop.hdfs.DFSUtil;
|
||||
import org.apache.hadoop.ozone.OzoneConsts;
|
||||
|
||||
import java.util.ArrayList;
|
||||
import java.util.List;
|
||||
|
||||
/**
|
||||
* A utility class to filter levelDB keys.
|
||||
*/
|
||||
public final class MetadataKeyFilters {
|
||||
|
||||
private static KeyPrefixFilter deletingKeyFilter =
|
||||
new MetadataKeyFilters.KeyPrefixFilter(OzoneConsts.DELETING_KEY_PREFIX);
|
||||
new MetadataKeyFilters.KeyPrefixFilter()
|
||||
.addFilter(OzoneConsts.DELETING_KEY_PREFIX);
|
||||
|
||||
private static KeyPrefixFilter deletedKeyFilter =
|
||||
new MetadataKeyFilters.KeyPrefixFilter()
|
||||
.addFilter(OzoneConsts.DELETED_KEY_PREFIX);
|
||||
|
||||
private static KeyPrefixFilter normalKeyFilter =
|
||||
new MetadataKeyFilters.KeyPrefixFilter(OzoneConsts.DELETING_KEY_PREFIX,
|
||||
true);
|
||||
new MetadataKeyFilters.KeyPrefixFilter()
|
||||
.addFilter(OzoneConsts.DELETING_KEY_PREFIX, true)
|
||||
.addFilter(OzoneConsts.DELETED_KEY_PREFIX, true)
|
||||
.addFilter(OzoneConsts.DELETE_TRANSACTION_KEY_PREFIX, true);
|
||||
|
||||
private MetadataKeyFilters() {
|
||||
}
|
||||
|
@ -40,6 +51,10 @@ public final class MetadataKeyFilters {
|
|||
return deletingKeyFilter;
|
||||
}
|
||||
|
||||
public static KeyPrefixFilter getDeletedKeyFilter() {
|
||||
return deletedKeyFilter;
|
||||
}
|
||||
|
||||
public static KeyPrefixFilter getNormalKeyFilter() {
|
||||
return normalKeyFilter;
|
||||
}
|
||||
|
@ -72,37 +87,95 @@ public final class MetadataKeyFilters {
|
|||
*/
|
||||
public static class KeyPrefixFilter implements MetadataKeyFilter {
|
||||
|
||||
private String keyPrefix = null;
|
||||
private List<String> positivePrefixList = new ArrayList<>();
|
||||
private List<String> negativePrefixList = new ArrayList<>();
|
||||
private boolean atleastOnePositiveMatch;
|
||||
private int keysScanned = 0;
|
||||
private int keysHinted = 0;
|
||||
private Boolean negative;
|
||||
|
||||
public KeyPrefixFilter(String keyPrefix) {
|
||||
this(keyPrefix, false);
|
||||
public KeyPrefixFilter() {}
|
||||
|
||||
/**
|
||||
* KeyPrefixFilter constructor. The filter is made of a positive and a
|
||||
* negative prefix list: positivePrefixList holds the prefixes that are
|
||||
* accepted, whereas negativePrefixList holds the prefixes that are
|
||||
* rejected.
|
||||
*
|
||||
* @param atleastOnePositiveMatch if true, requires a key to be accepted
|
||||
* by at least one positive filter.
|
||||
*/
|
||||
public KeyPrefixFilter(boolean atleastOnePositiveMatch) {
|
||||
this.atleastOnePositiveMatch = atleastOnePositiveMatch;
|
||||
}
|
||||
|
||||
public KeyPrefixFilter(String keyPrefix, boolean negative) {
|
||||
this.keyPrefix = keyPrefix;
|
||||
this.negative = negative;
|
||||
public KeyPrefixFilter addFilter(String keyPrefix) {
|
||||
addFilter(keyPrefix, false);
|
||||
return this;
|
||||
}
|
||||
|
||||
public KeyPrefixFilter addFilter(String keyPrefix, boolean negative) {
|
||||
Preconditions.checkArgument(!Strings.isNullOrEmpty(keyPrefix),
|
||||
"KeyPrefix is null or empty: " + keyPrefix);
|
||||
// The keyPrefix being added must not be a prefix of any opposing
|
||||
// filter already present. If keyPrefix is a negative filter it must not
|
||||
// be a prefix of any positive filter, nor may any opposing filter be
|
||||
// a prefix of keyPrefix.
|
||||
// For example, if b0 is accepted, b cannot be rejected, and
|
||||
// if b is accepted, b0 cannot be rejected. If these scenarios need to be
|
||||
// handled we would need to add priorities.
|
||||
if (negative) {
|
||||
Preconditions.checkArgument(positivePrefixList.stream().noneMatch(
|
||||
prefix -> prefix.startsWith(keyPrefix) || keyPrefix
|
||||
.startsWith(prefix)),
|
||||
"KeyPrefix: " + keyPrefix + " already accepted.");
|
||||
this.negativePrefixList.add(keyPrefix);
|
||||
} else {
|
||||
Preconditions.checkArgument(negativePrefixList.stream().noneMatch(
|
||||
prefix -> prefix.startsWith(keyPrefix) || keyPrefix
|
||||
.startsWith(prefix)),
|
||||
"KeyPrefix: " + keyPrefix + " already rejected.");
|
||||
this.positivePrefixList.add(keyPrefix);
|
||||
}
|
||||
return this;
|
||||
}
|
||||
|
||||
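A short usage sketch of the chained API described above, mirroring the new `testKeyPrefixFilter` unit test later in this patch; the prefixes are illustrative.

```
// Require at least one positive match, accept "a0"/"a1", reject "b".
MetadataKeyFilters.KeyPrefixFilter filter =
    new MetadataKeyFilters.KeyPrefixFilter(true)
        .addFilter("a0")
        .addFilter("a1")
        .addFilter("b", true);
// Adding "b0" as a positive prefix now would throw IllegalArgumentException,
// because "b" is already rejected and is a prefix of "b0".
```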
@Override
|
||||
public boolean filterKey(byte[] preKey, byte[] currentKey,
|
||||
byte[] nextKey) {
|
||||
keysScanned++;
|
||||
boolean accept = false;
|
||||
if (Strings.isNullOrEmpty(keyPrefix)) {
|
||||
accept = true;
|
||||
} else {
|
||||
byte [] prefixBytes = keyPrefix.getBytes();
|
||||
if (currentKey != null && prefixMatch(prefixBytes, currentKey)) {
|
||||
if (currentKey == null) {
|
||||
return false;
|
||||
}
|
||||
boolean accept;
|
||||
|
||||
// There are no filters present
|
||||
if (positivePrefixList.isEmpty() && negativePrefixList.isEmpty()) {
|
||||
return true;
|
||||
}
|
||||
|
||||
accept = !positivePrefixList.isEmpty() && positivePrefixList.stream()
|
||||
.anyMatch(prefix -> {
|
||||
byte[] prefixBytes = DFSUtil.string2Bytes(prefix);
|
||||
return prefixMatch(prefixBytes, currentKey);
|
||||
});
|
||||
if (accept) {
|
||||
keysHinted++;
|
||||
accept = true;
|
||||
} else {
|
||||
accept = false;
|
||||
return true;
|
||||
} else if (atleastOnePositiveMatch) {
|
||||
return false;
|
||||
}
|
||||
|
||||
accept = !negativePrefixList.isEmpty() && negativePrefixList.stream()
|
||||
.allMatch(prefix -> {
|
||||
byte[] prefixBytes = DFSUtil.string2Bytes(prefix);
|
||||
return !prefixMatch(prefixBytes, currentKey);
|
||||
});
|
||||
if (accept) {
|
||||
keysHinted++;
|
||||
return true;
|
||||
}
|
||||
return (negative) ? !accept : accept;
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
@Override
|
||||
|
@ -115,7 +188,7 @@ public final class MetadataKeyFilters {
|
|||
return keysHinted;
|
||||
}
|
||||
|
||||
private boolean prefixMatch(byte[] prefix, byte[] key) {
|
||||
private static boolean prefixMatch(byte[] prefix, byte[] key) {
|
||||
Preconditions.checkNotNull(prefix);
|
||||
Preconditions.checkNotNull(key);
|
||||
if (key.length < prefix.length) {
|
||||
|
|
|
@ -49,12 +49,12 @@ public interface RatisHelper {
|
|||
|
||||
static String toRaftPeerIdString(DatanodeDetails id) {
|
||||
return id.getUuidString() + "_" +
|
||||
id.getPort(DatanodeDetails.Port.Name.RATIS);
|
||||
id.getPort(DatanodeDetails.Port.Name.RATIS).getValue();
|
||||
}
|
||||
|
||||
static String toRaftPeerAddressString(DatanodeDetails id) {
|
||||
return id.getIpAddress() + ":" +
|
||||
id.getPort(DatanodeDetails.Port.Name.RATIS);
|
||||
id.getPort(DatanodeDetails.Port.Name.RATIS).getValue();
|
||||
}
|
||||
|
||||
static RaftPeerId toRaftPeerId(DatanodeDetails id) {
|
||||
|
|
|
@ -146,6 +146,7 @@ message SCMContainerInfo {
|
|||
required uint64 numberOfKeys = 6;
|
||||
optional int64 stateEnterTime = 7;
|
||||
required string owner = 8;
|
||||
optional int64 deleteTransactionId = 9;
|
||||
}
|
||||
|
||||
message GetScmInfoRequestProto {
|
||||
|
|
|
@ -17,6 +17,8 @@
|
|||
*/
|
||||
package org.apache.hadoop.utils;
|
||||
|
||||
import static org.apache.hadoop.test.PlatformAssumptions.assumeNotWindows;
|
||||
|
||||
import com.google.common.collect.Lists;
|
||||
import org.apache.commons.io.FileUtils;
|
||||
import org.apache.commons.lang3.tuple.ImmutablePair;
|
||||
|
@ -81,6 +83,11 @@ public class TestMetadataStore {
|
|||
|
||||
@Before
|
||||
public void init() throws IOException {
|
||||
if (OzoneConfigKeys.OZONE_METADATA_STORE_IMPL_ROCKSDB.equals(storeImpl)) {
|
||||
// The initialization of RocksDB fails on Windows
|
||||
assumeNotWindows();
|
||||
}
|
||||
|
||||
testDir = GenericTestUtils.getTestDir(getClass().getSimpleName()
|
||||
+ "-" + storeImpl.toLowerCase());
|
||||
|
||||
|
@ -153,10 +160,14 @@ public class TestMetadataStore {
|
|||
|
||||
@After
|
||||
public void cleanup() throws IOException {
|
||||
if (store != null) {
|
||||
store.close();
|
||||
store.destroy();
|
||||
}
|
||||
if (testDir != null) {
|
||||
FileUtils.deleteDirectory(testDir);
|
||||
}
|
||||
}
|
||||
|
||||
private byte[] getBytes(String str) {
|
||||
return str == null ? null :
|
||||
|
@ -313,7 +324,7 @@ public class TestMetadataStore {
|
|||
|
||||
// Filter keys by prefix.
|
||||
// It should returns all "b*" entries.
|
||||
MetadataKeyFilter filter1 = new KeyPrefixFilter("b");
|
||||
MetadataKeyFilter filter1 = new KeyPrefixFilter().addFilter("b");
|
||||
result = store.getRangeKVs(null, 100, filter1);
|
||||
Assert.assertEquals(10, result.size());
|
||||
Assert.assertTrue(result.stream().allMatch(entry ->
|
||||
|
@ -460,4 +471,63 @@ public class TestMetadataStore {
|
|||
|
||||
Assert.assertEquals(8, count.get());
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testKeyPrefixFilter() throws IOException {
|
||||
List<Map.Entry<byte[], byte[]>> result = null;
|
||||
RuntimeException exception = null;
|
||||
|
||||
try {
|
||||
new KeyPrefixFilter().addFilter("b0", true).addFilter("b");
|
||||
} catch (IllegalArgumentException e) {
|
||||
exception = e;
|
||||
}
|
||||
Assert.assertTrue(
|
||||
exception.getMessage().contains("KeyPrefix: b already rejected"));
|
||||
|
||||
try {
|
||||
new KeyPrefixFilter().addFilter("b0").addFilter("b", true);
|
||||
} catch (IllegalArgumentException e) {
|
||||
exception = e;
|
||||
}
|
||||
Assert.assertTrue(
|
||||
exception.getMessage().contains("KeyPrefix: b already accepted"));
|
||||
|
||||
try {
|
||||
new KeyPrefixFilter().addFilter("b", true).addFilter("b0");
|
||||
} catch (IllegalArgumentException e) {
|
||||
exception = e;
|
||||
}
|
||||
Assert.assertTrue(
|
||||
exception.getMessage().contains("KeyPrefix: b0 already rejected"));
|
||||
|
||||
try {
|
||||
new KeyPrefixFilter().addFilter("b").addFilter("b0", true);
|
||||
} catch (IllegalArgumentException e) {
|
||||
exception = e;
|
||||
}
|
||||
Assert.assertTrue(
|
||||
exception.getMessage().contains("KeyPrefix: b0 already accepted"));
|
||||
|
||||
MetadataKeyFilter filter1 = new KeyPrefixFilter(true)
|
||||
.addFilter("a0")
|
||||
.addFilter("a1")
|
||||
.addFilter("b", true);
|
||||
result = store.getRangeKVs(null, 100, filter1);
|
||||
Assert.assertEquals(2, result.size());
|
||||
Assert.assertTrue(result.stream()
|
||||
.anyMatch(entry -> new String(entry.getKey()).startsWith("a0"))
|
||||
&& result.stream()
|
||||
.anyMatch(entry -> new String(entry.getKey()).startsWith("a1")));
|
||||
|
||||
filter1 = new KeyPrefixFilter(true).addFilter("b", true);
|
||||
result = store.getRangeKVs(null, 100, filter1);
|
||||
Assert.assertEquals(0, result.size());
|
||||
|
||||
filter1 = new KeyPrefixFilter().addFilter("b", true);
|
||||
result = store.getRangeKVs(null, 100, filter1);
|
||||
Assert.assertEquals(10, result.size());
|
||||
Assert.assertTrue(result.stream()
|
||||
.allMatch(entry -> new String(entry.getKey()).startsWith("a")));
|
||||
}
|
||||
}
|
||||
|
|
|
@ -33,6 +33,8 @@ import java.util.Map;
|
|||
import java.util.TreeMap;
|
||||
import java.util.concurrent.atomic.AtomicLong;
|
||||
|
||||
import static java.lang.Math.max;
|
||||
|
||||
/**
|
||||
* This class maintains the information about a container in the ozone world.
|
||||
* <p>
|
||||
|
@ -57,6 +59,7 @@ public class ContainerData {
|
|||
* Number of pending deletion blocks in container.
|
||||
*/
|
||||
private int numPendingDeletionBlocks;
|
||||
private long deleteTransactionId;
|
||||
private AtomicLong readBytes;
|
||||
private AtomicLong writeBytes;
|
||||
private AtomicLong readCount;
|
||||
|
@ -78,6 +81,7 @@ public class ContainerData {
|
|||
this.containerID = containerID;
|
||||
this.state = ContainerLifeCycleState.OPEN;
|
||||
this.numPendingDeletionBlocks = 0;
|
||||
this.deleteTransactionId = 0;
|
||||
this.readCount = new AtomicLong(0L);
|
||||
this.readBytes = new AtomicLong(0L);
|
||||
this.writeCount = new AtomicLong(0L);
|
||||
|
@ -101,6 +105,7 @@ public class ContainerData {
|
|||
this.containerID = containerID;
|
||||
this.state = state;
|
||||
this.numPendingDeletionBlocks = 0;
|
||||
this.deleteTransactionId = 0;
|
||||
this.readCount = new AtomicLong(0L);
|
||||
this.readBytes = new AtomicLong(0L);
|
||||
this.writeCount = new AtomicLong(0L);
|
||||
|
@ -285,7 +290,8 @@ public class ContainerData {
|
|||
*
|
||||
* @return String Name.
|
||||
*/
|
||||
// TODO: check the ContainerCache class to see if we are using the ContainerID instead.
|
||||
// TODO: check the ContainerCache class to see if
|
||||
// we are using the ContainerID instead.
|
||||
/*
|
||||
public String getName() {
|
||||
return getContainerID();
|
||||
|
@ -424,6 +430,22 @@ public class ContainerData {
|
|||
return this.numPendingDeletionBlocks;
|
||||
}
|
||||
|
||||
/**
|
||||
* Sets deleteTransactionId to latest delete transactionId for the container.
|
||||
*
|
||||
* @param transactionId latest transactionId of the container.
|
||||
*/
|
||||
public void updateDeleteTransactionId(long transactionId) {
|
||||
deleteTransactionId = max(transactionId, deleteTransactionId);
|
||||
}
|
||||
|
||||
/**
|
||||
* Return the latest deleteTransactionId of the container.
|
||||
*/
|
||||
public long getDeleteTransactionId() {
|
||||
return deleteTransactionId;
|
||||
}
|
||||
|
||||
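A tiny hedged illustration of the "keep the latest" semantics documented above; the `ContainerData` constructor arguments are assumptions, since they are not shown in this hunk.

```
// Sketch only; constructor parameters are assumed.
ContainerData data = new ContainerData(1L, conf);
data.updateDeleteTransactionId(5L);
data.updateDeleteTransactionId(3L);   // ignored: 3 < 5
// data.getDeleteTransactionId() == 5L
```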
/**
|
||||
* Get the number of bytes read from the container.
|
||||
* @return the number of bytes read from the container.
|
||||
|
|
|
@ -20,6 +20,8 @@ package org.apache.hadoop.ozone.container.common.helpers;
|
|||
import com.google.common.base.Preconditions;
|
||||
import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerInfo;
|
||||
|
||||
import static java.lang.Math.max;
|
||||
|
||||
/**
|
||||
* Container Report iterates the closed containers and sends a container report
|
||||
* to SCM.
|
||||
|
@ -35,6 +37,7 @@ public class ContainerReport {
|
|||
private long readBytes;
|
||||
private long writeBytes;
|
||||
private long containerID;
|
||||
private long deleteTransactionId;
|
||||
|
||||
public long getContainerID() {
|
||||
return containerID;
|
||||
|
@ -63,6 +66,7 @@ public class ContainerReport {
|
|||
this.readBytes = 0L;
|
||||
this.writeCount = 0L;
|
||||
this.writeBytes = 0L;
|
||||
this.deleteTransactionId = 0;
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -96,6 +100,9 @@ public class ContainerReport {
|
|||
if (info.hasWriteBytes()) {
|
||||
report.setWriteBytes(info.getWriteBytes());
|
||||
}
|
||||
if (info.hasDeleteTransactionId()) {
|
||||
report.updateDeleteTransactionId(info.getDeleteTransactionId());
|
||||
}
|
||||
|
||||
report.setContainerID(info.getContainerID());
|
||||
return report;
|
||||
|
@ -186,6 +193,10 @@ public class ContainerReport {
|
|||
this.bytesUsed = bytesUsed;
|
||||
}
|
||||
|
||||
public void updateDeleteTransactionId(long transactionId) {
|
||||
this.deleteTransactionId = max(transactionId, deleteTransactionId);
|
||||
}
|
||||
|
||||
/**
|
||||
* Gets a containerInfo protobuf message from ContainerReports.
|
||||
*
|
||||
|
@ -202,6 +213,7 @@ public class ContainerReport {
|
|||
.setWriteBytes(this.getWriteBytes())
|
||||
.setFinalhash(this.getFinalhash())
|
||||
.setContainerID(this.getContainerID())
|
||||
.setDeleteTransactionId(this.deleteTransactionId)
|
||||
.build();
|
||||
}
|
||||
}
|
||||
|
|
|
@ -20,11 +20,13 @@ package org.apache.hadoop.ozone.container.common.impl;
|
|||
|
||||
import com.google.common.annotations.VisibleForTesting;
|
||||
import com.google.common.base.Preconditions;
|
||||
import com.google.common.primitives.Longs;
|
||||
import org.apache.commons.io.FileUtils;
|
||||
import org.apache.hadoop.conf.Configuration;
|
||||
import org.apache.hadoop.hdds.scm.ScmConfigKeys;
|
||||
import org.apache.hadoop.hdds.scm.container.common.helpers
|
||||
.StorageContainerException;
|
||||
import org.apache.hadoop.hdfs.DFSUtil;
|
||||
import org.apache.hadoop.hdfs.server.datanode.StorageLocation;
|
||||
import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
|
||||
import org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleState;
|
||||
|
@ -53,8 +55,6 @@ import org.apache.hadoop.ozone.container.common.interfaces
|
|||
import org.apache.hadoop.ozone.container.common.interfaces
|
||||
.ContainerLocationManager;
|
||||
import org.apache.hadoop.ozone.container.common.interfaces.ContainerManager;
|
||||
import org.apache.hadoop.ozone.container.common.interfaces
|
||||
.ContainerReportManager;
|
||||
import org.apache.hadoop.ozone.container.common.interfaces.KeyManager;
|
||||
import org.apache.hadoop.util.ReflectionUtils;
|
||||
import org.apache.hadoop.utils.MetadataKeyFilters;
|
||||
|
@ -127,10 +127,8 @@ public class ContainerManagerImpl implements ContainerManager {
|
|||
private ChunkManager chunkManager;
|
||||
private KeyManager keyManager;
|
||||
private Configuration conf;
|
||||
private DatanodeDetails datanodeDetails;
|
||||
|
||||
private ContainerDeletionChoosingPolicy containerDeletionChooser;
|
||||
private ContainerReportManager containerReportManager;
|
||||
|
||||
/**
|
||||
* Init call that sets up a container Manager.
|
||||
|
@ -154,7 +152,6 @@ public class ContainerManagerImpl implements ContainerManager {
|
|||
" directories must be greater than zero.");
|
||||
|
||||
this.conf = config;
|
||||
this.datanodeDetails = dnDetails;
|
||||
|
||||
readLock();
|
||||
try {
|
||||
|
@ -203,9 +200,6 @@ public class ContainerManagerImpl implements ContainerManager {
|
|||
}
|
||||
this.locationManager =
|
||||
new ContainerLocationManagerImpl(containerDirs, dataDirs, config);
|
||||
|
||||
this.containerReportManager =
|
||||
new ContainerReportManagerImpl(config);
|
||||
} finally {
|
||||
readUnlock();
|
||||
}
|
||||
|
@ -254,12 +248,18 @@ public class ContainerManagerImpl implements ContainerManager {
|
|||
}
|
||||
containerData = ContainerData.getFromProtBuf(containerDataProto, conf);
|
||||
|
||||
// Initialize pending deletion blocks count in in-memory
|
||||
// container status.
|
||||
// Initialize pending deletion blocks and deleted blocks count in
|
||||
// in-memory containerData.
|
||||
MetadataStore metadata = KeyUtils.getDB(containerData, conf);
|
||||
List<Map.Entry<byte[], byte[]>> underDeletionBlocks = metadata
|
||||
.getSequentialRangeKVs(null, Integer.MAX_VALUE,
|
||||
MetadataKeyFilters.getDeletingKeyFilter());
|
||||
byte[] transactionID = metadata.get(DFSUtil.string2Bytes(
|
||||
OzoneConsts.DELETE_TRANSACTION_KEY_PREFIX + containerID));
|
||||
if (transactionID != null) {
|
||||
containerData
|
||||
.updateDeleteTransactionId(Longs.fromByteArray(transactionID));
|
||||
}
|
||||
containerData.incrPendingDeletionBlocks(underDeletionBlocks.size());
|
||||
|
||||
List<Map.Entry<byte[], byte[]>> liveKeys = metadata
|
||||
|
@ -314,7 +314,8 @@ public class ContainerManagerImpl implements ContainerManager {
|
|||
writeLock();
|
||||
try {
|
||||
if (containerMap.containsKey(containerData.getContainerID())) {
|
||||
LOG.debug("container already exists. {}", containerData.getContainerID());
|
||||
LOG.debug("container already exists. {}",
|
||||
containerData.getContainerID());
|
||||
throw new StorageContainerException("container already exists.",
|
||||
CONTAINER_EXISTS);
|
||||
}
|
||||
|
@ -595,7 +596,8 @@ public class ContainerManagerImpl implements ContainerManager {
|
|||
@Override
|
||||
public void updateContainer(long containerID, ContainerData data,
|
||||
boolean forceUpdate) throws StorageContainerException {
|
||||
Preconditions.checkState(containerID >= 0, "Container ID cannot be negative.");
|
||||
Preconditions.checkState(containerID >= 0,
|
||||
"Container ID cannot be negative.");
|
||||
Preconditions.checkNotNull(data, "Container data cannot be null");
|
||||
FileOutputStream containerStream = null;
|
||||
DigestOutputStream dos = null;
|
||||
|
@ -711,7 +713,7 @@ public class ContainerManagerImpl implements ContainerManager {
|
|||
}
|
||||
|
||||
/**
|
||||
* Returns LifeCycle State of the container
|
||||
* Returns LifeCycle State of the container.
|
||||
* @param containerID - Id of the container
|
||||
* @return LifeCycle State of the container
|
||||
* @throws StorageContainerException
|
||||
|
@ -914,7 +916,8 @@ public class ContainerManagerImpl implements ContainerManager {
|
|||
.setWriteCount(container.getWriteCount())
|
||||
.setReadBytes(container.getReadBytes())
|
||||
.setWriteBytes(container.getWriteBytes())
|
||||
.setState(getState(containerId));
|
||||
.setState(getState(containerId))
|
||||
.setDeleteTransactionId(container.getDeleteTransactionId());
|
||||
|
||||
crBuilder.addReports(ciBuilder.build());
|
||||
}
|
||||
|
|
|
@ -1,67 +0,0 @@
|
|||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
* regarding copyright ownership. The ASF licenses this file
|
||||
* to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
package org.apache.hadoop.ozone.container.common.impl;
|
||||
|
||||
import org.apache.commons.lang3.RandomUtils;
|
||||
import org.apache.hadoop.conf.Configuration;
|
||||
import org.apache.hadoop.ozone.OzoneConfigKeys;
|
||||
import org.apache.hadoop.ozone.container.common.interfaces
|
||||
.ContainerReportManager;
|
||||
import org.apache.hadoop.util.Time;
|
||||
|
||||
import java.util.concurrent.TimeUnit;
|
||||
|
||||
import static org.apache.hadoop.hdds.scm.HddsServerUtil.getScmHeartbeatInterval;
|
||||
|
||||
/**
|
||||
* Class wraps the container report operations on datanode.
|
||||
* // TODO: support incremental/delta container report
|
||||
*/
|
||||
public class ContainerReportManagerImpl implements ContainerReportManager {
|
||||
// Last non-empty container report time
|
||||
private long lastContainerReportTime;
|
||||
private final long containerReportInterval;
|
||||
private final long heartbeatInterval;
|
||||
|
||||
public ContainerReportManagerImpl(Configuration config) {
|
||||
this.lastContainerReportTime = -1;
|
||||
this.containerReportInterval = config.getTimeDuration(
|
||||
OzoneConfigKeys.OZONE_CONTAINER_REPORT_INTERVAL,
|
||||
OzoneConfigKeys.OZONE_CONTAINER_REPORT_INTERVAL_DEFAULT,
|
||||
TimeUnit.MILLISECONDS);
|
||||
this.heartbeatInterval = getScmHeartbeatInterval(config);
|
||||
}
|
||||
|
||||
public boolean shouldSendContainerReport() {
|
||||
if (lastContainerReportTime < 0) {
|
||||
return true;
|
||||
}
|
||||
// Add a random delay (0~30s) on top of the container report
|
||||
// interval (60s) so that the SCM is not overwhelmed by the container reports
|
||||
// sent in sync.
|
||||
if (Time.monotonicNow() - lastContainerReportTime >
|
||||
(containerReportInterval + getRandomReportDelay())) {
|
||||
return true;
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
private long getRandomReportDelay() {
|
||||
return RandomUtils.nextLong(0, heartbeatInterval);
|
||||
}
|
||||
}
|
|
@ -72,8 +72,10 @@ public class KeyManagerImpl implements KeyManager {
|
|||
*/
|
||||
@Override
|
||||
public void putKey(KeyData data) throws IOException {
|
||||
Preconditions.checkNotNull(data, "KeyData cannot be null for put operation.");
|
||||
Preconditions.checkState(data.getContainerID() >= 0, "Container ID cannot be negative");
|
||||
Preconditions.checkNotNull(data,
|
||||
"KeyData cannot be null for put operation.");
|
||||
Preconditions.checkState(data.getContainerID() >= 0,
|
||||
"Container ID cannot be negative");
|
||||
containerManager.readLock();
|
||||
try {
|
||||
// We are not locking the key manager since LevelDb serializes all actions
|
||||
|
@ -169,8 +171,10 @@ public class KeyManagerImpl implements KeyManager {
|
|||
public List<KeyData> listKey(
|
||||
long containerID, long startLocalID, int count)
|
||||
throws IOException {
|
||||
Preconditions.checkState(containerID >= 0, "Container ID cannot be negative");
|
||||
Preconditions.checkState(startLocalID >= 0, "startLocal ID cannot be negative");
|
||||
Preconditions.checkState(containerID >= 0,
|
||||
"Container ID cannot be negative");
|
||||
Preconditions.checkState(startLocalID >= 0,
|
||||
"startLocal ID cannot be negative");
|
||||
Preconditions.checkArgument(count > 0,
|
||||
"Count must be a positive number.");
|
||||
ContainerData cData = containerManager.readContainer(containerID);
|
||||
|
|
|
@ -0,0 +1,70 @@
|
|||
/**
|
||||
* Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
* contributor license agreements. See the NOTICE file distributed with this
|
||||
* work for additional information regarding copyright ownership. The ASF
|
||||
* licenses this file to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
* License for the specific language governing permissions and limitations under
|
||||
* the License.
|
||||
*/
|
||||
|
||||
package org.apache.hadoop.ozone.container.common.report;
|
||||
|
||||
import org.apache.commons.lang3.RandomUtils;
|
||||
import org.apache.hadoop.hdds.protocol.proto
|
||||
.StorageContainerDatanodeProtocolProtos.ContainerReportsProto;
|
||||
import org.apache.hadoop.ozone.OzoneConfigKeys;
|
||||
|
||||
import java.util.concurrent.TimeUnit;
|
||||
|
||||
|
||||
/**
|
||||
* Publishes ContainerReport which will be sent to SCM as part of heartbeat.
|
||||
* ContainerReport consists of the following information about each container:
|
||||
* - containerID
|
||||
* - size
|
||||
* - used
|
||||
* - keyCount
|
||||
* - readCount
|
||||
* - writeCount
|
||||
* - readBytes
|
||||
* - writeBytes
|
||||
* - finalHash
|
||||
* - LifeCycleState
|
||||
*
|
||||
*/
|
||||
public class ContainerReportPublisher extends
|
||||
ReportPublisher<ContainerReportsProto> {
|
||||
|
||||
private Long containerReportInterval = null;
|
||||
|
||||
@Override
|
||||
protected long getReportFrequency() {
|
||||
if (containerReportInterval == null) {
|
||||
containerReportInterval = getConf().getTimeDuration(
|
||||
OzoneConfigKeys.OZONE_CONTAINER_REPORT_INTERVAL,
|
||||
OzoneConfigKeys.OZONE_CONTAINER_REPORT_INTERVAL_DEFAULT,
|
||||
TimeUnit.MILLISECONDS);
|
||||
}
|
||||
// Add a random delay (0~30s) on top of the container report
|
||||
// interval (60s) so that the SCM is not overwhelmed by the container reports
|
||||
// sent in sync.
|
||||
return containerReportInterval + getRandomReportDelay();
|
||||
}
|
||||
|
||||
private long getRandomReportDelay() {
|
||||
return RandomUtils.nextLong(0, containerReportInterval);
|
||||
}
|
||||
|
||||
@Override
|
||||
protected ContainerReportsProto getReport() {
|
||||
return ContainerReportsProto.getDefaultInstance();
|
||||
}
|
||||
}
|
|
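A hedged sketch of the scheduling jitter computed above; the interval value is an assumption standing in for `OZONE_CONTAINER_REPORT_INTERVAL_DEFAULT`.

```
// Each publisher schedules its next report after the base interval plus a
// random delay bounded by that interval, so reports from many datanodes do
// not reach the SCM at the same instant.
long containerReportInterval = 60_000L;  // assumed default, in milliseconds
long jitter = RandomUtils.nextLong(0, containerReportInterval);
long nextReportDelayMs = containerReportInterval + jitter;
```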
@@ -0,0 +1,40 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with this
 * work for additional information regarding copyright ownership. The ASF
 * licenses this file to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 * License for the specific language governing permissions and limitations under
 * the License.
 */

package org.apache.hadoop.ozone.container.common.report;

import org.apache.hadoop.hdds.protocol.proto
    .StorageContainerDatanodeProtocolProtos.NodeReportProto;

/**
 * Publishes NodeReport which will be sent to SCM as part of heartbeat.
 * NodeReport consists of:
 *   - NodeIOStats
 *   - VolumeReports
 */
public class NodeReportPublisher extends ReportPublisher<NodeReportProto> {

  @Override
  protected long getReportFrequency() {
    return 90000L;
  }

  @Override
  protected NodeReportProto getReport() {
    return NodeReportProto.getDefaultInstance();
  }
}

@ -0,0 +1,148 @@
|
|||
/**
|
||||
* Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
* contributor license agreements. See the NOTICE file distributed with this
|
||||
* work for additional information regarding copyright ownership. The ASF
|
||||
* licenses this file to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
* License for the specific language governing permissions and limitations under
|
||||
* the License.
|
||||
*/
|
||||
|
||||
package org.apache.hadoop.ozone.container.common.report;
|
||||
|
||||
import com.google.common.base.Preconditions;
|
||||
import com.google.common.util.concurrent.ThreadFactoryBuilder;
|
||||
import com.google.protobuf.GeneratedMessage;
|
||||
import org.apache.hadoop.conf.Configuration;
|
||||
import org.apache.hadoop.ozone.container.common.statemachine.StateContext;
|
||||
import org.apache.hadoop.util.concurrent.HadoopExecutors;
|
||||
|
||||
import java.util.ArrayList;
|
||||
import java.util.List;
|
||||
import java.util.concurrent.ScheduledExecutorService;
|
||||
|
||||
/**
|
||||
* ReportManager is responsible for managing all the {@link ReportPublisher}
|
||||
* and also provides {@link ScheduledExecutorService} to ReportPublisher
|
||||
* which should be used for scheduling the reports.
|
||||
*/
|
||||
public final class ReportManager {
|
||||
|
||||
private final StateContext context;
|
||||
private final List<ReportPublisher> publishers;
|
||||
private final ScheduledExecutorService executorService;
|
||||
|
||||
/**
|
||||
* Construction of {@link ReportManager} should be done via
|
||||
* {@link ReportManager.Builder}.
|
||||
*
|
||||
* @param context StateContext which holds the report
|
||||
* @param publishers List of publishers which generates report
|
||||
*/
|
||||
private ReportManager(StateContext context,
|
||||
List<ReportPublisher> publishers) {
|
||||
this.context = context;
|
||||
this.publishers = publishers;
|
||||
this.executorService = HadoopExecutors.newScheduledThreadPool(
|
||||
publishers.size(),
|
||||
new ThreadFactoryBuilder().setDaemon(true)
|
||||
.setNameFormat("Datanode ReportManager Thread - %d").build());
|
||||
}
|
||||
|
||||
/**
|
||||
* Initializes ReportManager, also initializes all the configured
|
||||
* report publishers.
|
||||
*/
|
||||
public void init() {
|
||||
for (ReportPublisher publisher : publishers) {
|
||||
publisher.init(context, executorService);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Shutdown the ReportManager.
|
||||
*/
|
||||
public void shutdown() {
|
||||
executorService.shutdown();
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns a new {@link ReportManager.Builder} which can be used to construct
|
||||
* {@link ReportManager}.
|
||||
* @param conf - Conf
|
||||
* @return builder - Builder.
|
||||
*/
|
||||
public static Builder newBuilder(Configuration conf) {
|
||||
return new Builder(conf);
|
||||
}
|
||||
|
||||
/**
|
||||
* Builder to construct {@link ReportManager}.
|
||||
*/
|
||||
public static final class Builder {
|
||||
|
||||
private StateContext stateContext;
|
||||
private List<ReportPublisher> reportPublishers;
|
||||
private ReportPublisherFactory publisherFactory;
|
||||
|
||||
|
||||
private Builder(Configuration conf) {
|
||||
this.reportPublishers = new ArrayList<>();
|
||||
this.publisherFactory = new ReportPublisherFactory(conf);
|
||||
}
|
||||
|
||||
/**
|
||||
* Sets the {@link StateContext}.
|
||||
*
|
||||
* @param context StateContext
|
||||
|
||||
* @return ReportManager.Builder
|
||||
*/
|
||||
public Builder setStateContext(StateContext context) {
|
||||
stateContext = context;
|
||||
return this;
|
||||
}
|
||||
|
||||
/**
|
||||
* Adds publisher for the corresponding report.
|
||||
*
|
||||
* @param report report for which publisher needs to be added
|
||||
*
|
||||
* @return ReportManager.Builder
|
||||
*/
|
||||
public Builder addPublisherFor(Class<? extends GeneratedMessage> report) {
|
||||
reportPublishers.add(publisherFactory.getPublisherFor(report));
|
||||
return this;
|
||||
}
|
||||
|
||||
/**
|
||||
* Adds new ReportPublisher to the ReportManager.
|
||||
*
|
||||
* @param publisher ReportPublisher
|
||||
*
|
||||
* @return ReportManager.Builder
|
||||
*/
|
||||
public Builder addPublisher(ReportPublisher publisher) {
|
||||
reportPublishers.add(publisher);
|
||||
return this;
|
||||
}
|
||||
|
||||
/**
|
||||
* Build and returns ReportManager.
|
||||
*
|
||||
* @return {@link ReportManager}
|
||||
*/
|
||||
public ReportManager build() {
|
||||
Preconditions.checkNotNull(stateContext);
|
||||
return new ReportManager(stateContext, reportPublishers);
|
||||
}
|
||||
|
||||
}
|
||||
}
|
|
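A minimal usage sketch of the builder and lifecycle above, assuming a Configuration `conf` and a StateContext `context` are already available (DatanodeStateMachine wires this up the same way later in this change):

// Construct a ReportManager with publishers for the two report types
// registered in ReportPublisherFactory, then start and stop it.
ReportManager reportManager = ReportManager.newBuilder(conf)
    .setStateContext(context)
    .addPublisherFor(NodeReportProto.class)
    .addPublisherFor(ContainerReportsProto.class)
    .build();
reportManager.init();      // schedules every publisher on the shared executor
// ... datanode runs ...
reportManager.shutdown();  // stops the scheduling executor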
@ -0,0 +1,96 @@
|
|||
/**
|
||||
* Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
* contributor license agreements. See the NOTICE file distributed with this
|
||||
* work for additional information regarding copyright ownership. The ASF
|
||||
* licenses this file to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
* License for the specific language governing permissions and limitations under
|
||||
* the License.
|
||||
*/
|
||||
|
||||
package org.apache.hadoop.ozone.container.common.report;
|
||||
|
||||
import com.google.protobuf.GeneratedMessage;
|
||||
import org.apache.hadoop.conf.Configurable;
|
||||
import org.apache.hadoop.conf.Configuration;
|
||||
import org.apache.hadoop.ozone.container.common.statemachine
|
||||
.DatanodeStateMachine.DatanodeStates;
|
||||
import org.apache.hadoop.ozone.container.common.statemachine.StateContext;
|
||||
|
||||
import java.util.concurrent.ScheduledExecutorService;
|
||||
import java.util.concurrent.TimeUnit;
|
||||
|
||||
/**
|
||||
* Abstract class responsible for scheduling the reports based on the
|
||||
* configured interval. All the ReportPublishers should extend this class.
|
||||
*/
|
||||
public abstract class ReportPublisher<T extends GeneratedMessage>
|
||||
implements Configurable, Runnable {
|
||||
|
||||
private Configuration config;
|
||||
private StateContext context;
|
||||
private ScheduledExecutorService executor;
|
||||
|
||||
/**
|
||||
* Initializes ReportPublisher with stateContext and executorService.
|
||||
*
|
||||
* @param stateContext Datanode state context
|
||||
* @param executorService ScheduledExecutorService to schedule reports
|
||||
*/
|
||||
public void init(StateContext stateContext,
|
||||
ScheduledExecutorService executorService) {
|
||||
this.context = stateContext;
|
||||
this.executor = executorService;
|
||||
this.executor.schedule(this,
|
||||
getReportFrequency(), TimeUnit.MILLISECONDS);
|
||||
}
|
||||
|
||||
@Override
|
||||
public void setConf(Configuration conf) {
|
||||
config = conf;
|
||||
}
|
||||
|
||||
@Override
|
||||
public Configuration getConf() {
|
||||
return config;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void run() {
|
||||
publishReport();
|
||||
if (!executor.isShutdown() ||
|
||||
!(context.getState() == DatanodeStates.SHUTDOWN)) {
|
||||
executor.schedule(this,
|
||||
getReportFrequency(), TimeUnit.MILLISECONDS);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Generates and publishes the report to datanode state context.
|
||||
*/
|
||||
private void publishReport() {
|
||||
context.addReport(getReport());
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns the frequency at which this particular report has to be scheduled.
|
||||
*
|
||||
* @return report interval in milliseconds
|
||||
*/
|
||||
protected abstract long getReportFrequency();
|
||||
|
||||
/**
|
||||
* Generate and returns the report which has to be sent as part of heartbeat.
|
||||
*
|
||||
* @return datanode report
|
||||
*/
|
||||
protected abstract T getReport();
|
||||
|
||||
}
|
|
@ -0,0 +1,71 @@
|
|||
/**
|
||||
* Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
* contributor license agreements. See the NOTICE file distributed with this
|
||||
* work for additional information regarding copyright ownership. The ASF
|
||||
* licenses this file to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
* License for the specific language governing permissions and limitations under
|
||||
* the License.
|
||||
*/
|
||||
|
||||
package org.apache.hadoop.ozone.container.common.report;
|
||||
|
||||
import com.google.protobuf.GeneratedMessage;
|
||||
import org.apache.hadoop.conf.Configuration;
|
||||
import org.apache.hadoop.hdds.protocol.proto
|
||||
.StorageContainerDatanodeProtocolProtos.ContainerReportsProto;
|
||||
import org.apache.hadoop.hdds.protocol.proto
|
||||
.StorageContainerDatanodeProtocolProtos.NodeReportProto;
|
||||
import org.apache.hadoop.util.ReflectionUtils;
|
||||
|
||||
import java.util.HashMap;
|
||||
import java.util.Map;
|
||||
|
||||
/**
|
||||
* Factory class to construct {@link ReportPublisher} for a report.
|
||||
*/
|
||||
public class ReportPublisherFactory {
|
||||
|
||||
private final Configuration conf;
|
||||
private final Map<Class<? extends GeneratedMessage>,
|
||||
Class<? extends ReportPublisher>> report2publisher;
|
||||
|
||||
/**
|
||||
* Constructs {@link ReportPublisherFactory} instance.
|
||||
*
|
||||
* @param conf Configuration to be passed to the {@link ReportPublisher}
|
||||
*/
|
||||
public ReportPublisherFactory(Configuration conf) {
|
||||
this.conf = conf;
|
||||
this.report2publisher = new HashMap<>();
|
||||
|
||||
report2publisher.put(NodeReportProto.class, NodeReportPublisher.class);
|
||||
report2publisher.put(ContainerReportsProto.class,
|
||||
ContainerReportPublisher.class);
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns the ReportPublisher for the corresponding report.
|
||||
*
|
||||
* @param report report
|
||||
*
|
||||
* @return report publisher
|
||||
*/
|
||||
public ReportPublisher getPublisherFor(
|
||||
Class<? extends GeneratedMessage> report) {
|
||||
Class<? extends ReportPublisher> publisherClass =
|
||||
report2publisher.get(report);
|
||||
if (publisherClass == null) {
|
||||
throw new RuntimeException("No publisher found for report " + report);
|
||||
}
|
||||
return ReflectionUtils.newInstance(publisherClass, conf);
|
||||
}
|
||||
|
||||
}
|
|
@ -0,0 +1,80 @@
|
|||
/**
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
* regarding copyright ownership. The ASF licenses this file
|
||||
* to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
package org.apache.hadoop.ozone.container.common.report;
|
||||
/**
|
||||
* Datanode Reports: As part of heartbeat, datanode has to share its current
|
||||
* state with SCM. The state of datanode is split into multiple reports which
|
||||
* are sent along with the heartbeat at a configured frequency.
|
||||
*
|
||||
* This package contains code which is responsible for sending reports from
|
||||
* datanode to SCM.
|
||||
*
|
||||
* ReportPublisherFactory: Given a report this constructs corresponding
|
||||
* {@link org.apache.hadoop.ozone.container.common.report.ReportPublisher}.
|
||||
*
|
||||
* ReportManager: Manages and initializes all the available ReportPublishers.
|
||||
*
|
||||
* ReportPublisher: Abstract class responsible for scheduling the reports
|
||||
* based on the configured interval. All the ReportPublishers should extend
|
||||
* {@link org.apache.hadoop.ozone.container.common.report.ReportPublisher}
|
||||
*
|
||||
* How to add new report:
|
||||
*
|
||||
* 1. Create a new ReportPublisher class which extends
|
||||
* {@link org.apache.hadoop.ozone.container.common.report.ReportPublisher}.
|
||||
*
|
||||
* 2. Add a mapping Report to ReportPublisher entry in ReportPublisherFactory.
|
||||
*
|
||||
* 3. In DatanodeStateMachine add the report to ReportManager instance.
|
||||
*
|
||||
*
|
||||
*
|
||||
* Datanode Reports State Diagram:
|
||||
*
|
||||
* DatanodeStateMachine ReportManager ReportPublisher SCM
|
||||
* | | | |
|
||||
* | | | |
|
||||
* | construct | | |
|
||||
* |----------------->| | |
|
||||
* | | | |
|
||||
* | init | | |
|
||||
* |----------------->| | |
|
||||
* | | init | |
|
||||
* | |------------->| |
|
||||
* | | | |
|
||||
* +--------+------------------+--------------+--------------------+------+
|
||||
* |loop | | | | |
|
||||
* | | | publish | | |
|
||||
* | |<-----------------+--------------| | |
|
||||
* | | | report | | |
|
||||
* | | | | | |
|
||||
* | | | | | |
|
||||
* | | heartbeat(rpc) | | | |
|
||||
* | |------------------+--------------+------------------->| |
|
||||
* | | | | | |
|
||||
* | | | | | |
|
||||
* +--------+------------------+--------------+--------------------+------+
|
||||
* | | | |
|
||||
* | | | |
|
||||
* | | | |
|
||||
* | shutdown | | |
|
||||
* |----------------->| | |
|
||||
* | | | |
|
||||
* | | | |
|
||||
* - - - -
|
||||
*/
|
|
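The three steps above, sketched with hypothetical names (MyReportProto and MyReportPublisher are placeholders for illustration only, not part of this change):

// Step 1: a publisher for the new report type.
public class MyReportPublisher extends ReportPublisher<MyReportProto> {
  @Override
  protected long getReportFrequency() {
    return 60000L;                              // report once a minute
  }
  @Override
  protected MyReportProto getReport() {
    return MyReportProto.getDefaultInstance();  // gather the real report here
  }
}

// Step 2: register the mapping in ReportPublisherFactory's constructor.
report2publisher.put(MyReportProto.class, MyReportPublisher.class);

// Step 3: have DatanodeStateMachine's ReportManager schedule it.
reportManager = ReportManager.newBuilder(conf)
    .setStateContext(context)
    .addPublisherFor(MyReportProto.class)
    .build();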
@ -21,7 +21,13 @@ import com.google.common.util.concurrent.ThreadFactoryBuilder;
|
|||
import org.apache.hadoop.conf.Configuration;
|
||||
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
|
||||
import org.apache.hadoop.hdds.protocol.DatanodeDetails;
|
||||
import org.apache.hadoop.ozone.container.common.statemachine.commandhandler.CloseContainerCommandHandler;
|
||||
import org.apache.hadoop.hdds.protocol.proto
|
||||
.StorageContainerDatanodeProtocolProtos.ContainerReportsProto;
|
||||
import org.apache.hadoop.hdds.protocol.proto
|
||||
.StorageContainerDatanodeProtocolProtos.NodeReportProto;
|
||||
import org.apache.hadoop.ozone.container.common.report.ReportManager;
|
||||
import org.apache.hadoop.ozone.container.common.statemachine.commandhandler
|
||||
.CloseContainerCommandHandler;
|
||||
import org.apache.hadoop.ozone.container.common.statemachine.commandhandler
|
||||
.CommandDispatcher;
|
||||
import org.apache.hadoop.ozone.container.common.statemachine.commandhandler
|
||||
|
@ -56,6 +62,7 @@ public class DatanodeStateMachine implements Closeable {
|
|||
private final OzoneContainer container;
|
||||
private DatanodeDetails datanodeDetails;
|
||||
private final CommandDispatcher commandDispatcher;
|
||||
private final ReportManager reportManager;
|
||||
private long commandsHandled;
|
||||
private AtomicLong nextHB;
|
||||
private Thread stateMachineThread = null;
|
||||
|
@ -92,6 +99,12 @@ public class DatanodeStateMachine implements Closeable {
|
|||
.setContainer(container)
|
||||
.setContext(context)
|
||||
.build();
|
||||
|
||||
reportManager = ReportManager.newBuilder(conf)
|
||||
.setStateContext(context)
|
||||
.addPublisherFor(NodeReportProto.class)
|
||||
.addPublisherFor(ContainerReportsProto.class)
|
||||
.build();
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -125,12 +138,12 @@ public class DatanodeStateMachine implements Closeable {
|
|||
long now = 0;
|
||||
|
||||
container.start();
|
||||
reportManager.init();
|
||||
initCommandHandlerThread(conf);
|
||||
while (context.getState() != DatanodeStates.SHUTDOWN) {
|
||||
try {
|
||||
LOG.debug("Executing cycle Number : {}", context.getExecutionCount());
|
||||
nextHB.set(Time.monotonicNow() + heartbeatFrequency);
|
||||
context.setNodeReport(container.getNodeReport());
|
||||
context.execute(executorService, heartbeatFrequency,
|
||||
TimeUnit.MILLISECONDS);
|
||||
now = Time.monotonicNow();
|
||||
|
@ -307,6 +320,7 @@ public class DatanodeStateMachine implements Closeable {
|
|||
public synchronized void stopDaemon() {
|
||||
try {
|
||||
context.setState(DatanodeStates.SHUTDOWN);
|
||||
reportManager.shutdown();
|
||||
this.close();
|
||||
LOG.info("Ozone container server stopped.");
|
||||
} catch (IOException e) {
|
||||
|
@ -389,4 +403,13 @@ public class DatanodeStateMachine implements Closeable {
|
|||
public long getCommandHandled() {
|
||||
return commandsHandled;
|
||||
}
|
||||
|
||||
/**
|
||||
* returns the Command Dispatcher.
|
||||
* @return CommandDispatcher
|
||||
*/
|
||||
@VisibleForTesting
|
||||
public CommandDispatcher getCommandDispatcher() {
|
||||
return commandDispatcher;
|
||||
}
|
||||
}
|
||||
|
|
|
@ -16,9 +16,8 @@
|
|||
*/
|
||||
package org.apache.hadoop.ozone.container.common.statemachine;
|
||||
|
||||
import com.google.protobuf.GeneratedMessage;
|
||||
import org.apache.hadoop.conf.Configuration;
|
||||
import org.apache.hadoop.hdds.protocol.proto
|
||||
.StorageContainerDatanodeProtocolProtos.NodeReportProto;
|
||||
import org.apache.hadoop.ozone.container.common.states.DatanodeState;
|
||||
import org.apache.hadoop.ozone.container.common.states.datanode
|
||||
.InitDatanodeState;
|
||||
|
@ -28,7 +27,9 @@ import org.apache.hadoop.ozone.protocol.commands.SCMCommand;
|
|||
import org.slf4j.Logger;
|
||||
import org.slf4j.LoggerFactory;
|
||||
|
||||
import java.util.ArrayList;
|
||||
import java.util.LinkedList;
|
||||
import java.util.List;
|
||||
import java.util.Queue;
|
||||
import java.util.concurrent.ExecutionException;
|
||||
import java.util.concurrent.ExecutorService;
|
||||
|
@ -51,8 +52,8 @@ public class StateContext {
|
|||
private final DatanodeStateMachine parent;
|
||||
private final AtomicLong stateExecutionCount;
|
||||
private final Configuration conf;
|
||||
private final Queue<GeneratedMessage> reports;
|
||||
private DatanodeStateMachine.DatanodeStates state;
|
||||
private NodeReportProto dnReport;
|
||||
|
||||
/**
|
||||
* Constructs a StateContext.
|
||||
|
@ -67,9 +68,9 @@ public class StateContext {
|
|||
this.state = state;
|
||||
this.parent = parent;
|
||||
commandQueue = new LinkedList<>();
|
||||
reports = new LinkedList<>();
|
||||
lock = new ReentrantLock();
|
||||
stateExecutionCount = new AtomicLong(0);
|
||||
dnReport = NodeReportProto.getDefaultInstance();
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -141,19 +142,53 @@ public class StateContext {
|
|||
}
|
||||
|
||||
/**
|
||||
* Returns the node report of the datanode state context.
|
||||
* @return the node report.
|
||||
* Adds the report to report queue.
|
||||
*
|
||||
* @param report report to be added
|
||||
*/
|
||||
public NodeReportProto getNodeReport() {
|
||||
return dnReport;
|
||||
public void addReport(GeneratedMessage report) {
|
||||
synchronized (reports) {
|
||||
reports.add(report);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Sets the storage location report of the datanode state context.
|
||||
* @param nodeReport node report
|
||||
* Returns the next report, or null if the report queue is empty.
|
||||
*
|
||||
* @return report
|
||||
*/
|
||||
public void setNodeReport(NodeReportProto nodeReport) {
|
||||
this.dnReport = nodeReport;
|
||||
public GeneratedMessage getNextReport() {
|
||||
synchronized (reports) {
|
||||
return reports.poll();
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns all the available reports from the report queue, or empty list if
|
||||
* the queue is empty.
|
||||
*
|
||||
* @return List<reports>
|
||||
*/
|
||||
public List<GeneratedMessage> getAllAvailableReports() {
|
||||
return getReports(Integer.MAX_VALUE);
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns available reports from the report queue with a max limit on
|
||||
* list size, or empty list if the queue is empty.
|
||||
*
|
||||
* @return List<reports>
|
||||
*/
|
||||
public List<GeneratedMessage> getReports(int maxLimit) {
|
||||
List<GeneratedMessage> results = new ArrayList<>();
|
||||
synchronized (reports) {
|
||||
GeneratedMessage report = reports.poll();
|
||||
while(results.size() < maxLimit && report != null) {
|
||||
results.add(report);
|
||||
report = reports.poll();
|
||||
}
|
||||
}
|
||||
return results;
|
||||
}
|
||||
|
||||
/**
|
||||
|
|
|
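A small sketch of how the report queue above is used, assuming a StateContext instance named `context`; publishers enqueue on their own schedule and the heartbeat task drains everything in one call:

// Publisher side (ReportPublisher.publishReport): enqueue the latest report.
context.addReport(ContainerReportsProto.getDefaultInstance());

// Heartbeat side (HeartbeatEndpointTask.addReports): drain all queued
// reports; the queue is left empty for the next heartbeat cycle.
List<GeneratedMessage> pending = context.getAllAvailableReports();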
@ -175,8 +175,8 @@ public class BlockDeletingService extends BackgroundService{
|
|||
// Scan container's db and get list of under deletion blocks
|
||||
MetadataStore meta = KeyUtils.getDB(containerData, conf);
|
||||
// # of blocks to delete is throttled
|
||||
KeyPrefixFilter filter = new KeyPrefixFilter(
|
||||
OzoneConsts.DELETING_KEY_PREFIX);
|
||||
KeyPrefixFilter filter =
|
||||
new KeyPrefixFilter().addFilter(OzoneConsts.DELETING_KEY_PREFIX);
|
||||
List<Map.Entry<byte[], byte[]>> toDeleteBlocks =
|
||||
meta.getSequentialRangeKVs(null, blockLimitPerTask, filter);
|
||||
if (toDeleteBlocks.isEmpty()) {
|
||||
|
@ -214,10 +214,16 @@ public class BlockDeletingService extends BackgroundService{
|
|||
}
|
||||
});
|
||||
|
||||
// Once files are deleted ... clean up DB
|
||||
// Once files are deleted... replace deleting entries with deleted entries
|
||||
BatchOperation batch = new BatchOperation();
|
||||
succeedBlocks.forEach(entry ->
|
||||
batch.delete(DFSUtil.string2Bytes(entry)));
|
||||
succeedBlocks.forEach(entry -> {
|
||||
String blockId =
|
||||
entry.substring(OzoneConsts.DELETING_KEY_PREFIX.length());
|
||||
String deletedEntry = OzoneConsts.DELETED_KEY_PREFIX + blockId;
|
||||
batch.put(DFSUtil.string2Bytes(deletedEntry),
|
||||
DFSUtil.string2Bytes(blockId));
|
||||
batch.delete(DFSUtil.string2Bytes(entry));
|
||||
});
|
||||
meta.writeBatch(batch);
|
||||
// update count of pending deletion blocks in in-memory container status
|
||||
containerManager.decrPendingDeletionBlocks(succeedBlocks.size(),
|
||||
|
|
|
@ -16,6 +16,8 @@
|
|||
*/
|
||||
package org.apache.hadoop.ozone.container.common.statemachine.commandhandler;
|
||||
|
||||
import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
|
||||
import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
|
||||
import org.apache.hadoop.hdds.protocol.proto
|
||||
.StorageContainerDatanodeProtocolProtos.SCMCommandProto;
|
||||
import org.apache.hadoop.hdds.protocol.proto
|
||||
|
@ -29,6 +31,8 @@ import org.apache.hadoop.util.Time;
|
|||
import org.slf4j.Logger;
|
||||
import org.slf4j.LoggerFactory;
|
||||
|
||||
import java.util.UUID;
|
||||
|
||||
/**
|
||||
* Handler for close container command received from SCM.
|
||||
*/
|
||||
|
@ -67,8 +71,23 @@ public class CloseContainerCommandHandler implements CommandHandler {
|
|||
CloseContainerCommandProto
|
||||
.parseFrom(command.getProtoBufMessage());
|
||||
containerID = closeContainerProto.getContainerID();
|
||||
HddsProtos.ReplicationType replicationType =
|
||||
closeContainerProto.getReplicationType();
|
||||
|
||||
container.getContainerManager().closeContainer(containerID);
|
||||
ContainerProtos.CloseContainerRequestProto.Builder closeRequest =
|
||||
ContainerProtos.CloseContainerRequestProto.newBuilder();
|
||||
closeRequest.setContainerID(containerID);
|
||||
|
||||
ContainerProtos.ContainerCommandRequestProto.Builder request =
|
||||
ContainerProtos.ContainerCommandRequestProto.newBuilder();
|
||||
request.setCmdType(ContainerProtos.Type.CloseContainer);
|
||||
request.setCloseContainer(closeRequest);
|
||||
request.setTraceID(UUID.randomUUID().toString());
|
||||
request.setDatanodeUuid(
|
||||
context.getParent().getDatanodeDetails().getUuidString());
|
||||
// submit the close container request for the XceiverServer to handle
|
||||
container.submitContainerRequest(
|
||||
request.build(), replicationType);
|
||||
|
||||
} catch (Exception e) {
|
||||
LOG.error("Can't close container " + containerID, e);
|
||||
|
|
|
@ -77,6 +77,10 @@ public final class CommandDispatcher {
|
|||
}
|
||||
}
|
||||
|
||||
public CommandHandler getCloseContainerHandler() {
|
||||
return handlerMap.get(Type.closeContainerCommand);
|
||||
}
|
||||
|
||||
/**
|
||||
* Dispatch the command to the correct handler.
|
||||
*
|
||||
|
|
|
@ -186,6 +186,9 @@ public class DeleteBlocksCommandHandler implements CommandHandler {
|
|||
LOG.debug("Block {} not found or already under deletion in"
|
||||
+ " container {}, skip deleting it.", blk, containerId);
|
||||
}
|
||||
containerDB.put(DFSUtil.string2Bytes(
|
||||
OzoneConsts.DELETE_TRANSACTION_KEY_PREFIX + delTX.getContainerID()),
|
||||
Longs.toByteArray(delTX.getTxID()));
|
||||
}
|
||||
|
||||
// update pending deletion blocks count in in-memory container status
|
||||
|
|
|
@ -19,6 +19,8 @@
|
|||
package org.apache.hadoop.ozone.container.common.states.endpoint;
|
||||
|
||||
import com.google.common.base.Preconditions;
|
||||
import com.google.protobuf.Descriptors;
|
||||
import com.google.protobuf.GeneratedMessage;
|
||||
import org.apache.hadoop.conf.Configuration;
|
||||
import org.apache.hadoop.hdds.protocol.DatanodeDetails;
|
||||
import org.apache.hadoop.hdds.protocol.proto.HddsProtos.DatanodeDetailsProto;
|
||||
|
@ -99,13 +101,13 @@ public class HeartbeatEndpointTask
|
|||
try {
|
||||
Preconditions.checkState(this.datanodeDetailsProto != null);
|
||||
|
||||
SCMHeartbeatRequestProto request = SCMHeartbeatRequestProto.newBuilder()
|
||||
.setDatanodeDetails(datanodeDetailsProto)
|
||||
.setNodeReport(context.getNodeReport())
|
||||
.build();
|
||||
SCMHeartbeatRequestProto.Builder requestBuilder =
|
||||
SCMHeartbeatRequestProto.newBuilder()
|
||||
.setDatanodeDetails(datanodeDetailsProto);
|
||||
addReports(requestBuilder);
|
||||
|
||||
SCMHeartbeatResponseProto reponse = rpcEndpoint.getEndPoint()
|
||||
.sendHeartbeat(request);
|
||||
.sendHeartbeat(requestBuilder.build());
|
||||
processResponse(reponse, datanodeDetailsProto);
|
||||
rpcEndpoint.setLastSuccessfulHeartbeat(ZonedDateTime.now());
|
||||
rpcEndpoint.zeroMissedCount();
|
||||
|
@ -117,6 +119,24 @@ public class HeartbeatEndpointTask
|
|||
return rpcEndpoint.getState();
|
||||
}
|
||||
|
||||
/**
|
||||
* Adds all the available reports to heartbeat.
|
||||
*
|
||||
* @param requestBuilder builder to which the report has to be added.
|
||||
*/
|
||||
private void addReports(SCMHeartbeatRequestProto.Builder requestBuilder) {
|
||||
for (GeneratedMessage report : context.getAllAvailableReports()) {
|
||||
String reportName = report.getDescriptorForType().getFullName();
|
||||
for (Descriptors.FieldDescriptor descriptor :
|
||||
SCMHeartbeatRequestProto.getDescriptor().getFields()) {
|
||||
String heartbeatFieldName = descriptor.getMessageType().getFullName();
|
||||
if (heartbeatFieldName.equals(reportName)) {
|
||||
requestBuilder.setField(descriptor, report);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns a builder class for HeartbeatEndpointTask task.
|
||||
* @return Builder.
|
||||
|
|
|
@ -18,7 +18,7 @@ package org.apache.hadoop.ozone.container.common.states.endpoint;
|
|||
|
||||
import com.google.common.annotations.VisibleForTesting;
|
||||
import com.google.common.base.Preconditions;
|
||||
import org.apache.commons.lang.StringUtils;
|
||||
import org.apache.commons.lang3.StringUtils;
|
||||
import org.apache.hadoop.conf.Configuration;
|
||||
import org.apache.hadoop.hdds.protocol.DatanodeDetails;
|
||||
import org.apache.hadoop.ozone.container.common.statemachine
|
||||
|
|
|
@ -19,6 +19,7 @@
|
|||
package org.apache.hadoop.ozone.container.common.transport.server;
|
||||
|
||||
import com.google.common.base.Preconditions;
|
||||
import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
|
||||
import org.apache.ratis.shaded.io.netty.bootstrap.ServerBootstrap;
|
||||
import org.apache.ratis.shaded.io.netty.channel.Channel;
|
||||
import org.apache.ratis.shaded.io.netty.channel.EventLoopGroup;
|
||||
|
@ -129,4 +130,10 @@ public final class XceiverServer implements XceiverServerSpi {
|
|||
channel.close().awaitUninterruptibly();
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public void submitRequest(
|
||||
ContainerProtos.ContainerCommandRequestProto request) throws IOException {
|
||||
storageContainer.dispatch(request);
|
||||
}
|
||||
}
|
||||
|
|
|
@ -21,6 +21,7 @@ package org.apache.hadoop.ozone.container.common.transport.server;
|
|||
import com.google.common.base.Preconditions;
|
||||
import org.apache.hadoop.conf.Configuration;
|
||||
import org.apache.hadoop.hdds.protocol.DatanodeDetails;
|
||||
import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
|
||||
import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
|
||||
import org.apache.hadoop.ozone.OzoneConfigKeys;
|
||||
import org.apache.hadoop.ozone.container.common.interfaces.ContainerDispatcher;
|
||||
|
@ -44,6 +45,7 @@ public final class XceiverServerGrpc implements XceiverServerSpi {
|
|||
LOG = LoggerFactory.getLogger(XceiverServerGrpc.class);
|
||||
private int port;
|
||||
private Server server;
|
||||
private final ContainerDispatcher storageContainer;
|
||||
|
||||
/**
|
||||
* Constructs a Grpc server class.
|
||||
|
@ -77,6 +79,7 @@ public final class XceiverServerGrpc implements XceiverServerSpi {
|
|||
.maxMessageSize(OzoneConfigKeys.DFS_CONTAINER_CHUNK_MAX_SIZE)
|
||||
.addService(new GrpcXceiverService(dispatcher))
|
||||
.build();
|
||||
storageContainer = dispatcher;
|
||||
}
|
||||
|
||||
@Override
|
||||
|
@ -103,4 +106,10 @@ public final class XceiverServerGrpc implements XceiverServerSpi {
|
|||
public void stop() {
|
||||
server.shutdown();
|
||||
}
|
||||
|
||||
@Override
|
||||
public void submitRequest(
|
||||
ContainerProtos.ContainerCommandRequestProto request) throws IOException {
|
||||
storageContainer.dispatch(request);
|
||||
}
|
||||
}
|
||||
|
|
|
@ -18,6 +18,7 @@
|
|||
|
||||
package org.apache.hadoop.ozone.container.common.transport.server;
|
||||
|
||||
import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
|
||||
import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
|
||||
|
||||
import java.io.IOException;
|
||||
|
@ -40,4 +41,10 @@ public interface XceiverServerSpi {
|
|||
*/
|
||||
HddsProtos.ReplicationType getServerType();
|
||||
|
||||
/**
|
||||
* submits a containerRequest to be performed by the replication pipeline.
|
||||
* @param request ContainerCommandRequest
|
||||
*/
|
||||
void submitRequest(ContainerProtos.ContainerCommandRequestProto request)
|
||||
throws IOException;
|
||||
}
|
||||
|
|
|
@ -265,7 +265,8 @@ public class ContainerStateMachine extends BaseStateMachine {
|
|||
Message message = runCommand(requestProto);
|
||||
if (cmdType == ContainerProtos.Type.CreateContainer) {
|
||||
long containerID =
|
||||
requestProto.getCreateContainer().getContainerData().getContainerID();
|
||||
requestProto.getCreateContainer()
|
||||
.getContainerData().getContainerID();
|
||||
createContainerFutureMap.remove(containerID).complete(message);
|
||||
}
|
||||
return CompletableFuture.completedFuture(message);
|
||||
|
|
|
@ -18,10 +18,12 @@
|
|||
|
||||
package org.apache.hadoop.ozone.container.common.transport.server.ratis;
|
||||
|
||||
import com.google.common.annotations.VisibleForTesting;
|
||||
import com.google.common.base.Preconditions;
|
||||
import com.google.common.base.Strings;
|
||||
import org.apache.hadoop.conf.Configuration;
|
||||
import org.apache.hadoop.hdds.protocol.DatanodeDetails;
|
||||
import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
|
||||
import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
|
||||
import org.apache.hadoop.ozone.OzoneConfigKeys;
|
||||
import org.apache.hadoop.ozone.container.common.interfaces.ContainerDispatcher;
|
||||
|
@ -33,10 +35,12 @@ import org.apache.ratis.client.RaftClientConfigKeys;
|
|||
import org.apache.ratis.conf.RaftProperties;
|
||||
import org.apache.ratis.grpc.GrpcConfigKeys;
|
||||
import org.apache.ratis.netty.NettyConfigKeys;
|
||||
import org.apache.ratis.protocol.*;
|
||||
import org.apache.ratis.rpc.RpcType;
|
||||
import org.apache.ratis.rpc.SupportedRpcType;
|
||||
import org.apache.ratis.server.RaftServer;
|
||||
import org.apache.ratis.server.RaftServerConfigKeys;
|
||||
import org.apache.ratis.shaded.proto.RaftProtos;
|
||||
import org.apache.ratis.util.SizeInBytes;
|
||||
import org.apache.ratis.util.TimeDuration;
|
||||
import org.slf4j.Logger;
|
||||
|
@ -49,8 +53,10 @@ import java.net.ServerSocket;
|
|||
import java.net.SocketAddress;
|
||||
import java.util.Objects;
|
||||
import java.util.concurrent.ArrayBlockingQueue;
|
||||
import java.util.concurrent.CompletableFuture;
|
||||
import java.util.concurrent.ThreadPoolExecutor;
|
||||
import java.util.concurrent.TimeUnit;
|
||||
import java.util.concurrent.atomic.AtomicLong;
|
||||
|
||||
/**
|
||||
* Creates a ratis server endpoint that acts as the communication layer for
|
||||
|
@ -58,6 +64,12 @@ import java.util.concurrent.TimeUnit;
|
|||
*/
|
||||
public final class XceiverServerRatis implements XceiverServerSpi {
|
||||
static final Logger LOG = LoggerFactory.getLogger(XceiverServerRatis.class);
|
||||
private static final AtomicLong callIdCounter = new AtomicLong();
|
||||
|
||||
private static long nextCallId() {
|
||||
return callIdCounter.getAndIncrement() & Long.MAX_VALUE;
|
||||
}
|
||||
|
||||
private final int port;
|
||||
private final RaftServer server;
|
||||
private ThreadPoolExecutor writeChunkExecutor;
|
||||
|
@ -241,4 +253,46 @@ public final class XceiverServerRatis implements XceiverServerSpi {
|
|||
public HddsProtos.ReplicationType getServerType() {
|
||||
return HddsProtos.ReplicationType.RATIS;
|
||||
}
|
||||
|
||||
@VisibleForTesting
|
||||
public RaftServer getServer() {
|
||||
return server;
|
||||
}
|
||||
|
||||
private void processReply(RaftClientReply reply) {
|
||||
|
||||
// NotLeader exception is thrown only when the raft server to which the
|
||||
// request is submitted is not the leader. The request will be rejected
|
||||
// and will eventually be executed once the request comes via the leader
|
||||
// node.
|
||||
NotLeaderException notLeaderException = reply.getNotLeaderException();
|
||||
if (notLeaderException != null) {
|
||||
LOG.info(reply.getNotLeaderException().getLocalizedMessage());
|
||||
}
|
||||
StateMachineException stateMachineException =
|
||||
reply.getStateMachineException();
|
||||
if (stateMachineException != null) {
|
||||
// In case the request could not be completed, StateMachine Exception
|
||||
// will be thrown. For now, just log the message.
|
||||
// If the container could not be closed, SCM will come to know
|
||||
// via containerReports. CloseContainer should be retried via SCM.
|
||||
LOG.error(stateMachineException.getLocalizedMessage());
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public void submitRequest(
|
||||
ContainerProtos.ContainerCommandRequestProto request) throws IOException {
|
||||
ClientId clientId = ClientId.randomId();
|
||||
RaftClientRequest raftClientRequest =
|
||||
new RaftClientRequest(clientId, server.getId(),
|
||||
RatisHelper.emptyRaftGroup().getGroupId(), nextCallId(), 0,
|
||||
Message.valueOf(request.toByteString()), RaftClientRequest
|
||||
// ReplicationLevel.ALL ensures the transactions corresponding to
|
||||
// the request here are applied on all the raft servers.
|
||||
.writeRequestType(RaftProtos.ReplicationLevel.ALL));
|
||||
CompletableFuture<RaftClientReply> reply =
|
||||
server.submitClientRequestAsync(raftClientRequest);
|
||||
reply.thenAccept(this::processReply);
|
||||
}
|
||||
}
|
|
@ -128,7 +128,8 @@ public final class ContainerCache extends LRUMap {
|
|||
public MetadataStore getDB(long containerID, String containerDBType, String
|
||||
containerDBPath)
|
||||
throws IOException {
|
||||
Preconditions.checkState(containerID >= 0, "Container ID cannot be negative.");
|
||||
Preconditions.checkState(containerID >= 0,
|
||||
"Container ID cannot be negative.");
|
||||
lock.lock();
|
||||
try {
|
||||
MetadataStore db = (MetadataStore) this.get(containerID);
|
||||
|
@ -157,7 +158,8 @@ public final class ContainerCache extends LRUMap {
|
|||
* @param containerID - ID of the container.
|
||||
*/
|
||||
public void removeDB(long containerID) {
|
||||
Preconditions.checkState(containerID >= 0, "Container ID cannot be negative.");
|
||||
Preconditions.checkState(containerID >= 0,
|
||||
"Container ID cannot be negative.");
|
||||
lock.lock();
|
||||
try {
|
||||
MetadataStore db = (MetadataStore)this.get(containerID);
|
||||
|
|
|
@ -18,7 +18,9 @@
|
|||
package org.apache.hadoop.ozone.container.ozoneimpl;
|
||||
|
||||
import com.google.common.annotations.VisibleForTesting;
|
||||
import com.google.common.base.Preconditions;
|
||||
import org.apache.hadoop.conf.Configuration;
|
||||
import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
|
||||
import org.apache.hadoop.hdds.protocol.proto
|
||||
.StorageContainerDatanodeProtocolProtos.ContainerReportsProto;
|
||||
import org.apache.hadoop.hdds.scm.ScmConfigKeys;
|
||||
|
@ -72,7 +74,7 @@ import static org.apache.hadoop.ozone.OzoneConsts.INVALID_PORT;
|
|||
* layer.
|
||||
*/
|
||||
public class OzoneContainer {
|
||||
private static final Logger LOG =
|
||||
public static final Logger LOG =
|
||||
LoggerFactory.getLogger(OzoneContainer.class);
|
||||
|
||||
private final Configuration ozoneConfig;
|
||||
|
@ -269,9 +271,65 @@ public class OzoneContainer {
|
|||
return this.manager.getClosedContainerReports();
|
||||
}
|
||||
|
||||
private XceiverServerSpi getRatisSerer() {
|
||||
for (XceiverServerSpi serverInstance : server) {
|
||||
if (serverInstance instanceof XceiverServerRatis) {
|
||||
return serverInstance;
|
||||
}
|
||||
}
|
||||
return null;
|
||||
}
|
||||
|
||||
private XceiverServerSpi getStandaAloneSerer() {
|
||||
for (XceiverServerSpi serverInstance : server) {
|
||||
if (!(serverInstance instanceof XceiverServerRatis)) {
|
||||
return serverInstance;
|
||||
}
|
||||
}
|
||||
return null;
|
||||
}
|
||||
|
||||
@VisibleForTesting
|
||||
public ContainerManager getContainerManager() {
|
||||
return this.manager;
|
||||
}
|
||||
|
||||
public void submitContainerRequest(
|
||||
ContainerProtos.ContainerCommandRequestProto request,
|
||||
HddsProtos.ReplicationType replicationType) throws IOException {
|
||||
XceiverServerSpi serverInstance;
|
||||
long containerId = getContainerIdForCmd(request);
|
||||
if (replicationType == HddsProtos.ReplicationType.RATIS) {
|
||||
serverInstance = getRatisSerer();
|
||||
Preconditions.checkNotNull(serverInstance);
|
||||
serverInstance.submitRequest(request);
|
||||
LOG.info("submitting {} request over RATIS server for container {}",
|
||||
request.getCmdType(), containerId);
|
||||
} else {
|
||||
serverInstance = getStandaAloneSerer();
|
||||
Preconditions.checkNotNull(serverInstance);
|
||||
getStandaAloneSerer().submitRequest(request);
|
||||
LOG.info(
|
||||
"submitting {} request over STAND_ALONE server for container {}",
|
||||
request.getCmdType(), containerId);
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
private long getContainerIdForCmd(
|
||||
ContainerProtos.ContainerCommandRequestProto request)
|
||||
throws IllegalArgumentException {
|
||||
ContainerProtos.Type type = request.getCmdType();
|
||||
switch (type) {
|
||||
case CloseContainer:
|
||||
return request.getCloseContainer().getContainerID();
|
||||
// Right now, we handle only closeContainer via queuing it over the
|
||||
// XceiverServer. For all other commands we throw Illegal
|
||||
// argument exception here. Will need to extend the switch cases
|
||||
// in case we want to add other commands here.
|
||||
default:
|
||||
throw new IllegalArgumentException("Cmd " + request.getCmdType()
|
||||
+ " not supported over HearBeat Response");
|
||||
}
|
||||
}
|
||||
}
|
|
@ -18,6 +18,7 @@
|
|||
package org.apache.hadoop.ozone.protocol.commands;
|
||||
|
||||
import com.google.common.base.Preconditions;
|
||||
import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
|
||||
import org.apache.hadoop.hdds.protocol.proto
|
||||
.StorageContainerDatanodeProtocolProtos.SCMCommandProto;
|
||||
import org.apache.hadoop.hdds.protocol.proto
|
||||
|
@ -31,9 +32,12 @@ public class CloseContainerCommand
|
|||
extends SCMCommand<CloseContainerCommandProto> {
|
||||
|
||||
private long containerID;
|
||||
private HddsProtos.ReplicationType replicationType;
|
||||
|
||||
public CloseContainerCommand(long containerID) {
|
||||
public CloseContainerCommand(long containerID,
|
||||
HddsProtos.ReplicationType replicationType) {
|
||||
this.containerID = containerID;
|
||||
this.replicationType = replicationType;
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -58,13 +62,15 @@ public class CloseContainerCommand
|
|||
|
||||
public CloseContainerCommandProto getProto() {
|
||||
return CloseContainerCommandProto.newBuilder()
|
||||
.setContainerID(containerID).build();
|
||||
.setContainerID(containerID)
|
||||
.setReplicationType(replicationType).build();
|
||||
}
|
||||
|
||||
public static CloseContainerCommand getFromProtobuf(
|
||||
CloseContainerCommandProto closeContainerProto) {
|
||||
Preconditions.checkNotNull(closeContainerProto);
|
||||
return new CloseContainerCommand(closeContainerProto.getContainerID());
|
||||
return new CloseContainerCommand(closeContainerProto.getContainerID(),
|
||||
closeContainerProto.getReplicationType());
|
||||
|
||||
}
|
||||
|
||||
|
|
|
@ -160,6 +160,7 @@ message ContainerInfo {
|
|||
optional int64 writeBytes = 8;
|
||||
optional string finalhash = 9;
|
||||
optional hadoop.hdds.LifeCycleState state = 10;
|
||||
optional int64 deleteTransactionId = 11;
|
||||
}
|
||||
|
||||
/*
|
||||
|
@ -222,6 +223,7 @@ This command asks the datanode to close a specific container.
|
|||
*/
|
||||
message CloseContainerCommandProto {
|
||||
required int64 containerID = 1;
|
||||
required hadoop.hdds.ReplicationType replicationType = 2;
|
||||
}
|
||||
|
||||
/**
|
||||
|
|
|
@ -16,6 +16,7 @@
|
|||
*/
|
||||
package org.apache.hadoop.ozone.container.common;
|
||||
|
||||
import com.google.common.base.Preconditions;
|
||||
import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos;
|
||||
import org.apache.hadoop.hdds.scm.VersionInfo;
|
||||
import org.apache.hadoop.hdds.protocol.DatanodeDetails;
|
||||
|
@ -179,6 +180,7 @@ public class ScmTestMock implements StorageContainerDatanodeProtocol {
|
|||
List<SCMCommandProto>
|
||||
cmdResponses = new LinkedList<>();
|
||||
return SCMHeartbeatResponseProto.newBuilder().addAllCommands(cmdResponses)
|
||||
.setDatanodeUUID(heartbeat.getDatanodeDetails().getUuid())
|
||||
.build();
|
||||
}
|
||||
|
||||
|
@ -197,6 +199,7 @@ public class ScmTestMock implements StorageContainerDatanodeProtocol {
|
|||
throws IOException {
|
||||
rpcCount.incrementAndGet();
|
||||
updateNodeReport(datanodeDetailsProto, nodeReport);
|
||||
updateContainerReport(containerReportsRequestProto, datanodeDetailsProto);
|
||||
sleepIfNeeded();
|
||||
return StorageContainerDatanodeProtocolProtos.SCMRegisteredResponseProto
|
||||
.newBuilder().setClusterID(UUID.randomUUID().toString())
|
||||
|
@ -227,6 +230,35 @@ public class ScmTestMock implements StorageContainerDatanodeProtocol {
|
|||
|
||||
}
|
||||
|
||||
/**
|
||||
* Update the containerReport.
|
||||
*
|
||||
* @param reports Container report
|
||||
* @param datanodeDetails DataNode Info
|
||||
* @throws IOException
|
||||
*/
|
||||
public void updateContainerReport(
|
||||
StorageContainerDatanodeProtocolProtos.ContainerReportsProto reports,
|
||||
DatanodeDetailsProto datanodeDetails) throws IOException {
|
||||
Preconditions.checkNotNull(reports);
|
||||
containerReportsCount.incrementAndGet();
|
||||
DatanodeDetails datanode = DatanodeDetails.getFromProtoBuf(
|
||||
datanodeDetails);
|
||||
if (reports.getReportsCount() > 0) {
|
||||
Map containers = nodeContainers.get(datanode);
|
||||
if (containers == null) {
|
||||
containers = new LinkedHashMap();
|
||||
nodeContainers.put(datanode, containers);
|
||||
}
|
||||
|
||||
for (StorageContainerDatanodeProtocolProtos.ContainerInfo report : reports
|
||||
.getReportsList()) {
|
||||
containers.put(report.getContainerID(), report);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
/**
|
||||
* Return the number of StorageReports of a datanode.
|
||||
* @param datanodeDetails
|
||||
|
|
|
@ -0,0 +1,22 @@
|
|||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
* regarding copyright ownership. The ASF licenses this file
|
||||
* to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*
|
||||
*/
|
||||
/**
|
||||
* SCM Testing and Mocking Utils.
|
||||
*/
|
||||
package org.apache.hadoop.ozone.container.common;
|
|
@ -0,0 +1,52 @@
|
|||
/**
|
||||
* Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
* contributor license agreements. See the NOTICE file distributed with this
|
||||
* work for additional information regarding copyright ownership. The ASF
|
||||
* licenses this file to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
* License for the specific language governing permissions and limitations under
|
||||
* the License.
|
||||
*/
|
||||
|
||||
package org.apache.hadoop.ozone.container.common.report;
|
||||
|
||||
import org.apache.hadoop.conf.Configuration;
|
||||
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
|
||||
import org.apache.hadoop.ozone.container.common.statemachine.StateContext;
|
||||
import org.junit.Test;
|
||||
import org.mockito.Mockito;
|
||||
|
||||
import java.util.concurrent.ScheduledExecutorService;
|
||||
|
||||
import static org.mockito.ArgumentMatchers.any;
|
||||
import static org.mockito.ArgumentMatchers.eq;
|
||||
import static org.mockito.Mockito.times;
|
||||
import static org.mockito.Mockito.verify;
|
||||
|
||||
/**
|
||||
* Test cases to test {@link ReportManager}.
|
||||
*/
|
||||
public class TestReportManager {
|
||||
|
||||
@Test
|
||||
public void testReportManagerInit() {
|
||||
Configuration conf = new OzoneConfiguration();
|
||||
StateContext dummyContext = Mockito.mock(StateContext.class);
|
||||
ReportPublisher dummyPublisher = Mockito.mock(ReportPublisher.class);
|
||||
ReportManager.Builder builder = ReportManager.newBuilder(conf);
|
||||
builder.setStateContext(dummyContext);
|
||||
builder.addPublisher(dummyPublisher);
|
||||
ReportManager reportManager = builder.build();
|
||||
reportManager.init();
|
||||
verify(dummyPublisher, times(1)).init(eq(dummyContext),
|
||||
any(ScheduledExecutorService.class));
|
||||
|
||||
}
|
||||
}
|
|
@ -0,0 +1,185 @@
|
|||
/**
|
||||
* Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
* contributor license agreements. See the NOTICE file distributed with this
|
||||
* work for additional information regarding copyright ownership. The ASF
|
||||
* licenses this file to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
* License for the specific language governing permissions and limitations under
|
||||
* the License.
|
||||
*/
|
||||
|
||||
package org.apache.hadoop.ozone.container.common.report;
|
||||
|
||||
import com.google.common.util.concurrent.ThreadFactoryBuilder;
|
||||
import com.google.protobuf.Descriptors;
|
||||
import com.google.protobuf.GeneratedMessage;
|
||||
import org.apache.hadoop.conf.Configuration;
|
||||
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
|
||||
import org.apache.hadoop.hdds.protocol.DatanodeDetails;
|
||||
import org.apache.hadoop.hdds.protocol.proto
|
||||
.StorageContainerDatanodeProtocolProtos.ContainerReportsProto;
|
||||
import org.apache.hadoop.hdds.protocol.proto
|
||||
.StorageContainerDatanodeProtocolProtos.NodeReportProto;
|
||||
import org.apache.hadoop.hdds.protocol.proto
|
||||
.StorageContainerDatanodeProtocolProtos.SCMHeartbeatRequestProto;
|
||||
import org.apache.hadoop.ozone.container.common.statemachine.StateContext;
|
||||
import org.apache.hadoop.util.concurrent.HadoopExecutors;
|
||||
import org.junit.Assert;
|
||||
import org.junit.Test;
|
||||
import org.mockito.Mockito;
|
||||
|
||||
import java.util.Random;
|
||||
import java.util.UUID;
|
||||
import java.util.concurrent.ScheduledExecutorService;
|
||||
import java.util.concurrent.TimeUnit;
|
||||
|
||||
import static org.mockito.Mockito.times;
|
||||
import static org.mockito.Mockito.verify;
|
||||
|
||||
/**
|
||||
* Test cases to test {@link ReportPublisher}.
|
||||
*/
|
||||
public class TestReportPublisher {
|
||||
|
||||
/**
|
||||
* Dummy report publisher for testing.
|
||||
*/
|
||||
private class DummyReportPublisher extends ReportPublisher {
|
||||
|
||||
private final long frequency;
|
||||
private int getReportCount = 0;
|
||||
|
||||
DummyReportPublisher(long frequency) {
|
||||
this.frequency = frequency;
|
||||
}
|
||||
|
||||
@Override
|
||||
protected long getReportFrequency() {
|
||||
return frequency;
|
||||
}
|
||||
|
||||
@Override
|
||||
protected GeneratedMessage getReport() {
|
||||
getReportCount++;
|
||||
return null;
|
||||
}
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testReportPublisherInit() {
|
||||
ReportPublisher publisher = new DummyReportPublisher(0);
|
||||
StateContext dummyContext = Mockito.mock(StateContext.class);
|
||||
ScheduledExecutorService dummyExecutorService = Mockito.mock(
|
||||
ScheduledExecutorService.class);
|
||||
publisher.init(dummyContext, dummyExecutorService);
|
||||
verify(dummyExecutorService, times(1)).schedule(publisher,
|
||||
0, TimeUnit.MILLISECONDS);
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testScheduledReport() throws InterruptedException {
|
||||
ReportPublisher publisher = new DummyReportPublisher(100);
|
||||
StateContext dummyContext = Mockito.mock(StateContext.class);
|
||||
ScheduledExecutorService executorService = HadoopExecutors
|
||||
.newScheduledThreadPool(1,
|
||||
new ThreadFactoryBuilder().setDaemon(true)
|
||||
.setNameFormat("Unit test ReportManager Thread - %d").build());
|
||||
publisher.init(dummyContext, executorService);
|
||||
Thread.sleep(150);
|
||||
Assert.assertEquals(1, ((DummyReportPublisher)publisher).getReportCount);
|
||||
Thread.sleep(150);
|
||||
Assert.assertEquals(2, ((DummyReportPublisher)publisher).getReportCount);
|
||||
executorService.shutdown();
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testPublishReport() throws InterruptedException {
|
||||
ReportPublisher publisher = new DummyReportPublisher(100);
|
||||
StateContext dummyContext = Mockito.mock(StateContext.class);
|
||||
ScheduledExecutorService executorService = HadoopExecutors
|
||||
.newScheduledThreadPool(1,
|
||||
new ThreadFactoryBuilder().setDaemon(true)
|
||||
.setNameFormat("Unit test ReportManager Thread - %d").build());
|
||||
publisher.init(dummyContext, executorService);
|
||||
Thread.sleep(150);
|
||||
executorService.shutdown();
|
||||
Assert.assertEquals(1, ((DummyReportPublisher)publisher).getReportCount);
|
||||
verify(dummyContext, times(1)).addReport(null);
|
||||
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testAddingReportToHeartbeat() {
|
||||
Configuration conf = new OzoneConfiguration();
|
||||
ReportPublisherFactory factory = new ReportPublisherFactory(conf);
|
||||
ReportPublisher nodeReportPublisher = factory.getPublisherFor(
|
||||
NodeReportProto.class);
|
||||
ReportPublisher containerReportPublisher = factory.getPublisherFor(
|
||||
ContainerReportsProto.class);
|
||||
GeneratedMessage nodeReport = nodeReportPublisher.getReport();
|
||||
GeneratedMessage containerReport = containerReportPublisher.getReport();
|
||||
SCMHeartbeatRequestProto.Builder heartbeatBuilder =
|
||||
SCMHeartbeatRequestProto.newBuilder();
|
||||
heartbeatBuilder.setDatanodeDetails(
|
||||
getDatanodeDetails().getProtoBufMessage());
|
||||
addReport(heartbeatBuilder, nodeReport);
|
||||
addReport(heartbeatBuilder, containerReport);
|
||||
SCMHeartbeatRequestProto heartbeat = heartbeatBuilder.build();
|
||||
Assert.assertTrue(heartbeat.hasNodeReport());
|
||||
Assert.assertTrue(heartbeat.hasContainerReport());
|
||||
}
|
||||
|
||||
/**
|
||||
* Get a datanode details.
|
||||
*
|
||||
* @return DatanodeDetails
|
||||
*/
|
||||
private static DatanodeDetails getDatanodeDetails() {
|
||||
String uuid = UUID.randomUUID().toString();
|
||||
Random random = new Random();
|
||||
String ipAddress =
|
||||
random.nextInt(256) + "." + random.nextInt(256) + "." + random
|
||||
.nextInt(256) + "." + random.nextInt(256);
|
||||
|
||||
DatanodeDetails.Port containerPort = DatanodeDetails.newPort(
|
||||
DatanodeDetails.Port.Name.STANDALONE, 0);
|
||||
DatanodeDetails.Port ratisPort = DatanodeDetails.newPort(
|
||||
DatanodeDetails.Port.Name.RATIS, 0);
|
||||
DatanodeDetails.Port restPort = DatanodeDetails.newPort(
|
||||
DatanodeDetails.Port.Name.REST, 0);
|
||||
DatanodeDetails.Builder builder = DatanodeDetails.newBuilder();
|
||||
builder.setUuid(uuid)
|
||||
.setHostName("localhost")
|
||||
.setIpAddress(ipAddress)
|
||||
.addPort(containerPort)
|
||||
.addPort(ratisPort)
|
||||
.addPort(restPort);
|
||||
return builder.build();
|
||||
}
|
||||
|
||||
/**
|
||||
* Adds the report to heartbeat.
|
||||
*
|
||||
* @param requestBuilder builder to which the report has to be added.
|
||||
* @param report the report to be added.
|
||||
*/
|
||||
private static void addReport(SCMHeartbeatRequestProto.Builder requestBuilder,
|
||||
GeneratedMessage report) {
|
||||
String reportName = report.getDescriptorForType().getFullName();
|
||||
for (Descriptors.FieldDescriptor descriptor :
|
||||
SCMHeartbeatRequestProto.getDescriptor().getFields()) {
|
||||
String heartbeatFieldName = descriptor.getMessageType().getFullName();
|
||||
if (heartbeatFieldName.equals(reportName)) {
|
||||
requestBuilder.setField(descriptor, report);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
}
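The addReport helper above leans on protobuf reflection: it walks the heartbeat descriptor and fills whichever field shares the report's message type, so new report types need no extra wiring. A standalone sketch of that pattern using only the protobuf Descriptors API follows; the class and method names are illustrative and not part of the patch.

import com.google.protobuf.Descriptors;
import com.google.protobuf.Message;

/**
 * Illustrative helper: copies a report into the first field of the target
 * builder whose declared message type matches the report's type, mirroring
 * the descriptor-matching idea of addReport above.
 */
final class ReportFieldSetter {

  private ReportFieldSetter() {
  }

  static boolean setMatchingField(Message.Builder targetBuilder,
      Message report) {
    String reportType = report.getDescriptorForType().getFullName();
    for (Descriptors.FieldDescriptor field
        : targetBuilder.getDescriptorForType().getFields()) {
      // Only message-typed fields carry a message descriptor to compare.
      if (field.getJavaType() == Descriptors.FieldDescriptor.JavaType.MESSAGE
          && field.getMessageType().getFullName().equals(reportType)) {
        targetBuilder.setField(field, report);
        return true;
      }
    }
    return false;
  }
}

Unlike the test helper, this sketch guards on getJavaType() so scalar heartbeat fields are skipped safely.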
|
|
@ -0,0 +1,68 @@
|
|||
/**
|
||||
* Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
* contributor license agreements. See the NOTICE file distributed with this
|
||||
* work for additional information regarding copyright ownership. The ASF
|
||||
* licenses this file to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
* License for the specific language governing permissions and limitations under
|
||||
* the License.
|
||||
*/
|
||||
|
||||
package org.apache.hadoop.ozone.container.common.report;
|
||||
|
||||
import org.apache.hadoop.conf.Configuration;
|
||||
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
|
||||
import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
|
||||
import org.apache.hadoop.hdds.protocol.proto
|
||||
.StorageContainerDatanodeProtocolProtos.ContainerReportsProto;
|
||||
import org.apache.hadoop.hdds.protocol.proto
|
||||
.StorageContainerDatanodeProtocolProtos.NodeReportProto;
|
||||
import org.junit.Assert;
|
||||
import org.junit.Rule;
|
||||
import org.junit.Test;
|
||||
import org.junit.rules.ExpectedException;
|
||||
|
||||
/**
|
||||
* Test cases to test ReportPublisherFactory.
|
||||
*/
|
||||
public class TestReportPublisherFactory {
|
||||
|
||||
@Rule
|
||||
public ExpectedException exception = ExpectedException.none();
|
||||
|
||||
@Test
|
||||
public void testGetContainerReportPublisher() {
|
||||
Configuration conf = new OzoneConfiguration();
|
||||
ReportPublisherFactory factory = new ReportPublisherFactory(conf);
|
||||
ReportPublisher publisher = factory
|
||||
.getPublisherFor(ContainerReportsProto.class);
|
||||
Assert.assertEquals(ContainerReportPublisher.class, publisher.getClass());
|
||||
Assert.assertEquals(conf, publisher.getConf());
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testGetNodeReportPublisher() {
|
||||
Configuration conf = new OzoneConfiguration();
|
||||
ReportPublisherFactory factory = new ReportPublisherFactory(conf);
|
||||
ReportPublisher publisher = factory
|
||||
.getPublisherFor(NodeReportProto.class);
|
||||
Assert.assertEquals(NodeReportPublisher.class, publisher.getClass());
|
||||
Assert.assertEquals(conf, publisher.getConf());
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testInvalidReportPublisher() {
|
||||
Configuration conf = new OzoneConfiguration();
|
||||
ReportPublisherFactory factory = new ReportPublisherFactory(conf);
|
||||
exception.expect(RuntimeException.class);
|
||||
exception.expectMessage("No publisher found for report");
|
||||
factory.getPublisherFor(HddsProtos.DatanodeDetailsProto.class);
|
||||
}
|
||||
}
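Taken together, these tests pin down the factory contract: a known report type resolves to its dedicated publisher, and an unknown type fails fast with "No publisher found for report". A minimal, generic sketch of such a registry follows; it omits the Configuration wiring that the real factory carries, and the names are assumptions for illustration.

import java.util.HashMap;
import java.util.Map;
import java.util.function.Supplier;

/** Illustrative registry mapping a report class to a publisher supplier. */
final class SimpleReportPublisherRegistry<P> {

  private final Map<Class<?>, Supplier<P>> publishers = new HashMap<>();

  void register(Class<?> reportClass, Supplier<P> supplier) {
    publishers.put(reportClass, supplier);
  }

  P getPublisherFor(Class<?> reportClass) {
    Supplier<P> supplier = publishers.get(reportClass);
    if (supplier == null) {
      // Fail fast, as testInvalidReportPublisher expects.
      throw new RuntimeException(
          "No publisher found for report " + reportClass.getName());
    }
    return supplier.get();
  }
}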
|
|
@ -1,4 +1,4 @@
|
|||
/*
|
||||
/**
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
|
@ -15,16 +15,8 @@
|
|||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
package org.apache.hadoop.ozone.container.common.interfaces;
|
||||
|
||||
package org.apache.hadoop.ozone.container.common.report;
|
||||
/**
|
||||
* Interface for container report manager operations.
|
||||
* This package has test cases for all the report publishers which generate
|
||||
* reports that are sent to SCM via heartbeat.
|
||||
*/
|
||||
public interface ContainerReportManager {
|
||||
|
||||
/**
|
||||
* Check if we have to send container report.
|
||||
* @return true if container report has to be sent.
|
||||
*/
|
||||
boolean shouldSendContainerReport();
|
||||
}
|
|
@ -361,13 +361,10 @@ public class BlockManagerImpl implements BlockManager, BlockmanagerMXBean {
|
|||
}
|
||||
}
|
||||
|
||||
// We update SCM DB first, so if this step fails, we end up here,
|
||||
// nothing gets into the delLog so no blocks will be accidentally
|
||||
// removed. If we write the log first, once log is written, the
|
||||
// async deleting service will start to scan and might be picking
|
||||
// up some blocks to do real deletions, that might cause data loss.
|
||||
try {
|
||||
Map<Long, Long> deleteTransactionsMap =
|
||||
deletedBlockLog.addTransactions(containerBlocks);
|
||||
containerManager.updateDeleteTransactionId(deleteTransactionsMap);
|
||||
} catch (IOException e) {
|
||||
throw new IOException(
|
||||
"Skip writing the deleted blocks info to"
|
||||
|
|
|
@ -108,9 +108,10 @@ public interface DeletedBlockLog extends Closeable {
|
|||
* number of containers) together (on success) or none (on failure).
|
||||
*
|
||||
* @param containerBlocksMap a map of containerBlocks.
|
||||
* @return Mapping from containerId to latest transactionId for the container.
|
||||
* @throws IOException
|
||||
*/
|
||||
void addTransactions(Map<Long, List<Long>> containerBlocksMap)
|
||||
Map<Long, Long> addTransactions(Map<Long, List<Long>> containerBlocksMap)
|
||||
throws IOException;
|
||||
|
||||
/**
|
||||
|
|
|
@ -36,6 +36,7 @@ import java.io.File;
|
|||
import java.io.IOException;
|
||||
import java.util.ArrayList;
|
||||
import java.util.Arrays;
|
||||
import java.util.HashMap;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
import java.util.concurrent.atomic.AtomicInteger;
|
||||
|
@ -306,12 +307,15 @@ public class DeletedBlockLogImpl implements DeletedBlockLog {
|
|||
* {@inheritDoc}
|
||||
*
|
||||
* @param containerBlocksMap a map of containerBlocks.
|
||||
* @return Mapping from containerId to latest transactionId for the container.
|
||||
* @throws IOException
|
||||
*/
|
||||
@Override
|
||||
public void addTransactions(Map<Long, List<Long>> containerBlocksMap)
|
||||
public Map<Long, Long> addTransactions(
|
||||
Map<Long, List<Long>> containerBlocksMap)
|
||||
throws IOException {
|
||||
BatchOperation batch = new BatchOperation();
|
||||
Map<Long, Long> deleteTransactionsMap = new HashMap<>();
|
||||
lock.lock();
|
||||
try {
|
||||
long currentLatestID = lastTxID;
|
||||
|
@ -321,11 +325,13 @@ public class DeletedBlockLogImpl implements DeletedBlockLog {
|
|||
byte[] key = Longs.toByteArray(currentLatestID);
|
||||
DeletedBlocksTransaction tx = constructNewTransaction(currentLatestID,
|
||||
entry.getKey(), entry.getValue());
|
||||
deleteTransactionsMap.put(entry.getKey(), currentLatestID);
|
||||
batch.put(key, tx.toByteArray());
|
||||
}
|
||||
lastTxID = currentLatestID;
|
||||
batch.put(LATEST_TXID, Longs.toByteArray(lastTxID));
|
||||
deletedStore.writeBatch(batch);
|
||||
return deleteTransactionsMap;
|
||||
} finally {
|
||||
lock.unlock();
|
||||
}
|
||||
|
|
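The hunk above changes addTransactions to hand back, per container, the highest transaction id it just assigned, so ContainerMapping can record it. A schematic in-memory analogue of that contract follows; it is not the LevelDB/BatchOperation-backed implementation in the patch, just the id-assignment and return-value shape.

import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.concurrent.locks.ReentrantLock;

/**
 * Illustrative in-memory analogue of addTransactions: each (container, blocks)
 * entry receives the next transaction id, and the caller gets the latest id
 * recorded per container, mirroring the Map<Long, Long> contract above.
 */
final class InMemoryDeletedBlockLog {

  private final ReentrantLock lock = new ReentrantLock();
  private final Map<Long, List<Long>> transactions = new HashMap<>();
  private long lastTxId = 0;

  Map<Long, Long> addTransactions(Map<Long, List<Long>> containerBlocksMap) {
    Map<Long, Long> latestTxIdPerContainer = new HashMap<>();
    lock.lock();
    try {
      long currentLatestId = lastTxId;
      for (Map.Entry<Long, List<Long>> entry : containerBlocksMap.entrySet()) {
        currentLatestId += 1;
        transactions.put(currentLatestId, entry.getValue());
        latestTxIdPerContainer.put(entry.getKey(), currentLatestId);
      }
      // Publish the new high-water mark only after all entries are staged,
      // loosely mirroring the single batched write in DeletedBlockLogImpl.
      lastTxId = currentLatestId;
      return latestTxIdPerContainer;
    } finally {
      lock.unlock();
    }
  }
}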
|
@ -0,0 +1,84 @@
|
|||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
* contributor license agreements. See the NOTICE file distributed with this
|
||||
* work for additional information regarding copyright ownership. The ASF
|
||||
* licenses this file to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
* <p>
|
||||
* <p>http://www.apache.org/licenses/LICENSE-2.0
|
||||
* <p>
|
||||
* <p>Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
* License for the specific language governing permissions and limitations under
|
||||
* the License.
|
||||
*/
|
||||
package org.apache.hadoop.hdds.scm.container;
|
||||
|
||||
import org.apache.hadoop.hdds.protocol.DatanodeDetails;
|
||||
import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
|
||||
import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerInfo;
|
||||
import org.apache.hadoop.hdds.scm.exceptions.SCMException;
|
||||
import org.apache.hadoop.hdds.server.events.EventHandler;
|
||||
import org.apache.hadoop.hdds.server.events.EventPublisher;
|
||||
import org.apache.hadoop.hdds.server.events.TypedEvent;
|
||||
import org.apache.hadoop.ozone.protocol.commands.CloseContainerCommand;
|
||||
import org.slf4j.Logger;
|
||||
import org.slf4j.LoggerFactory;
|
||||
|
||||
/**
|
||||
* In case of a node failure, volume failure, volume out of space, node
|
||||
* out of space etc., CLOSE_CONTAINER_EVENT will be triggered.
|
||||
* CloseContainerEventHandler is the handler for CLOSE_CONTAINER_EVENT.
|
||||
* When a close container event is fired, a close command for the container
|
||||
* should be sent to all the datanodes in the pipeline and containerStateManager
|
||||
* needs to update the container state to Closing.
|
||||
*/
|
||||
public class CloseContainerEventHandler implements EventHandler<ContainerID> {
|
||||
|
||||
public static final Logger LOG =
|
||||
LoggerFactory.getLogger(CloseContainerEventHandler.class);
|
||||
|
||||
public static final TypedEvent<ContainerID> CLOSE_CONTAINER_EVENT =
|
||||
new TypedEvent<>(ContainerID.class);
|
||||
|
||||
private final Mapping containerManager;
|
||||
|
||||
public CloseContainerEventHandler(Mapping containerManager) {
|
||||
this.containerManager = containerManager;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void onMessage(ContainerID containerID, EventPublisher publisher) {
|
||||
|
||||
LOG.info("Close container Event triggered for container : {}",
|
||||
containerID.getId());
|
||||
ContainerStateManager stateManager = containerManager.getStateManager();
|
||||
ContainerInfo info = stateManager.getContainer(containerID);
|
||||
if (info == null) {
|
||||
LOG.info("Container with id : {} does not exist", containerID.getId());
|
||||
return;
|
||||
}
|
||||
if (info.getState() == HddsProtos.LifeCycleState.OPEN) {
|
||||
for (DatanodeDetails datanode : info.getPipeline().getMachines()) {
|
||||
containerManager.getNodeManager().addDatanodeCommand(datanode.getUuid(),
|
||||
new CloseContainerCommand(containerID.getId(),
|
||||
info.getPipeline().getType()));
|
||||
}
|
||||
try {
|
||||
// Finalize event will make sure the state of the container transitions
|
||||
// from OPEN to CLOSING in containerStateManager.
|
||||
stateManager
|
||||
.updateContainerState(info, HddsProtos.LifeCycleEvent.FINALIZE);
|
||||
} catch (SCMException ex) {
|
||||
LOG.error("Failed to update the container state for container : {}"
|
||||
containerID, ex);
|
||||
}
|
||||
} else {
|
||||
LOG.info("container with id : {} is in {} state and need not be closed.",
|
||||
containerID.getId(), info.getState());
|
||||
}
|
||||
|
||||
}
|
||||
}
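CloseContainerEventHandler is meant to be registered against CLOSE_CONTAINER_EVENT on the SCM event queue; firing that event with a ContainerID then drives the close commands and the OPEN to CLOSING transition described in the class comment. A hedged wiring sketch follows: EventQueue and its addHandler/fireEvent methods are assumed from the hdds events package and are not part of this patch.

import org.apache.hadoop.hdds.scm.container.CloseContainerEventHandler;
import org.apache.hadoop.hdds.scm.container.ContainerID;
import org.apache.hadoop.hdds.scm.container.Mapping;
import org.apache.hadoop.hdds.server.events.EventQueue;

/** Illustrative wiring of the close-container event path. */
final class CloseContainerWiringSketch {

  private CloseContainerWiringSketch() {
  }

  static void wireAndFire(Mapping containerMapping, long containerId) {
    // Assumed EventQueue API: addHandler subscribes, fireEvent publishes.
    EventQueue eventQueue = new EventQueue();
    eventQueue.addHandler(CloseContainerEventHandler.CLOSE_CONTAINER_EVENT,
        new CloseContainerEventHandler(containerMapping));
    // For example when a datanode or one of its volumes runs out of space:
    eventQueue.fireEvent(CloseContainerEventHandler.CLOSE_CONTAINER_EVENT,
        ContainerID.valueof(containerId));
  }
}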
|
|
@ -341,6 +341,39 @@ public class ContainerMapping implements Mapping {
|
|||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Update deleteTransactionId according to deleteTransactionMap.
|
||||
*
|
||||
* @param deleteTransactionMap Maps the containerId to latest delete
|
||||
* transaction id for the container.
|
||||
* @throws IOException
|
||||
*/
|
||||
public void updateDeleteTransactionId(Map<Long, Long> deleteTransactionMap)
|
||||
throws IOException {
|
||||
lock.lock();
|
||||
try {
|
||||
for (Map.Entry<Long, Long> entry : deleteTransactionMap.entrySet()) {
|
||||
long containerID = entry.getKey();
|
||||
byte[] dbKey = Longs.toByteArray(containerID);
|
||||
byte[] containerBytes = containerStore.get(dbKey);
|
||||
if (containerBytes == null) {
|
||||
throw new SCMException(
|
||||
"Failed to increment number of deleted blocks for container "
|
||||
+ containerID + ", reason : " + "container doesn't exist.",
|
||||
SCMException.ResultCodes.FAILED_TO_FIND_CONTAINER);
|
||||
}
|
||||
ContainerInfo containerInfo = ContainerInfo.fromProtobuf(
|
||||
HddsProtos.SCMContainerInfo.parseFrom(containerBytes));
|
||||
containerInfo.updateDeleteTransactionId(entry.getValue());
|
||||
containerStore.put(dbKey, containerInfo.getProtobuf().toByteArray());
|
||||
containerStateManager
|
||||
.updateDeleteTransactionId(containerID, entry.getValue());
|
||||
}
|
||||
} finally {
|
||||
lock.unlock();
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns the container State Manager.
|
||||
*
|
||||
|
@ -441,6 +474,7 @@ public class ContainerMapping implements Mapping {
|
|||
builder.setState(knownState.getState());
|
||||
builder.setStateEnterTime(knownState.getStateEnterTime());
|
||||
builder.setContainerID(knownState.getContainerID());
|
||||
builder.setDeleteTransactionId(knownState.getDeleteTransactionId());
|
||||
if (knownState.getOwner() != null) {
|
||||
builder.setOwner(knownState.getOwner());
|
||||
}
|
||||
|
@ -571,6 +605,7 @@ public class ContainerMapping implements Mapping {
|
|||
.setPipeline(oldInfo.getPipeline())
|
||||
.setState(oldInfo.getState())
|
||||
.setUsedBytes(oldInfo.getUsedBytes())
|
||||
.setDeleteTransactionId(oldInfo.getDeleteTransactionId())
|
||||
.build();
|
||||
containerStore.put(dbKey, newInfo.getProtobuf().toByteArray());
|
||||
} else {
|
||||
|
@ -588,6 +623,11 @@ public class ContainerMapping implements Mapping {
|
|||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public NodeManager getNodeManager() {
|
||||
return nodeManager;
|
||||
}
|
||||
|
||||
@VisibleForTesting
|
||||
public MetadataStore getContainerStore() {
|
||||
return containerStore;
|
||||
|
|
|
@ -304,6 +304,7 @@ public class ContainerStateManager implements Closeable {
|
|||
.setStateEnterTime(Time.monotonicNow())
|
||||
.setOwner(owner)
|
||||
.setContainerID(containerCount.incrementAndGet())
|
||||
.setDeleteTransactionId(0)
|
||||
.build();
|
||||
Preconditions.checkNotNull(containerInfo);
|
||||
containers.addContainer(containerInfo);
|
||||
|
@ -351,6 +352,17 @@ public class ContainerStateManager implements Closeable {
|
|||
return containers.getContainerInfo(info);
|
||||
}
|
||||
|
||||
/**
|
||||
* Update deleteTransactionId for a container.
|
||||
*
|
||||
* @param containerID ContainerID of the container whose delete
|
||||
* transactionId needs to be updated.
|
||||
* @param transactionId latest transactionId to be updated for the container
|
||||
*/
|
||||
public void updateDeleteTransactionId(Long containerID, long transactionId) {
|
||||
containers.getContainerMap().get(ContainerID.valueof(containerID))
|
||||
.updateDeleteTransactionId(transactionId);
|
||||
}
|
||||
|
||||
/**
|
||||
* Return a container matching the attributes specified.
|
||||
|
@ -445,6 +457,15 @@ public class ContainerStateManager implements Closeable {
|
|||
factor, type);
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns the containerInfo for the given container id.
|
||||
* @param containerID id of the container
|
||||
* @return ContainerInfo containerInfo
|
||||
* @throws IOException
|
||||
*/
|
||||
public ContainerInfo getContainer(ContainerID containerID) {
|
||||
return containers.getContainerInfo(containerID.getId());
|
||||
}
|
||||
@Override
|
||||
public void close() throws IOException {
|
||||
}
|
||||
|
|
|
@ -21,10 +21,12 @@ import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerInfo;
|
|||
import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
|
||||
import org.apache.hadoop.hdds.protocol.proto
|
||||
.StorageContainerDatanodeProtocolProtos.ContainerReportsProto;
|
||||
import org.apache.hadoop.hdds.scm.node.NodeManager;
|
||||
|
||||
import java.io.Closeable;
|
||||
import java.io.IOException;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
|
||||
/**
|
||||
* Mapping class contains the mapping from a name to a pipeline mapping. This is
|
||||
|
@ -103,4 +105,19 @@ public interface Mapping extends Closeable {
|
|||
ContainerReportsProto reports)
|
||||
throws IOException;
|
||||
|
||||
/**
|
||||
* Update deleteTransactionId according to deleteTransactionMap.
|
||||
*
|
||||
* @param deleteTransactionMap Maps the containerId to latest delete
|
||||
* transaction id for the container.
|
||||
* @throws IOException
|
||||
*/
|
||||
void updateDeleteTransactionId(Map<Long, Long> deleteTransactionMap)
|
||||
throws IOException;
|
||||
|
||||
/**
|
||||
* Returns the nodeManager.
|
||||
* @return NodeManager
|
||||
*/
|
||||
NodeManager getNodeManager();
|
||||
}
|
||||
|
|
|
@ -127,11 +127,12 @@ public class ContainerCloser {
|
|||
// to SCM. In that case also, data node will ignore this command.
|
||||
|
||||
HddsProtos.Pipeline pipeline = info.getPipeline();
|
||||
for (HddsProtos.DatanodeDetailsProto datanodeDetails :
|
||||
pipeline.getPipelineChannel().getMembersList()) {
|
||||
for (HddsProtos.DatanodeDetailsProto datanodeDetails : pipeline
|
||||
.getPipelineChannel().getMembersList()) {
|
||||
nodeManager.addDatanodeCommand(
|
||||
DatanodeDetails.getFromProtoBuf(datanodeDetails).getUuid(),
|
||||
new CloseContainerCommand(info.getContainerID()));
|
||||
new CloseContainerCommand(info.getContainerID(),
|
||||
pipeline.getPipelineChannel().getType()));
|
||||
}
|
||||
if (!commandIssued.containsKey(info.getContainerID())) {
|
||||
commandIssued.put(info.getContainerID(),
|
||||
|
|
|
@ -69,7 +69,7 @@ import static org.apache.hadoop.hdds.protocol.proto
|
|||
|
||||
|
||||
import org.apache.hadoop.hdds.scm.HddsServerUtil;
|
||||
import org.apache.hadoop.hdds.scm.container.placement.metrics.ContainerStat;
|
||||
import org.apache.hadoop.hdds.scm.server.report.SCMDatanodeHeartbeatDispatcher;
|
||||
import org.apache.hadoop.io.IOUtils;
|
||||
import org.apache.hadoop.ipc.ProtobufRpcEngine;
|
||||
import org.apache.hadoop.ipc.RPC;
|
||||
|
@ -114,6 +114,7 @@ public class SCMDatanodeProtocolServer implements
|
|||
|
||||
private final StorageContainerManager scm;
|
||||
private final InetSocketAddress datanodeRpcAddress;
|
||||
private final SCMDatanodeHeartbeatDispatcher heartbeatDispatcher;
|
||||
|
||||
public SCMDatanodeProtocolServer(final OzoneConfiguration conf,
|
||||
StorageContainerManager scm) throws IOException {
|
||||
|
@ -148,16 +149,24 @@ public class SCMDatanodeProtocolServer implements
|
|||
updateRPCListenAddress(
|
||||
conf, OZONE_SCM_DATANODE_ADDRESS_KEY, datanodeRpcAddr,
|
||||
datanodeRpcServer);
|
||||
|
||||
heartbeatDispatcher = SCMDatanodeHeartbeatDispatcher.newBuilder(conf, scm)
|
||||
.addHandlerFor(NodeReportProto.class)
|
||||
.addHandlerFor(ContainerReportsProto.class)
|
||||
.build();
|
||||
}
|
||||
|
||||
public void start() {
|
||||
LOG.info(
|
||||
StorageContainerManager.buildRpcServerStartMessage(
|
||||
"RPC server for DataNodes", datanodeRpcAddress));
|
||||
datanodeRpcServer.start();
|
||||
}
|
||||
|
||||
public InetSocketAddress getDatanodeRpcAddress() {
|
||||
return datanodeRpcAddress;
|
||||
}
|
||||
|
||||
public RPC.Server getDatanodeRpcServer() {
|
||||
return datanodeRpcServer;
|
||||
}
|
||||
|
||||
@Override
|
||||
public SCMVersionResponseProto getVersion(SCMVersionRequestProto
|
||||
versionRequest)
|
||||
|
@ -166,25 +175,6 @@ public class SCMDatanodeProtocolServer implements
|
|||
.getProtobufMessage();
|
||||
}
|
||||
|
||||
@Override
|
||||
public SCMHeartbeatResponseProto sendHeartbeat(
|
||||
SCMHeartbeatRequestProto heartbeat)
|
||||
throws IOException {
|
||||
// TODO: Add a heartbeat dispatcher.
|
||||
DatanodeDetails datanodeDetails = DatanodeDetails
|
||||
.getFromProtoBuf(heartbeat.getDatanodeDetails());
|
||||
NodeReportProto nodeReport = heartbeat.getNodeReport();
|
||||
List<SCMCommand> commands =
|
||||
scm.getScmNodeManager().sendHeartbeat(datanodeDetails, nodeReport);
|
||||
List<SCMCommandProto> cmdResponses = new LinkedList<>();
|
||||
for (SCMCommand cmd : commands) {
|
||||
cmdResponses.add(getCommandResponse(cmd));
|
||||
}
|
||||
return SCMHeartbeatResponseProto.newBuilder()
|
||||
.setDatanodeUUID(datanodeDetails.getUuidString())
|
||||
.addAllCommands(cmdResponses).build();
|
||||
}
|
||||
|
||||
@Override
|
||||
public SCMRegisteredResponseProto register(
|
||||
HddsProtos.DatanodeDetailsProto datanodeDetailsProto,
|
||||
|
@ -216,36 +206,27 @@ public class SCMDatanodeProtocolServer implements
|
|||
.build();
|
||||
}
|
||||
|
||||
public void processContainerReports(DatanodeDetails datanodeDetails,
|
||||
ContainerReportsProto reports)
|
||||
@Override
|
||||
public SCMHeartbeatResponseProto sendHeartbeat(
|
||||
SCMHeartbeatRequestProto heartbeat)
|
||||
throws IOException {
|
||||
updateContainerReportMetrics(datanodeDetails, reports);
|
||||
// should we process container reports async?
|
||||
scm.getScmContainerManager()
|
||||
.processContainerReports(datanodeDetails, reports);
|
||||
}
|
||||
heartbeatDispatcher.dispatch(heartbeat);
|
||||
|
||||
private void updateContainerReportMetrics(DatanodeDetails datanodeDetails,
|
||||
ContainerReportsProto reports) {
|
||||
ContainerStat newStat = new ContainerStat();
|
||||
for (StorageContainerDatanodeProtocolProtos.ContainerInfo info : reports
|
||||
.getReportsList()) {
|
||||
newStat.add(new ContainerStat(info.getSize(), info.getUsed(),
|
||||
info.getKeyCount(), info.getReadBytes(), info.getWriteBytes(),
|
||||
info.getReadCount(), info.getWriteCount()));
|
||||
// TODO: Remove the below code after SCM refactoring.
|
||||
DatanodeDetails datanodeDetails = DatanodeDetails
|
||||
.getFromProtoBuf(heartbeat.getDatanodeDetails());
|
||||
NodeReportProto nodeReport = heartbeat.getNodeReport();
|
||||
List<SCMCommand> commands =
|
||||
scm.getScmNodeManager().sendHeartbeat(datanodeDetails, nodeReport);
|
||||
List<SCMCommandProto> cmdResponses = new LinkedList<>();
|
||||
for (SCMCommand cmd : commands) {
|
||||
cmdResponses.add(getCommandResponse(cmd));
|
||||
}
|
||||
// update container metrics
|
||||
StorageContainerManager.getMetrics().setLastContainerStat(newStat);
|
||||
|
||||
// Update container stat entry, this will trigger a removal operation if it
|
||||
// exists in cache.
|
||||
String datanodeUuid = datanodeDetails.getUuidString();
|
||||
scm.getContainerReportCache().put(datanodeUuid, newStat);
|
||||
// update global view container metrics
|
||||
StorageContainerManager.getMetrics().incrContainerStat(newStat);
|
||||
return SCMHeartbeatResponseProto.newBuilder()
|
||||
.setDatanodeUUID(datanodeDetails.getUuidString())
|
||||
.addAllCommands(cmdResponses).build();
|
||||
}
|
||||
|
||||
|
||||
@Override
|
||||
public ContainerBlocksDeletionACKResponseProto sendContainerBlocksDeletionACK(
|
||||
ContainerBlocksDeletionACKProto acks) throws IOException {
|
||||
|
@ -271,28 +252,6 @@ public class SCMDatanodeProtocolServer implements
|
|||
.getDefaultInstanceForType();
|
||||
}
|
||||
|
||||
public void start() {
|
||||
LOG.info(
|
||||
StorageContainerManager.buildRpcServerStartMessage(
|
||||
"RPC server for DataNodes", getDatanodeRpcAddress()));
|
||||
getDatanodeRpcServer().start();
|
||||
}
|
||||
|
||||
public void stop() {
|
||||
try {
|
||||
LOG.info("Stopping the RPC server for DataNodes");
|
||||
datanodeRpcServer.stop();
|
||||
} catch (Exception ex) {
|
||||
LOG.error(" datanodeRpcServer stop failed.", ex);
|
||||
}
|
||||
IOUtils.cleanupWithLogger(LOG, scm.getScmNodeManager());
|
||||
}
|
||||
|
||||
public void join() throws InterruptedException {
|
||||
LOG.trace("Join RPC server for DataNodes");
|
||||
datanodeRpcServer.join();
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns an SCMCommandResponse from the SCM Command.
|
||||
*
|
||||
|
@ -338,4 +297,22 @@ public class SCMDatanodeProtocolServer implements
|
|||
throw new IllegalArgumentException("Not implemented");
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
public void join() throws InterruptedException {
|
||||
LOG.trace("Join RPC server for DataNodes");
|
||||
datanodeRpcServer.join();
|
||||
}
|
||||
|
||||
public void stop() {
|
||||
try {
|
||||
LOG.info("Stopping the RPC server for DataNodes");
|
||||
datanodeRpcServer.stop();
|
||||
heartbeatDispatcher.shutdown();
|
||||
} catch (Exception ex) {
|
||||
LOG.error(" datanodeRpcServer stop failed.", ex);
|
||||
}
|
||||
IOUtils.cleanupWithLogger(LOG, scm.getScmNodeManager());
|
||||
}
|
||||
|
||||
}
|
||||
|
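With this change sendHeartbeat simply hands the request to SCMDatanodeHeartbeatDispatcher, which was built above with one handler per report type. A standalone sketch of that routing idea, keyed on the protobuf descriptor full name, follows; the Consumer-based handler interface is an assumption for illustration and not the dispatcher's real API.

import java.util.HashMap;
import java.util.Map;
import java.util.function.Consumer;

import com.google.protobuf.Descriptors;
import com.google.protobuf.Message;

/**
 * Illustrative report router: inspects the populated fields of a
 * heartbeat-like message and invokes the handler registered for each
 * report type.
 */
final class HeartbeatReportRouter {

  private final Map<String, Consumer<Message>> handlers = new HashMap<>();

  /** Register a handler for one report type, e.g. NodeReportProto. */
  void addHandlerFor(Descriptors.Descriptor reportType,
      Consumer<Message> handler) {
    handlers.put(reportType.getFullName(), handler);
  }

  /** Route every populated report field of the heartbeat to its handler. */
  void dispatch(Message heartbeat) {
    for (Map.Entry<Descriptors.FieldDescriptor, Object> entry
        : heartbeat.getAllFields().entrySet()) {
      Descriptors.FieldDescriptor field = entry.getKey();
      if (field.getJavaType() != Descriptors.FieldDescriptor.JavaType.MESSAGE
          || field.isRepeated()) {
        continue;
      }
      Consumer<Message> handler =
          handlers.get(field.getMessageType().getFullName());
      if (handler != null) {
        handler.accept((Message) entry.getValue());
      }
    }
  }
}

Registration would then look like addHandlerFor(NodeReportProto.getDescriptor(), report -> ...), analogous to the addHandlerFor(...) builder calls in the constructor above.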
|