Merge trunk to HDFS-4685.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/HDFS-4685@1566100 13f79535-47bb-0310-9956-ffa450edef68

Commit: c6505f0499
@ -300,6 +300,17 @@ prebuildWithoutPatch () {
{color:red}-1 patch{color}. Trunk compilation may be broken."
return 1
fi

echo "$MVN clean test javadoc:javadoc -DskipTests -Pdocs -D${PROJECT_NAME}PatchProcess > $PATCH_DIR/trunkJavadocWarnings.txt 2>&1"
$MVN clean test javadoc:javadoc -DskipTests -Pdocs -D${PROJECT_NAME}PatchProcess > $PATCH_DIR/trunkJavadocWarnings.txt 2>&1
if [[ $? != 0 ]] ; then
echo "Trunk javadoc compilation is broken?"
JIRA_COMMENT="$JIRA_COMMENT

{color:red}-1 patch{color}. Trunk compilation may be broken."
return 1
fi

return 0
}

@ -401,6 +412,11 @@ applyPatch () {
}

###############################################################################
calculateJavadocWarnings() {
WARNING_FILE="$1"
RET=$(egrep "^[0-9]+ warnings$" "$WARNING_FILE" | awk '{sum+=$1} END {print sum}')
}

### Check there are no javadoc warnings
checkJavadocWarnings () {
echo ""

@ -420,24 +436,29 @@ checkJavadocWarnings () {
(cd hadoop-common-project/hadoop-annotations; $MVN install > /dev/null 2>&1)
fi
$MVN clean test javadoc:javadoc -DskipTests -Pdocs -D${PROJECT_NAME}PatchProcess > $PATCH_DIR/patchJavadocWarnings.txt 2>&1
javadocWarnings=`$GREP '\[WARNING\]' $PATCH_DIR/patchJavadocWarnings.txt | $AWK '/Javadoc Warnings/,EOF' | $GREP warning | $AWK 'BEGIN {total = 0} {total += 1} END {print total}'`
echo ""
echo ""
echo "There appear to be $javadocWarnings javadoc warnings generated by the patched build."
calculateJavadocWarnings "$PATCH_DIR/trunkJavadocWarnings.txt"
numTrunkJavadocWarnings=$RET
calculateJavadocWarnings "$PATCH_DIR/patchJavadocWarnings.txt"
numPatchJavadocWarnings=$RET
grep -i warning "$PATCH_DIR/trunkJavadocWarnings.txt" > "$PATCH_DIR/trunkJavadocWarningsFiltered.txt"
grep -i warning "$PATCH_DIR/patchJavadocWarnings.txt" > "$PATCH_DIR/patchJavadocWarningsFiltered.txt"
diff -u "$PATCH_DIR/trunkJavadocWarningsFiltered.txt" \
"$PATCH_DIR/patchJavadocWarningsFiltered.txt" > \
"$PATCH_DIR/diffJavadocWarnings.txt"
rm -f "$PATCH_DIR/trunkJavadocWarningsFiltered.txt" "$PATCH_DIR/patchJavadocWarningsFiltered.txt"
echo "There appear to be $numTrunkJavadocWarnings javadoc warnings before the patch and $numPatchJavadocWarnings javadoc warnings after applying the patch."
if [[ $numTrunkJavadocWarnings != "" && $numPatchJavadocWarnings != "" ]] ; then
if [[ $numPatchJavadocWarnings -gt $numTrunkJavadocWarnings ]] ; then
JIRA_COMMENT="$JIRA_COMMENT

#There are 12 warnings that are caused by things that are caused by using sun internal APIs.
#There are 2 warnings that are caused by the Apache DS Dn class used in MiniKdc.
OK_JAVADOC_WARNINGS=14;
### if current warnings greater than OK_JAVADOC_WARNINGS
if [[ $javadocWarnings -ne $OK_JAVADOC_WARNINGS ]] ; then
JIRA_COMMENT="$JIRA_COMMENT

{color:red}-1 javadoc{color}. The javadoc tool appears to have generated `expr $(($javadocWarnings-$OK_JAVADOC_WARNINGS))` warning messages."
return 1
{color:red}-1 javadoc{color}. The javadoc tool appears to have generated `expr $(($numPatchJavadocWarnings-$numTrunkJavadocWarnings))` warning messages.
See $BUILD_URL/artifact/trunk/patchprocess/diffJavadocWarnings.txt for details."
return 1
fi
fi
JIRA_COMMENT="$JIRA_COMMENT

{color:green}+1 javadoc{color}. The javadoc tool did not generate any warning messages."
{color:green}+1 javadoc{color}. There were no new javadoc warning messages."
return 0
}

@ -113,6 +113,11 @@ Trunk (Unreleased)

HADOOP-10177. Create CLI tools for managing keys. (Larry McCay via omalley)

HADOOP-10244. TestKeyShell improperly tests the results of delete (Larry
McCay via omalley)

HADOOP-10325. Improve jenkins javadoc warnings from test-patch.sh (cmccabe)

BUG FIXES

HADOOP-9451. Fault single-layer config if node group topology is enabled.

@ -313,6 +318,15 @@ Release 2.4.0 - UNRELEASED

HADOOP-10320. Javadoc in InterfaceStability.java lacks final </ul>.
(René Nyffenegger via cnauroth)

HADOOP-10085. CompositeService should allow adding services while being
inited. (Steve Loughran via kasha)

HADOOP-10327. Trunk windows build broken after HDFS-5746.
(Vinay via cnauroth)

HADOOP-10330. TestFrameDecoder fails if it cannot bind port 12345.
(Arpit Agarwal)

Release 2.3.0 - UNRELEASED

@ -685,6 +699,8 @@ Release 2.3.0 - UNRELEASED

HADOOP-10311. Cleanup vendor names from the code base. (tucu)

HADOOP-10273. Fix 'mvn site'. (Arpit Agarwal)

Release 2.2.0 - 2013-10-13

INCOMPATIBLE CHANGES

@ -543,6 +543,7 @@
|
|||
<javahClassName>org.apache.hadoop.io.compress.bzip2.Bzip2Decompressor</javahClassName>
|
||||
<javahClassName>org.apache.hadoop.security.JniBasedUnixGroupsMapping</javahClassName>
|
||||
<javahClassName>org.apache.hadoop.io.nativeio.NativeIO</javahClassName>
|
||||
<javahClassName>org.apache.hadoop.io.nativeio.SharedFileDescriptorFactory</javahClassName>
|
||||
<javahClassName>org.apache.hadoop.security.JniBasedUnixGroupsNetgroupMapping</javahClassName>
|
||||
<javahClassName>org.apache.hadoop.io.compress.snappy.SnappyCompressor</javahClassName>
|
||||
<javahClassName>org.apache.hadoop.io.compress.snappy.SnappyDecompressor</javahClassName>
|
||||
|
@ -550,6 +551,7 @@
|
|||
<javahClassName>org.apache.hadoop.io.compress.lz4.Lz4Decompressor</javahClassName>
|
||||
<javahClassName>org.apache.hadoop.util.NativeCrc32</javahClassName>
|
||||
<javahClassName>org.apache.hadoop.net.unix.DomainSocket</javahClassName>
|
||||
<javahClassName>org.apache.hadoop.net.unix.DomainSocketWatcher</javahClassName>
|
||||
</javahClassNames>
|
||||
<javahOutputDirectory>${project.build.directory}/native/javah</javahOutputDirectory>
|
||||
</configuration>
|
||||
|
|
|
@ -178,7 +178,9 @@ add_dual_library(hadoop
|
|||
${D}/io/nativeio/NativeIO.c
|
||||
${D}/io/nativeio/errno_enum.c
|
||||
${D}/io/nativeio/file_descriptor.c
|
||||
${D}/io/nativeio/SharedFileDescriptorFactory.c
|
||||
${D}/net/unix/DomainSocket.c
|
||||
${D}/net/unix/DomainSocketWatcher.c
|
||||
${D}/security/JniBasedUnixGroupsMapping.c
|
||||
${D}/security/JniBasedUnixGroupsNetgroupMapping.c
|
||||
${D}/security/hadoop_group_info.c
|
||||
|
|
|
@ -34,13 +34,14 @@ public class HttpConfig {
HTTPS_ONLY,
HTTP_AND_HTTPS;

private static final Policy[] VALUES = values();
public static Policy fromString(String value) {
if (HTTPS_ONLY.name().equalsIgnoreCase(value)) {
return HTTPS_ONLY;
} else if (HTTP_AND_HTTPS.name().equalsIgnoreCase(value)) {
return HTTP_AND_HTTPS;
for (Policy p : VALUES) {
if (p.name().equalsIgnoreCase(value)) {
return p;
}
}
return HTTP_ONLY;
return null;
}

public boolean isHttpEnabled() {

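For reference, a small illustrative sketch (not part of the commit) of how the reworked Policy.fromString lookup behaves. An unrecognized value now yields null instead of HTTP_ONLY, so the caller in this sketch supplies its own fallback.

import org.apache.hadoop.http.HttpConfig;

public class PolicyLookupExample {
  public static void main(String[] args) {
    // Matching is case-insensitive against the enum constant names.
    HttpConfig.Policy p = HttpConfig.Policy.fromString("https_only");
    System.out.println(p);  // HTTPS_ONLY

    // An unknown value now returns null instead of HTTP_ONLY, so the caller
    // picks its own default here.
    HttpConfig.Policy q = HttpConfig.Policy.fromString("bogus");
    System.out.println(q != null ? q : HttpConfig.Policy.HTTP_ONLY);
  }
}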
@ -487,6 +487,16 @@ public class NativeIO {
new ConcurrentHashMap<Integer, CachedName>();

private enum IdCache { USER, GROUP }

public final static int MMAP_PROT_READ = 0x1;
public final static int MMAP_PROT_WRITE = 0x2;
public final static int MMAP_PROT_EXEC = 0x4;

public static native long mmap(FileDescriptor fd, int prot,
boolean shared, long length) throws IOException;

public static native void munmap(long addr, long length)
throws IOException;
}

private static boolean workaroundNonThreadSafePasswdCalls = false;

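For context, a rough usage sketch of the new mmap/munmap bindings. It assumes the methods live in the NativeIO.POSIX inner class (as the JNI symbol names later in this commit suggest) and that libhadoop is loaded; the method and constant names are taken from the hunk above.

import java.io.FileDescriptor;
import java.io.FileInputStream;
import java.io.IOException;

import org.apache.hadoop.io.nativeio.NativeIO;

public class MmapExample {
  // Map a file read/write, use the mapping, then unmap it.
  public static void mapAndUnmap(FileInputStream fis, int length) throws IOException {
    FileDescriptor fd = fis.getFD();
    long addr = NativeIO.POSIX.mmap(fd,
        NativeIO.POSIX.MMAP_PROT_READ | NativeIO.POSIX.MMAP_PROT_WRITE,
        true /* shared */, length);
    try {
      // ... hand addr to other native code ...
    } finally {
      NativeIO.POSIX.munmap(addr, length);
    }
  }
}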
@ -0,0 +1,90 @@
|
|||
/**
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
* regarding copyright ownership. The ASF licenses this file
|
||||
* to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
package org.apache.hadoop.io.nativeio;
|
||||
|
||||
import java.io.FileInputStream;
|
||||
import java.io.IOException;
|
||||
import java.io.FileDescriptor;
|
||||
|
||||
import org.apache.commons.lang.SystemUtils;
|
||||
import org.apache.hadoop.classification.InterfaceAudience;
|
||||
import org.apache.hadoop.classification.InterfaceStability;
|
||||
|
||||
import com.google.common.base.Preconditions;
|
||||
|
||||
/**
|
||||
* A factory for creating shared file descriptors inside a given directory.
|
||||
* Typically, the directory will be /dev/shm or /tmp.
|
||||
*
|
||||
* We will hand out file descriptors that correspond to unlinked files residing
|
||||
* in that directory. These file descriptors are suitable for sharing across
|
||||
* multiple processes and are both readable and writable.
|
||||
*
|
||||
* Because we unlink the temporary files right after creating them, a JVM crash
|
||||
* usually does not leave behind any temporary files in the directory. However,
|
||||
* it may happen that we crash right after creating the file and before
|
||||
* unlinking it. In the constructor, we attempt to clean up after any such
|
||||
* remnants by trying to unlink any temporary files created by previous
|
||||
* SharedFileDescriptorFactory instances that also used our prefix.
|
||||
*/
|
||||
@InterfaceAudience.Private
|
||||
@InterfaceStability.Unstable
|
||||
public class SharedFileDescriptorFactory {
|
||||
private final String prefix;
|
||||
private final String path;
|
||||
|
||||
/**
|
||||
* Create a SharedFileDescriptorFactory.
|
||||
*
|
||||
* @param prefix Prefix to add to all file names we use.
|
||||
* @param path Path to use.
|
||||
*/
|
||||
public SharedFileDescriptorFactory(String prefix, String path)
|
||||
throws IOException {
|
||||
Preconditions.checkArgument(NativeIO.isAvailable());
|
||||
Preconditions.checkArgument(SystemUtils.IS_OS_UNIX);
|
||||
this.prefix = prefix;
|
||||
this.path = path;
|
||||
deleteStaleTemporaryFiles0(prefix, path);
|
||||
}
|
||||
|
||||
/**
|
||||
* Create a shared file descriptor which will be both readable and writable.
|
||||
*
|
||||
* @param length The starting file length.
|
||||
*
|
||||
* @return The file descriptor, wrapped in a FileInputStream.
|
||||
* @throws IOException If there was an I/O or configuration error creating
|
||||
* the descriptor.
|
||||
*/
|
||||
public FileInputStream createDescriptor(int length) throws IOException {
|
||||
return new FileInputStream(createDescriptor0(prefix, path, length));
|
||||
}
|
||||
|
||||
/**
|
||||
* Delete temporary files in the directory, NOT following symlinks.
|
||||
*/
|
||||
private static native void deleteStaleTemporaryFiles0(String prefix,
|
||||
String path) throws IOException;
|
||||
|
||||
/**
|
||||
* Create a file with O_EXCL, and then resize it to the desired size.
|
||||
*/
|
||||
private static native FileDescriptor createDescriptor0(String prefix,
|
||||
String path, int length) throws IOException;
|
||||
}
|
|
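A hedged usage sketch for the new factory; the "demo_" prefix and the /dev/shm path are illustrative choices, not part of the commit.

import java.io.FileInputStream;
import java.io.IOException;

import org.apache.hadoop.io.nativeio.NativeIO;
import org.apache.hadoop.io.nativeio.SharedFileDescriptorFactory;

public class SharedFdExample {
  public static void main(String[] args) throws IOException {
    if (!NativeIO.isAvailable()) {
      System.err.println("libhadoop is not loaded; shared descriptors are unavailable.");
      return;
    }
    SharedFileDescriptorFactory factory =
        new SharedFileDescriptorFactory("demo_", "/dev/shm");
    FileInputStream stream = factory.createDescriptor(8192);
    try {
      // The stream wraps an already-unlinked file, so it leaves nothing behind
      // on disk and can be shared with another process.
      System.out.println("descriptor valid: " + stream.getFD().valid());
    } finally {
      stream.close();
    }
  }
}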
@ -151,6 +151,13 @@ public class RetryPolicies {
|
|||
delayMillis, maxDelayBase);
|
||||
}
|
||||
|
||||
public static final RetryPolicy failoverOnNetworkException(
|
||||
RetryPolicy fallbackPolicy, int maxFailovers, int maxRetries,
|
||||
long delayMillis, long maxDelayBase) {
|
||||
return new FailoverOnNetworkExceptionRetry(fallbackPolicy, maxFailovers,
|
||||
maxRetries, delayMillis, maxDelayBase);
|
||||
}
|
||||
|
||||
static class TryOnceThenFail implements RetryPolicy {
|
||||
@Override
|
||||
public RetryAction shouldRetry(Exception e, int retries, int failovers,
|
||||
|
@ -516,18 +523,25 @@ public class RetryPolicies {
|
|||
|
||||
private RetryPolicy fallbackPolicy;
|
||||
private int maxFailovers;
|
||||
private int maxRetries;
|
||||
private long delayMillis;
|
||||
private long maxDelayBase;
|
||||
|
||||
public FailoverOnNetworkExceptionRetry(RetryPolicy fallbackPolicy,
|
||||
int maxFailovers) {
|
||||
this(fallbackPolicy, maxFailovers, 0, 0);
|
||||
this(fallbackPolicy, maxFailovers, 0, 0, 0);
|
||||
}
|
||||
|
||||
public FailoverOnNetworkExceptionRetry(RetryPolicy fallbackPolicy,
|
||||
int maxFailovers, long delayMillis, long maxDelayBase) {
|
||||
this(fallbackPolicy, maxFailovers, 0, delayMillis, maxDelayBase);
|
||||
}
|
||||
|
||||
public FailoverOnNetworkExceptionRetry(RetryPolicy fallbackPolicy,
|
||||
int maxFailovers, int maxRetries, long delayMillis, long maxDelayBase) {
|
||||
this.fallbackPolicy = fallbackPolicy;
|
||||
this.maxFailovers = maxFailovers;
|
||||
this.maxRetries = maxRetries;
|
||||
this.delayMillis = delayMillis;
|
||||
this.maxDelayBase = maxDelayBase;
|
||||
}
|
||||
|
@ -549,6 +563,10 @@ public class RetryPolicies {
|
|||
"failovers (" + failovers + ") exceeded maximum allowed ("
|
||||
+ maxFailovers + ")");
|
||||
}
|
||||
if (retries - failovers > maxRetries) {
|
||||
return new RetryAction(RetryAction.RetryDecision.FAIL, 0, "retries ("
|
||||
+ retries + ") exceeded maximum allowed (" + maxRetries + ")");
|
||||
}
|
||||
|
||||
if (e instanceof ConnectException ||
|
||||
e instanceof NoRouteToHostException ||
|
||||
|
|
|
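To illustrate the new five-argument overload (which adds maxRetries), a small sketch; the retry counts and delays below are illustrative only.

import org.apache.hadoop.io.retry.RetryPolicies;
import org.apache.hadoop.io.retry.RetryPolicy;

public class FailoverPolicyExample {
  public static RetryPolicy buildPolicy() {
    return RetryPolicies.failoverOnNetworkException(
        RetryPolicies.TRY_ONCE_THEN_FAIL, // fallback for non-network errors
        15,     // maxFailovers
        10,     // maxRetries (the parameter added by this change)
        500,    // delayMillis
        15000); // maxDelayBase
  }
}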
@ -450,6 +450,14 @@ public abstract class Server {
|
|||
serviceAuthorizationManager.refresh(conf, provider);
|
||||
}
|
||||
|
||||
/**
|
||||
* Refresh the service authorization ACL for the service handled by this server
|
||||
* using the specified Configuration.
|
||||
*/
|
||||
public void refreshServiceAclWithConfigration(Configuration conf,
|
||||
PolicyProvider provider) {
|
||||
serviceAuthorizationManager.refreshWithConfiguration(conf, provider);
|
||||
}
|
||||
/**
|
||||
* Returns a handle to the serviceAuthorizationManager (required in tests)
|
||||
* @return instance of ServiceAuthorizationManager for this server
|
||||
|
|
|
@ -24,17 +24,15 @@ import java.io.FileInputStream;
|
|||
import java.io.IOException;
|
||||
import java.io.InputStream;
|
||||
import java.io.OutputStream;
|
||||
import java.net.SocketException;
|
||||
import java.nio.channels.AsynchronousCloseException;
|
||||
import java.nio.channels.ClosedChannelException;
|
||||
import java.nio.channels.ReadableByteChannel;
|
||||
import java.nio.ByteBuffer;
|
||||
import java.util.concurrent.atomic.AtomicInteger;
|
||||
|
||||
import org.apache.commons.lang.SystemUtils;
|
||||
import org.apache.commons.logging.Log;
|
||||
import org.apache.commons.logging.LogFactory;
|
||||
import org.apache.hadoop.util.NativeCodeLoader;
|
||||
import org.apache.hadoop.util.CloseableReferenceCount;
|
||||
|
||||
import com.google.common.annotations.VisibleForTesting;
|
||||
|
||||
|
@ -132,104 +130,14 @@ public class DomainSocket implements Closeable {
|
|||
}
|
||||
|
||||
/**
|
||||
* Tracks the reference count of the file descriptor, and also whether it is
|
||||
* open or closed.
|
||||
* The socket reference count and closed bit.
|
||||
*/
|
||||
private static class Status {
|
||||
/**
|
||||
* Bit mask representing a closed domain socket.
|
||||
*/
|
||||
private static final int STATUS_CLOSED_MASK = 1 << 30;
|
||||
|
||||
/**
|
||||
* Status bits
|
||||
*
|
||||
* Bit 30: 0 = DomainSocket open, 1 = DomainSocket closed
|
||||
* Bits 29 to 0: the reference count.
|
||||
*/
|
||||
private final AtomicInteger bits = new AtomicInteger(0);
|
||||
|
||||
Status() { }
|
||||
|
||||
/**
|
||||
* Increment the reference count of the underlying file descriptor.
|
||||
*
|
||||
* @throws ClosedChannelException If the file descriptor is closed.
|
||||
*/
|
||||
void reference() throws ClosedChannelException {
|
||||
int curBits = bits.incrementAndGet();
|
||||
if ((curBits & STATUS_CLOSED_MASK) != 0) {
|
||||
bits.decrementAndGet();
|
||||
throw new ClosedChannelException();
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Decrement the reference count of the underlying file descriptor.
|
||||
*
|
||||
* @param checkClosed Whether to throw an exception if the file
|
||||
* descriptor is closed.
|
||||
*
|
||||
* @throws AsynchronousCloseException If the file descriptor is closed and
|
||||
* checkClosed is set.
|
||||
*/
|
||||
void unreference(boolean checkClosed) throws AsynchronousCloseException {
|
||||
int newCount = bits.decrementAndGet();
|
||||
assert (newCount & ~STATUS_CLOSED_MASK) >= 0;
|
||||
if (checkClosed && ((newCount & STATUS_CLOSED_MASK) != 0)) {
|
||||
throw new AsynchronousCloseException();
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Return true if the file descriptor is currently open.
|
||||
*
|
||||
* @return True if the file descriptor is currently open.
|
||||
*/
|
||||
boolean isOpen() {
|
||||
return ((bits.get() & STATUS_CLOSED_MASK) == 0);
|
||||
}
|
||||
|
||||
/**
|
||||
* Mark the file descriptor as closed.
|
||||
*
|
||||
* Once the file descriptor is closed, it cannot be reopened.
|
||||
*
|
||||
* @return The current reference count.
|
||||
* @throws ClosedChannelException If someone else closes the file
|
||||
* descriptor before we do.
|
||||
*/
|
||||
int setClosed() throws ClosedChannelException {
|
||||
while (true) {
|
||||
int curBits = bits.get();
|
||||
if ((curBits & STATUS_CLOSED_MASK) != 0) {
|
||||
throw new ClosedChannelException();
|
||||
}
|
||||
if (bits.compareAndSet(curBits, curBits | STATUS_CLOSED_MASK)) {
|
||||
return curBits & (~STATUS_CLOSED_MASK);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Get the current reference count.
|
||||
*
|
||||
* @return The current reference count.
|
||||
*/
|
||||
int getReferenceCount() {
|
||||
return bits.get() & (~STATUS_CLOSED_MASK);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* The socket status.
|
||||
*/
|
||||
private final Status status;
|
||||
final CloseableReferenceCount refCount;
|
||||
|
||||
/**
|
||||
* The file descriptor associated with this UNIX domain socket.
|
||||
*/
|
||||
private final int fd;
|
||||
final int fd;
|
||||
|
||||
/**
|
||||
* The path associated with this UNIX domain socket.
|
||||
|
@ -252,13 +160,21 @@ public class DomainSocket implements Closeable {
|
|||
private final DomainChannel channel = new DomainChannel();
|
||||
|
||||
private DomainSocket(String path, int fd) {
|
||||
this.status = new Status();
|
||||
this.refCount = new CloseableReferenceCount();
|
||||
this.fd = fd;
|
||||
this.path = path;
|
||||
}
|
||||
|
||||
private static native int bind0(String path) throws IOException;
|
||||
|
||||
private void unreference(boolean checkClosed) throws ClosedChannelException {
|
||||
if (checkClosed) {
|
||||
refCount.unreferenceCheckClosed();
|
||||
} else {
|
||||
refCount.unreference();
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Create a new DomainSocket listening on the given path.
|
||||
*
|
||||
|
@ -308,14 +224,14 @@ public class DomainSocket implements Closeable {
|
|||
* @throws SocketTimeoutException If the accept timed out.
|
||||
*/
|
||||
public DomainSocket accept() throws IOException {
|
||||
status.reference();
|
||||
refCount.reference();
|
||||
boolean exc = true;
|
||||
try {
|
||||
DomainSocket ret = new DomainSocket(path, accept0(fd));
|
||||
exc = false;
|
||||
return ret;
|
||||
} finally {
|
||||
status.unreference(exc);
|
||||
unreference(exc);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -335,14 +251,14 @@ public class DomainSocket implements Closeable {
|
|||
return new DomainSocket(path, fd);
|
||||
}
|
||||
|
||||
/**
|
||||
* Return true if the file descriptor is currently open.
|
||||
*
|
||||
* @return True if the file descriptor is currently open.
|
||||
*/
|
||||
public boolean isOpen() {
|
||||
return status.isOpen();
|
||||
}
|
||||
/**
|
||||
* Return true if the file descriptor is currently open.
|
||||
*
|
||||
* @return True if the file descriptor is currently open.
|
||||
*/
|
||||
public boolean isOpen() {
|
||||
return refCount.isOpen();
|
||||
}
|
||||
|
||||
/**
|
||||
* @return The socket path.
|
||||
|
@ -381,20 +297,20 @@ public class DomainSocket implements Closeable {
|
|||
throws IOException;
|
||||
|
||||
public void setAttribute(int type, int size) throws IOException {
|
||||
status.reference();
|
||||
refCount.reference();
|
||||
boolean exc = true;
|
||||
try {
|
||||
setAttribute0(fd, type, size);
|
||||
exc = false;
|
||||
} finally {
|
||||
status.unreference(exc);
|
||||
unreference(exc);
|
||||
}
|
||||
}
|
||||
|
||||
private native int getAttribute0(int fd, int type) throws IOException;
|
||||
|
||||
public int getAttribute(int type) throws IOException {
|
||||
status.reference();
|
||||
refCount.reference();
|
||||
int attribute;
|
||||
boolean exc = true;
|
||||
try {
|
||||
|
@ -402,7 +318,7 @@ public class DomainSocket implements Closeable {
|
|||
exc = false;
|
||||
return attribute;
|
||||
} finally {
|
||||
status.unreference(exc);
|
||||
unreference(exc);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -419,9 +335,9 @@ public class DomainSocket implements Closeable {
|
|||
@Override
|
||||
public void close() throws IOException {
|
||||
// Set the closed bit on this DomainSocket
|
||||
int refCount;
|
||||
int count;
|
||||
try {
|
||||
refCount = status.setClosed();
|
||||
count = refCount.setClosed();
|
||||
} catch (ClosedChannelException e) {
|
||||
// Someone else already closed the DomainSocket.
|
||||
return;
|
||||
|
@ -429,7 +345,7 @@ public class DomainSocket implements Closeable {
|
|||
// Wait for all references to go away
|
||||
boolean didShutdown = false;
|
||||
boolean interrupted = false;
|
||||
while (refCount > 0) {
|
||||
while (count > 0) {
|
||||
if (!didShutdown) {
|
||||
try {
|
||||
// Calling shutdown on the socket will interrupt blocking system
|
||||
|
@ -446,7 +362,7 @@ public class DomainSocket implements Closeable {
|
|||
} catch (InterruptedException e) {
|
||||
interrupted = true;
|
||||
}
|
||||
refCount = status.getReferenceCount();
|
||||
count = refCount.getReferenceCount();
|
||||
}
|
||||
|
||||
// At this point, nobody has a reference to the file descriptor,
|
||||
|
@ -478,13 +394,13 @@ public class DomainSocket implements Closeable {
|
|||
*/
|
||||
public void sendFileDescriptors(FileDescriptor descriptors[],
|
||||
byte jbuf[], int offset, int length) throws IOException {
|
||||
status.reference();
|
||||
refCount.reference();
|
||||
boolean exc = true;
|
||||
try {
|
||||
sendFileDescriptors0(fd, descriptors, jbuf, offset, length);
|
||||
exc = false;
|
||||
} finally {
|
||||
status.unreference(exc);
|
||||
unreference(exc);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -515,14 +431,14 @@ public class DomainSocket implements Closeable {
|
|||
*/
|
||||
public int receiveFileDescriptors(FileDescriptor[] descriptors,
|
||||
byte jbuf[], int offset, int length) throws IOException {
|
||||
status.reference();
|
||||
refCount.reference();
|
||||
boolean exc = true;
|
||||
try {
|
||||
int nBytes = receiveFileDescriptors0(fd, descriptors, jbuf, offset, length);
|
||||
exc = false;
|
||||
return nBytes;
|
||||
} finally {
|
||||
status.unreference(exc);
|
||||
unreference(exc);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -539,7 +455,7 @@ public class DomainSocket implements Closeable {
|
|||
for (int i = 0; i < streams.length; i++) {
|
||||
streams[i] = null;
|
||||
}
|
||||
status.reference();
|
||||
refCount.reference();
|
||||
try {
|
||||
int ret = receiveFileDescriptors0(fd, descriptors, buf, offset, length);
|
||||
for (int i = 0, j = 0; i < descriptors.length; i++) {
|
||||
|
@ -569,7 +485,7 @@ public class DomainSocket implements Closeable {
|
|||
}
|
||||
}
|
||||
}
|
||||
status.unreference(!success);
|
||||
unreference(!success);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -593,7 +509,7 @@ public class DomainSocket implements Closeable {
|
|||
public class DomainInputStream extends InputStream {
|
||||
@Override
|
||||
public int read() throws IOException {
|
||||
status.reference();
|
||||
refCount.reference();
|
||||
boolean exc = true;
|
||||
try {
|
||||
byte b[] = new byte[1];
|
||||
|
@ -601,33 +517,33 @@ public class DomainSocket implements Closeable {
|
|||
exc = false;
|
||||
return (ret >= 0) ? b[0] : -1;
|
||||
} finally {
|
||||
status.unreference(exc);
|
||||
unreference(exc);
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public int read(byte b[], int off, int len) throws IOException {
|
||||
status.reference();
|
||||
refCount.reference();
|
||||
boolean exc = true;
|
||||
try {
|
||||
int nRead = DomainSocket.readArray0(DomainSocket.this.fd, b, off, len);
|
||||
exc = false;
|
||||
return nRead;
|
||||
} finally {
|
||||
status.unreference(exc);
|
||||
unreference(exc);
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public int available() throws IOException {
|
||||
status.reference();
|
||||
refCount.reference();
|
||||
boolean exc = true;
|
||||
try {
|
||||
int nAvailable = DomainSocket.available0(DomainSocket.this.fd);
|
||||
exc = false;
|
||||
return nAvailable;
|
||||
} finally {
|
||||
status.unreference(exc);
|
||||
unreference(exc);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -649,7 +565,7 @@ public class DomainSocket implements Closeable {
|
|||
|
||||
@Override
|
||||
public void write(int val) throws IOException {
|
||||
status.reference();
|
||||
refCount.reference();
|
||||
boolean exc = true;
|
||||
try {
|
||||
byte b[] = new byte[1];
|
||||
|
@ -657,19 +573,19 @@ public class DomainSocket implements Closeable {
|
|||
DomainSocket.writeArray0(DomainSocket.this.fd, b, 0, 1);
|
||||
exc = false;
|
||||
} finally {
|
||||
status.unreference(exc);
|
||||
unreference(exc);
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public void write(byte[] b, int off, int len) throws IOException {
|
||||
status.reference();
|
||||
boolean exc = true;
|
||||
refCount.reference();
|
||||
boolean exc = true;
|
||||
try {
|
||||
DomainSocket.writeArray0(DomainSocket.this.fd, b, off, len);
|
||||
exc = false;
|
||||
} finally {
|
||||
status.unreference(exc);
|
||||
unreference(exc);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -688,7 +604,7 @@ public class DomainSocket implements Closeable {
|
|||
|
||||
@Override
|
||||
public int read(ByteBuffer dst) throws IOException {
|
||||
status.reference();
|
||||
refCount.reference();
|
||||
boolean exc = true;
|
||||
try {
|
||||
int nread = 0;
|
||||
|
@ -710,7 +626,7 @@ public class DomainSocket implements Closeable {
|
|||
exc = false;
|
||||
return nread;
|
||||
} finally {
|
||||
status.unreference(exc);
|
||||
unreference(exc);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
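The hunks above swap DomainSocket's private Status class for the shared CloseableReferenceCount but keep the same guard pattern around every native call. A minimal sketch of that pattern, with a hypothetical nativeCall() standing in for readArray0 and friends:

import java.io.IOException;

import org.apache.hadoop.util.CloseableReferenceCount;

class RefCountedResource {
  private final CloseableReferenceCount refCount = new CloseableReferenceCount();

  // Placeholder for a real native call such as readArray0 or writeArray0.
  private void nativeCall() throws IOException { }

  public void doIo() throws IOException {
    refCount.reference();   // throws ClosedChannelException if already closed
    boolean exc = true;
    try {
      nativeCall();
      exc = false;
    } finally {
      // Mirrors DomainSocket.unreference(exc): only check for an asynchronous
      // close when the guarded call itself failed.
      if (exc) {
        refCount.unreferenceCheckClosed();
      } else {
        refCount.unreference();
      }
    }
  }
}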
@ -0,0 +1,478 @@
|
|||
/**
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
* regarding copyright ownership. The ASF licenses this file
|
||||
* to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
package org.apache.hadoop.net.unix;
|
||||
|
||||
import java.io.Closeable;
|
||||
import java.io.EOFException;
|
||||
|
||||
import org.apache.hadoop.classification.InterfaceAudience;
|
||||
import org.apache.hadoop.io.IOUtils;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.nio.channels.ClosedChannelException;
|
||||
import java.util.Iterator;
|
||||
import java.util.LinkedList;
|
||||
import java.util.TreeMap;
|
||||
import java.util.Map;
|
||||
import java.util.concurrent.locks.Condition;
|
||||
import java.util.concurrent.locks.ReentrantLock;
|
||||
|
||||
import org.apache.commons.lang.SystemUtils;
|
||||
import org.apache.commons.logging.Log;
|
||||
import org.apache.commons.logging.LogFactory;
|
||||
import org.apache.hadoop.util.NativeCodeLoader;
|
||||
|
||||
import com.google.common.base.Preconditions;
|
||||
import com.google.common.util.concurrent.Uninterruptibles;
|
||||
|
||||
/**
|
||||
* The DomainSocketWatcher watches a set of domain sockets to see when they
|
||||
* become readable, or closed. When one of those events happens, it makes a
|
||||
* callback.
|
||||
*
|
||||
* See {@link DomainSocket} for more information about UNIX domain sockets.
|
||||
*/
|
||||
@InterfaceAudience.LimitedPrivate("HDFS")
|
||||
public final class DomainSocketWatcher extends Thread implements Closeable {
|
||||
static {
|
||||
if (SystemUtils.IS_OS_WINDOWS) {
|
||||
loadingFailureReason = "UNIX Domain sockets are not available on Windows.";
|
||||
} else if (!NativeCodeLoader.isNativeCodeLoaded()) {
|
||||
loadingFailureReason = "libhadoop cannot be loaded.";
|
||||
} else {
|
||||
String problem;
|
||||
try {
|
||||
anchorNative();
|
||||
problem = null;
|
||||
} catch (Throwable t) {
|
||||
problem = "DomainSocketWatcher#anchorNative got error: " +
|
||||
t.getMessage();
|
||||
}
|
||||
loadingFailureReason = problem;
|
||||
}
|
||||
}
|
||||
|
||||
static Log LOG = LogFactory.getLog(DomainSocketWatcher.class);
|
||||
|
||||
/**
|
||||
* The reason why DomainSocketWatcher is not available, or null if it is
|
||||
* available.
|
||||
*/
|
||||
private final static String loadingFailureReason;
|
||||
|
||||
/**
|
||||
* Initializes the native library code.
|
||||
*/
|
||||
private static native void anchorNative();
|
||||
|
||||
interface Handler {
|
||||
/**
|
||||
* Handles an event on a socket. An event may be the socket becoming
|
||||
* readable, or the remote end being closed.
|
||||
*
|
||||
* @param sock The socket that the event occurred on.
|
||||
* @return Whether we should close the socket.
|
||||
*/
|
||||
boolean handle(DomainSocket sock);
|
||||
}
|
||||
|
||||
/**
|
||||
* Handler for {DomainSocketWatcher#notificationSockets[1]}
|
||||
*/
|
||||
private class NotificationHandler implements Handler {
|
||||
public boolean handle(DomainSocket sock) {
|
||||
try {
|
||||
if (LOG.isTraceEnabled()) {
|
||||
LOG.trace(this + ": NotificationHandler: doing a read on " +
|
||||
sock.fd);
|
||||
}
|
||||
if (sock.getInputStream().read() == -1) {
|
||||
if (LOG.isTraceEnabled()) {
|
||||
LOG.trace(this + ": NotificationHandler: got EOF on " + sock.fd);
|
||||
}
|
||||
throw new EOFException();
|
||||
}
|
||||
if (LOG.isTraceEnabled()) {
|
||||
LOG.trace(this + ": NotificationHandler: read succeeded on " +
|
||||
sock.fd);
|
||||
}
|
||||
return false;
|
||||
} catch (IOException e) {
|
||||
if (LOG.isTraceEnabled()) {
|
||||
LOG.trace(this + ": NotificationHandler: setting closed to " +
|
||||
"true for " + sock.fd);
|
||||
}
|
||||
closed = true;
|
||||
return true;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
private static class Entry {
|
||||
final DomainSocket socket;
|
||||
final Handler handler;
|
||||
|
||||
Entry(DomainSocket socket, Handler handler) {
|
||||
this.socket = socket;
|
||||
this.handler = handler;
|
||||
}
|
||||
|
||||
DomainSocket getDomainSocket() {
|
||||
return socket;
|
||||
}
|
||||
|
||||
Handler getHandler() {
|
||||
return handler;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* The FdSet is a set of file descriptors that gets passed to poll(2).
|
||||
* It contains a native memory segment, so that we don't have to copy
|
||||
* in the poll0 function.
|
||||
*/
|
||||
private static class FdSet {
|
||||
private long data;
|
||||
|
||||
private native static long alloc0();
|
||||
|
||||
FdSet() {
|
||||
data = alloc0();
|
||||
}
|
||||
|
||||
/**
|
||||
* Add a file descriptor to the set.
|
||||
*
|
||||
* @param fd The file descriptor to add.
|
||||
*/
|
||||
native void add(int fd);
|
||||
|
||||
/**
|
||||
* Remove a file descriptor from the set.
|
||||
*
|
||||
* @param fd The file descriptor to remove.
|
||||
*/
|
||||
native void remove(int fd);
|
||||
|
||||
/**
|
||||
* Get an array containing all the FDs marked as readable.
|
||||
* Also clear the state of all FDs.
|
||||
*
|
||||
* @return An array containing all of the currently readable file
|
||||
* descriptors.
|
||||
*/
|
||||
native int[] getAndClearReadableFds();
|
||||
|
||||
/**
|
||||
* Close the object and de-allocate the memory used.
|
||||
*/
|
||||
native void close();
|
||||
}
|
||||
|
||||
/**
|
||||
* Lock which protects toAdd, toRemove, and closed.
|
||||
*/
|
||||
private final ReentrantLock lock = new ReentrantLock();
|
||||
|
||||
/**
|
||||
* Condition variable which indicates that toAdd and toRemove have been
|
||||
* processed.
|
||||
*/
|
||||
private final Condition processedCond = lock.newCondition();
|
||||
|
||||
/**
|
||||
* Entries to add.
|
||||
*/
|
||||
private final LinkedList<Entry> toAdd =
|
||||
new LinkedList<Entry>();
|
||||
|
||||
/**
|
||||
* Entries to remove.
|
||||
*/
|
||||
private final TreeMap<Integer, DomainSocket> toRemove =
|
||||
new TreeMap<Integer, DomainSocket>();
|
||||
|
||||
/**
|
||||
* Maximum length of time to go between checking whether the interrupted
|
||||
* bit has been set for this thread.
|
||||
*/
|
||||
private final int interruptCheckPeriodMs;
|
||||
|
||||
/**
|
||||
* A pair of sockets used to wake up the thread after it has called poll(2).
|
||||
*/
|
||||
private final DomainSocket notificationSockets[];
|
||||
|
||||
/**
|
||||
* Whether or not this DomainSocketWatcher is closed.
|
||||
*/
|
||||
private boolean closed = false;
|
||||
|
||||
public DomainSocketWatcher(int interruptCheckPeriodMs) throws IOException {
|
||||
if (loadingFailureReason != null) {
|
||||
throw new UnsupportedOperationException(loadingFailureReason);
|
||||
}
|
||||
notificationSockets = DomainSocket.socketpair();
|
||||
this.interruptCheckPeriodMs = interruptCheckPeriodMs;
|
||||
Preconditions.checkArgument(interruptCheckPeriodMs > 0);
|
||||
watcherThread.start();
|
||||
}
|
||||
|
||||
/**
|
||||
* Close the DomainSocketWatcher and wait for its thread to terminate.
|
||||
*
|
||||
* If there is more than one close, all but the first will be ignored.
|
||||
*/
|
||||
@Override
|
||||
public void close() throws IOException {
|
||||
try {
|
||||
lock.lock();
|
||||
if (closed) return;
|
||||
LOG.info(this + ": closing");
|
||||
closed = true;
|
||||
} finally {
|
||||
lock.unlock();
|
||||
}
|
||||
// Close notificationSockets[0], so that notificationSockets[1] gets an EOF
|
||||
// event. This will wake up the thread immediately if it is blocked inside
|
||||
// the select() system call.
|
||||
notificationSockets[0].close();
|
||||
// Wait for the select thread to terminate.
|
||||
Uninterruptibles.joinUninterruptibly(watcherThread);
|
||||
}
|
||||
|
||||
/**
|
||||
* Add a socket.
|
||||
*
|
||||
* @param sock The socket to add. It is an error to re-add a socket that
|
||||
* we are already watching.
|
||||
* @param handler The handler to associate with this socket. This may be
|
||||
* called any time after this function is called.
|
||||
*/
|
||||
public void add(DomainSocket sock, Handler handler) {
|
||||
try {
|
||||
lock.lock();
|
||||
checkNotClosed();
|
||||
Entry entry = new Entry(sock, handler);
|
||||
try {
|
||||
sock.refCount.reference();
|
||||
} catch (ClosedChannelException e) {
|
||||
Preconditions.checkArgument(false,
|
||||
"tried to add a closed DomainSocket to " + this);
|
||||
}
|
||||
toAdd.add(entry);
|
||||
kick();
|
||||
while (true) {
|
||||
try {
|
||||
processedCond.await();
|
||||
} catch (InterruptedException e) {
|
||||
this.interrupt();
|
||||
}
|
||||
if (!toAdd.contains(entry)) {
|
||||
break;
|
||||
}
|
||||
checkNotClosed();
|
||||
}
|
||||
} finally {
|
||||
lock.unlock();
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Remove a socket. Its handler will be called.
|
||||
*
|
||||
* @param sock The socket to remove.
|
||||
*/
|
||||
public void remove(DomainSocket sock) {
|
||||
try {
|
||||
lock.lock();
|
||||
checkNotClosed();
|
||||
toRemove.put(sock.fd, sock);
|
||||
kick();
|
||||
while (true) {
|
||||
try {
|
||||
processedCond.await();
|
||||
} catch (InterruptedException e) {
|
||||
this.interrupt();
|
||||
}
|
||||
if (!toRemove.containsKey(sock.fd)) {
|
||||
break;
|
||||
}
|
||||
checkNotClosed();
|
||||
}
|
||||
} finally {
|
||||
lock.unlock();
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Wake up the DomainSocketWatcher thread.
|
||||
*/
|
||||
private void kick() {
|
||||
try {
|
||||
notificationSockets[0].getOutputStream().write(0);
|
||||
} catch (IOException e) {
|
||||
LOG.error(this + ": error writing to notificationSockets[0]", e);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Check that the DomainSocketWatcher is not closed.
|
||||
* Must be called while holding the lock.
|
||||
*/
|
||||
private void checkNotClosed() {
|
||||
Preconditions.checkState(lock.isHeldByCurrentThread());
|
||||
if (closed) {
|
||||
throw new RuntimeException("DomainSocketWatcher is closed.");
|
||||
}
|
||||
}
|
||||
|
||||
private void sendCallback(String caller, TreeMap<Integer, Entry> entries,
|
||||
FdSet fdSet, int fd) {
|
||||
if (LOG.isTraceEnabled()) {
|
||||
LOG.trace(this + ": " + caller + " starting sendCallback for fd " + fd);
|
||||
}
|
||||
Entry entry = entries.get(fd);
|
||||
Preconditions.checkNotNull(entry,
|
||||
this + ": fdSet contained " + fd + ", which we were " +
|
||||
"not tracking.");
|
||||
DomainSocket sock = entry.getDomainSocket();
|
||||
if (entry.getHandler().handle(sock)) {
|
||||
if (LOG.isTraceEnabled()) {
|
||||
LOG.trace(this + ": " + caller + ": closing fd " + fd +
|
||||
" at the request of the handler.");
|
||||
}
|
||||
if (toRemove.remove(fd) != null) {
|
||||
if (LOG.isTraceEnabled()) {
|
||||
LOG.trace(this + ": " + caller + " : sendCallback processed fd " +
|
||||
fd + " in toRemove.");
|
||||
}
|
||||
}
|
||||
try {
|
||||
sock.refCount.unreferenceCheckClosed();
|
||||
} catch (IOException e) {
|
||||
Preconditions.checkArgument(false,
|
||||
this + ": file descriptor " + sock.fd + " was closed while " +
|
||||
"still in the poll(2) loop.");
|
||||
}
|
||||
IOUtils.cleanup(LOG, sock);
|
||||
entries.remove(fd);
|
||||
fdSet.remove(fd);
|
||||
} else {
|
||||
if (LOG.isTraceEnabled()) {
|
||||
LOG.trace(this + ": " + caller + ": sendCallback not " +
|
||||
"closing fd " + fd);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
private final Thread watcherThread = new Thread(new Runnable() {
|
||||
@Override
|
||||
public void run() {
|
||||
LOG.info(this + ": starting with interruptCheckPeriodMs = " +
|
||||
interruptCheckPeriodMs);
|
||||
final TreeMap<Integer, Entry> entries = new TreeMap<Integer, Entry>();
|
||||
FdSet fdSet = new FdSet();
|
||||
addNotificationSocket(entries, fdSet);
|
||||
try {
|
||||
while (true) {
|
||||
lock.lock();
|
||||
try {
|
||||
for (int fd : fdSet.getAndClearReadableFds()) {
|
||||
sendCallback("getAndClearReadableFds", entries, fdSet, fd);
|
||||
}
|
||||
if (!(toAdd.isEmpty() && toRemove.isEmpty())) {
|
||||
// Handle pending additions (before pending removes).
|
||||
for (Iterator<Entry> iter = toAdd.iterator(); iter.hasNext(); ) {
|
||||
Entry entry = iter.next();
|
||||
DomainSocket sock = entry.getDomainSocket();
|
||||
Entry prevEntry = entries.put(sock.fd, entry);
|
||||
Preconditions.checkState(prevEntry == null,
|
||||
this + ": tried to watch a file descriptor that we " +
|
||||
"were already watching: " + sock);
|
||||
if (LOG.isTraceEnabled()) {
|
||||
LOG.trace(this + ": adding fd " + sock.fd);
|
||||
}
|
||||
fdSet.add(sock.fd);
|
||||
iter.remove();
|
||||
}
|
||||
// Handle pending removals
|
||||
while (true) {
|
||||
Map.Entry<Integer, DomainSocket> entry = toRemove.firstEntry();
|
||||
if (entry == null) break;
|
||||
sendCallback("handlePendingRemovals",
|
||||
entries, fdSet, entry.getValue().fd);
|
||||
}
|
||||
processedCond.signalAll();
|
||||
}
|
||||
// Check if the thread should terminate. Doing this check now is
|
||||
// easier than at the beginning of the loop, since we know toAdd and
|
||||
// toRemove are now empty and processedCond has been notified if it
|
||||
// needed to be.
|
||||
if (closed) {
|
||||
LOG.info(toString() + " thread terminating.");
|
||||
return;
|
||||
}
|
||||
// Check if someone sent our thread an InterruptedException while we
|
||||
// were waiting in poll().
|
||||
if (Thread.interrupted()) {
|
||||
throw new InterruptedException();
|
||||
}
|
||||
} finally {
|
||||
lock.unlock();
|
||||
}
|
||||
doPoll0(interruptCheckPeriodMs, fdSet);
|
||||
}
|
||||
} catch (InterruptedException e) {
|
||||
LOG.info(toString() + " terminating on InterruptedException");
|
||||
} catch (IOException e) {
|
||||
LOG.error(toString() + " terminating on IOException", e);
|
||||
} finally {
|
||||
for (Entry entry : entries.values()) {
|
||||
sendCallback("close", entries, fdSet, entry.getDomainSocket().fd);
|
||||
}
|
||||
entries.clear();
|
||||
fdSet.close();
|
||||
}
|
||||
}
|
||||
});
|
||||
|
||||
private void addNotificationSocket(final TreeMap<Integer, Entry> entries,
|
||||
FdSet fdSet) {
|
||||
entries.put(notificationSockets[1].fd,
|
||||
new Entry(notificationSockets[1], new NotificationHandler()));
|
||||
try {
|
||||
notificationSockets[1].refCount.reference();
|
||||
} catch (IOException e) {
|
||||
throw new RuntimeException(e);
|
||||
}
|
||||
fdSet.add(notificationSockets[1].fd);
|
||||
if (LOG.isTraceEnabled()) {
|
||||
LOG.trace(this + ": adding notificationSocket " +
|
||||
notificationSockets[1].fd + ", connected to " +
|
||||
notificationSockets[0].fd);
|
||||
}
|
||||
}
|
||||
|
||||
public String toString() {
|
||||
return "DomainSocketWatcher(" + System.identityHashCode(this) + ")";
|
||||
}
|
||||
|
||||
private static native int doPoll0(int maxWaitMs, FdSet readFds)
|
||||
throws IOException;
|
||||
}
|
|
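A rough sketch of how the new watcher is driven. Handler is package-private in the code above, so the sketch assumes it is written from inside org.apache.hadoop.net.unix (for example, a test); the 60-second interrupt-check period is illustrative.

package org.apache.hadoop.net.unix;

import java.io.IOException;

public class WatcherExample {
  public static DomainSocketWatcher watch(DomainSocket sock) throws IOException {
    // Check for thread interruption at most every 60 seconds (illustrative).
    DomainSocketWatcher watcher = new DomainSocketWatcher(60000);
    watcher.add(sock, new DomainSocketWatcher.Handler() {
      @Override
      public boolean handle(DomainSocket s) {
        // Invoked when the socket becomes readable or its remote end closes.
        // Returning true asks the watcher to close the socket for us.
        return true;
      }
    });
    return watcher;   // the caller eventually calls watcher.close()
  }
}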
@ -30,6 +30,8 @@ import org.apache.hadoop.conf.Configuration;
|
|||
import org.apache.hadoop.security.UserGroupInformation;
|
||||
import org.apache.hadoop.util.StringUtils;
|
||||
|
||||
import com.google.common.annotations.VisibleForTesting;
|
||||
|
||||
@InterfaceAudience.Private
|
||||
public class ProxyUsers {
|
||||
|
||||
|
@ -177,4 +179,13 @@ public class ProxyUsers {
|
|||
(list.contains("*"));
|
||||
}
|
||||
|
||||
@VisibleForTesting
|
||||
public static Map<String, Collection<String>> getProxyGroups() {
|
||||
return proxyGroups;
|
||||
}
|
||||
|
||||
@VisibleForTesting
|
||||
public static Map<String, Collection<String>> getProxyHosts() {
|
||||
return proxyHosts;
|
||||
}
|
||||
}
|
||||
|
|
|
@ -33,6 +33,8 @@ import org.apache.hadoop.security.KerberosInfo;
|
|||
import org.apache.hadoop.security.SecurityUtil;
|
||||
import org.apache.hadoop.security.UserGroupInformation;
|
||||
|
||||
import com.google.common.annotations.VisibleForTesting;
|
||||
|
||||
/**
|
||||
* An authorization manager which handles service-level authorization
|
||||
* for incoming service requests.
|
||||
|
@ -120,19 +122,23 @@ public class ServiceAuthorizationManager {
|
|||
// Make a copy of the original config, and load the policy file
|
||||
Configuration policyConf = new Configuration(conf);
|
||||
policyConf.addResource(policyFile);
|
||||
|
||||
refreshWithConfiguration(policyConf, provider);
|
||||
}
|
||||
|
||||
public synchronized void refreshWithConfiguration(Configuration conf,
|
||||
PolicyProvider provider) {
|
||||
final Map<Class<?>, AccessControlList> newAcls =
|
||||
new IdentityHashMap<Class<?>, AccessControlList>();
|
||||
new IdentityHashMap<Class<?>, AccessControlList>();
|
||||
|
||||
// Parse the config file
|
||||
Service[] services = provider.getServices();
|
||||
if (services != null) {
|
||||
for (Service service : services) {
|
||||
AccessControlList acl =
|
||||
new AccessControlList(
|
||||
policyConf.get(service.getServiceKey(),
|
||||
AccessControlList.WILDCARD_ACL_VALUE)
|
||||
);
|
||||
AccessControlList acl =
|
||||
new AccessControlList(
|
||||
conf.get(service.getServiceKey(),
|
||||
AccessControlList.WILDCARD_ACL_VALUE)
|
||||
);
|
||||
newAcls.put(service.getProtocol(), acl);
|
||||
}
|
||||
}
|
||||
|
@ -141,8 +147,13 @@ public class ServiceAuthorizationManager {
|
|||
protocolToAcl = newAcls;
|
||||
}
|
||||
|
||||
// Package-protected for use in tests.
|
||||
Set<Class<?>> getProtocolsWithAcls() {
|
||||
@VisibleForTesting
|
||||
public Set<Class<?>> getProtocolsWithAcls() {
|
||||
return protocolToAcl.keySet();
|
||||
}
|
||||
|
||||
@VisibleForTesting
|
||||
public AccessControlList getProtocolsAcls(Class<?> className) {
|
||||
return protocolToAcl.get(className);
|
||||
}
|
||||
}
|
||||
|
|
|
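A sketch of the test-oriented path this change opens up: refreshWithConfiguration takes an in-memory Configuration directly, so a test can install ACLs without writing a policy file to disk. The ACL key and user names below are illustrative.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.security.authorize.PolicyProvider;
import org.apache.hadoop.security.authorize.ServiceAuthorizationManager;

public class AclRefreshExample {
  public static void refreshFromMemory(ServiceAuthorizationManager manager,
      PolicyProvider provider) {
    // Build the ACL configuration entirely in memory.
    Configuration aclConf = new Configuration(false);
    aclConf.set("security.example.protocol.acl", "alice,bob admins");
    manager.refreshWithConfiguration(aclConf, provider);
    System.out.println("protocols with ACLs: " + manager.getProtocolsWithAcls());
  }
}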
@ -19,7 +19,6 @@
|
|||
package org.apache.hadoop.service;
|
||||
|
||||
import java.util.ArrayList;
|
||||
import java.util.Collections;
|
||||
import java.util.List;
|
||||
|
||||
import org.apache.commons.logging.Log;
|
||||
|
@ -54,13 +53,13 @@ public class CompositeService extends AbstractService {
|
|||
}
|
||||
|
||||
/**
|
||||
* Get an unmodifiable list of services
|
||||
* Get a cloned list of services
|
||||
* @return a list of child services at the time of invocation -
|
||||
* added services will not be picked up.
|
||||
*/
|
||||
public List<Service> getServices() {
|
||||
synchronized (serviceList) {
|
||||
return Collections.unmodifiableList(serviceList);
|
||||
return new ArrayList<Service>(serviceList);
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
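A short sketch of what the switch from an unmodifiable view to a cloned list buys (HADOOP-10085): a snapshot taken before addService is unaffected by later additions. The class and method names in the sketch are illustrative.

import java.util.List;

import org.apache.hadoop.service.CompositeService;
import org.apache.hadoop.service.Service;

public class SnapshotExample extends CompositeService {
  public SnapshotExample() {
    super("SnapshotExample");
  }

  public void demonstrate(Service extra) {
    List<Service> snapshot = getServices();
    // With the cloned list, adding a child here no longer invalidates the
    // snapshot or risks ConcurrentModificationException for anyone iterating
    // it; the snapshot simply does not contain the new child.
    addService(extra);
    System.out.println("snapshot: " + snapshot.size()
        + ", live: " + getServices().size());
  }
}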
@ -0,0 +1,125 @@
|
|||
/**
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
* regarding copyright ownership. The ASF licenses this file
|
||||
* to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
package org.apache.hadoop.util;
|
||||
|
||||
import java.nio.channels.AsynchronousCloseException;
|
||||
import java.nio.channels.ClosedChannelException;
|
||||
import java.util.concurrent.atomic.AtomicInteger;
|
||||
|
||||
import com.google.common.base.Preconditions;
|
||||
|
||||
/**
|
||||
* A closeable object that maintains a reference count.
|
||||
*
|
||||
* Once the object is closed, attempting to take a new reference will throw
|
||||
* ClosedChannelException.
|
||||
*/
|
||||
public class CloseableReferenceCount {
|
||||
/**
|
||||
* Bit mask representing a closed domain socket.
|
||||
*/
|
||||
private static final int STATUS_CLOSED_MASK = 1 << 30;
|
||||
|
||||
/**
|
||||
* The status bits.
|
||||
*
|
||||
* Bit 30: 0 = open, 1 = closed.
|
||||
* Bits 29 to 0: the reference count.
|
||||
*/
|
||||
private final AtomicInteger status = new AtomicInteger(0);
|
||||
|
||||
public CloseableReferenceCount() { }
|
||||
|
||||
/**
|
||||
* Increment the reference count.
|
||||
*
|
||||
* @throws ClosedChannelException If the status is closed.
|
||||
*/
|
||||
public void reference() throws ClosedChannelException {
|
||||
int curBits = status.incrementAndGet();
|
||||
if ((curBits & STATUS_CLOSED_MASK) != 0) {
|
||||
status.decrementAndGet();
|
||||
throw new ClosedChannelException();
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Decrement the reference count.
|
||||
*
|
||||
* @return True if the object is closed and has no outstanding
|
||||
* references.
|
||||
*/
|
||||
public boolean unreference() {
|
||||
int newVal = status.decrementAndGet();
|
||||
Preconditions.checkState(newVal != 0xffffffff,
|
||||
"called unreference when the reference count was already at 0.");
|
||||
return newVal == STATUS_CLOSED_MASK;
|
||||
}
|
||||
|
||||
/**
|
||||
* Decrement the reference count, checking to make sure that the
|
||||
* CloseableReferenceCount is not closed.
|
||||
*
|
||||
* @throws AsynchronousCloseException If the status is closed.
|
||||
*/
|
||||
public void unreferenceCheckClosed() throws ClosedChannelException {
|
||||
int newVal = status.decrementAndGet();
|
||||
if ((newVal & STATUS_CLOSED_MASK) != 0) {
|
||||
throw new AsynchronousCloseException();
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Return true if the status is currently open.
|
||||
*
|
||||
* @return True if the status is currently open.
|
||||
*/
|
||||
public boolean isOpen() {
|
||||
return ((status.get() & STATUS_CLOSED_MASK) == 0);
|
||||
}
|
||||
|
||||
/**
|
||||
* Mark the status as closed.
|
||||
*
|
||||
* Once the status is closed, it cannot be reopened.
|
||||
*
|
||||
* @return The current reference count.
|
||||
* @throws ClosedChannelException If someone else closes the object
|
||||
* before we do.
|
||||
*/
|
||||
public int setClosed() throws ClosedChannelException {
|
||||
while (true) {
|
||||
int curBits = status.get();
|
||||
if ((curBits & STATUS_CLOSED_MASK) != 0) {
|
||||
throw new ClosedChannelException();
|
||||
}
|
||||
if (status.compareAndSet(curBits, curBits | STATUS_CLOSED_MASK)) {
|
||||
return curBits & (~STATUS_CLOSED_MASK);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Get the current reference count.
|
||||
*
|
||||
* @return The current reference count.
|
||||
*/
|
||||
public int getReferenceCount() {
|
||||
return status.get() & (~STATUS_CLOSED_MASK);
|
||||
}
|
||||
}
|
|
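To round out the picture, a sketch of the shutdown half of the new utility, mirroring what DomainSocket.close does with it; the polling sleep is a simplification of the real shutdown-and-wait loop.

import java.nio.channels.ClosedChannelException;

import org.apache.hadoop.util.CloseableReferenceCount;

public class CloseExample {
  public static void closeAndDrain(CloseableReferenceCount refCount)
      throws InterruptedException {
    int remaining;
    try {
      remaining = refCount.setClosed();   // no new reference() calls succeed now
    } catch (ClosedChannelException e) {
      return;                             // someone else closed it first
    }
    while (remaining > 0) {
      Thread.sleep(10);                   // the real code also shuts the socket down
      remaining = refCount.getReferenceCount();
    }
  }
}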
@ -18,6 +18,7 @@
|
|||
|
||||
#include "org_apache_hadoop.h"
|
||||
#include "org_apache_hadoop_io_nativeio_NativeIO.h"
|
||||
#include "org_apache_hadoop_io_nativeio_NativeIO_POSIX.h"
|
||||
|
||||
#ifdef UNIX
|
||||
#include <assert.h>
|
||||
|
@ -49,6 +50,10 @@
|
|||
#include "file_descriptor.h"
|
||||
#include "errno_enum.h"
|
||||
|
||||
#define MMAP_PROT_READ org_apache_hadoop_io_nativeio_NativeIO_POSIX_MMAP_PROT_READ
|
||||
#define MMAP_PROT_WRITE org_apache_hadoop_io_nativeio_NativeIO_POSIX_MMAP_PROT_WRITE
|
||||
#define MMAP_PROT_EXEC org_apache_hadoop_io_nativeio_NativeIO_POSIX_MMAP_PROT_EXEC
|
||||
|
||||
// the NativeIO$POSIX$Stat inner class and its constructor
|
||||
static jclass stat_clazz;
|
||||
static jmethodID stat_ctor;
|
||||
|
@ -661,6 +666,55 @@ cleanup:
|
|||
#endif
|
||||
}
|
||||
|
||||
JNIEXPORT jlong JNICALL
|
||||
Java_org_apache_hadoop_io_nativeio_NativeIO_00024POSIX_mmap(
|
||||
JNIEnv *env, jclass clazz, jobject jfd, jint jprot,
|
||||
jboolean jshared, jlong length)
|
||||
{
|
||||
#ifdef UNIX
|
||||
void *addr = 0;
|
||||
int prot, flags, fd;
|
||||
|
||||
prot = ((jprot & MMAP_PROT_READ) ? PROT_READ : 0) |
|
||||
((jprot & MMAP_PROT_WRITE) ? PROT_WRITE : 0) |
|
||||
((jprot & MMAP_PROT_EXEC) ? PROT_EXEC : 0);
|
||||
flags = (jshared == JNI_TRUE) ? MAP_SHARED : MAP_PRIVATE;
|
||||
fd = fd_get(env, jfd);
|
||||
addr = mmap(NULL, length, prot, flags, fd, 0);
|
||||
if (addr == MAP_FAILED) {
|
||||
throw_ioe(env, errno);
|
||||
}
|
||||
return (jlong)(intptr_t)addr;
|
||||
#endif // UNIX
|
||||
|
||||
#ifdef WINDOWS
|
||||
THROW(env, "java/io/IOException",
|
||||
"The function POSIX.mmap() is not supported on Windows");
|
||||
return NULL;
|
||||
#endif
|
||||
}
|
||||
|
||||
JNIEXPORT void JNICALL
|
||||
Java_org_apache_hadoop_io_nativeio_NativeIO_00024POSIX_munmap(
|
||||
JNIEnv *env, jclass clazz, jlong jaddr, jlong length)
|
||||
{
|
||||
#ifdef UNIX
|
||||
void *addr;
|
||||
|
||||
addr = (void*)(intptr_t)jaddr;
|
||||
if (munmap(addr, length) < 0) {
|
||||
throw_ioe(env, errno);
|
||||
}
|
||||
#endif // UNIX
|
||||
|
||||
#ifdef WINDOWS
|
||||
THROW(env, "java/io/IOException",
|
||||
"The function POSIX.munmap() is not supported on Windows");
|
||||
return NULL;
|
||||
#endif
|
||||
}
|
||||
|
||||
|
||||
/*
|
||||
* static native String getGroupName(int gid);
|
||||
*
|
||||
|
@ -1012,4 +1066,3 @@ JNIEnv *env, jclass clazz)
|
|||
/**
|
||||
* vim: sw=2: ts=2: et:
|
||||
*/
|
||||
|
||||
|
|
|
@ -0,0 +1,162 @@
|
|||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
* regarding copyright ownership. The ASF licenses this file
|
||||
* to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
#include "org_apache_hadoop.h"
|
||||
|
||||
#ifdef UNIX
|
||||
|
||||
#include "exception.h"
|
||||
#include "file_descriptor.h"
|
||||
#include "org_apache_hadoop.h"
|
||||
#include "org_apache_hadoop_io_nativeio_SharedFileDescriptorFactory.h"
|
||||
|
||||
#include <dirent.h>
|
||||
#include <errno.h>
|
||||
#include <fcntl.h>
|
||||
#include <limits.h>
|
||||
#include <pthread.h>
|
||||
#include <stdio.h>
|
||||
#include <stdlib.h>
|
||||
#include <string.h>
|
||||
#include <sys/stat.h>
|
||||
#include <sys/types.h>
|
||||
#include <unistd.h>
|
||||
|
||||
static pthread_mutex_t g_rand_lock = PTHREAD_MUTEX_INITIALIZER;
|
||||
|
||||
JNIEXPORT void JNICALL
|
||||
Java_org_apache_hadoop_io_nativeio_SharedFileDescriptorFactory_deleteStaleTemporaryFiles0(
|
||||
JNIEnv *env, jclass clazz, jstring jprefix, jstring jpath)
|
||||
{
|
||||
const char *prefix = NULL, *path = NULL;
|
||||
char target[PATH_MAX];
|
||||
jthrowable jthr;
|
||||
DIR *dp = NULL;
|
||||
struct dirent *de;
|
||||
|
||||
prefix = (*env)->GetStringUTFChars(env, jprefix, NULL);
|
||||
if (!prefix) goto done; // exception raised
|
||||
path = (*env)->GetStringUTFChars(env, jpath, NULL);
|
||||
if (!path) goto done; // exception raised
|
||||
|
||||
dp = opendir(path);
|
||||
if (!dp) {
|
||||
int ret = errno;
|
||||
jthr = newIOException(env, "opendir(%s) error %d: %s",
|
||||
path, ret, terror(ret));
|
||||
(*env)->Throw(env, jthr);
|
||||
goto done;
|
||||
}
|
||||
while ((de = readdir(dp))) {
|
||||
if (strncmp(prefix, de->d_name, strlen(prefix)) == 0) {
|
||||
int ret = snprintf(target, PATH_MAX, "%s/%s", path, de->d_name);
|
||||
if ((0 < ret) && (ret < PATH_MAX)) {
|
||||
unlink(target);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
done:
|
||||
if (dp) {
|
||||
closedir(dp);
|
||||
}
|
||||
if (prefix) {
|
||||
(*env)->ReleaseStringUTFChars(env, jprefix, prefix);
|
||||
}
|
||||
if (path) {
|
||||
(*env)->ReleaseStringUTFChars(env, jpath, path);
|
||||
}
|
||||
}
|
||||
|
||||
JNIEXPORT jobject JNICALL
|
||||
Java_org_apache_hadoop_io_nativeio_SharedFileDescriptorFactory_createDescriptor0(
|
||||
JNIEnv *env, jclass clazz, jstring jprefix, jstring jpath, jint length)
|
||||
{
|
||||
const char *prefix = NULL, *path = NULL;
|
||||
char target[PATH_MAX];
|
||||
int ret, fd = -1, rnd;
|
||||
jthrowable jthr;
|
||||
jobject jret = NULL;
|
||||
|
||||
prefix = (*env)->GetStringUTFChars(env, jprefix, NULL);
|
||||
if (!prefix) goto done; // exception raised
|
||||
path = (*env)->GetStringUTFChars(env, jpath, NULL);
|
||||
if (!path) goto done; // exception raised
|
||||
|
||||
pthread_mutex_lock(&g_rand_lock);
|
||||
rnd = rand();
|
||||
pthread_mutex_unlock(&g_rand_lock);
|
||||
while (1) {
|
||||
ret = snprintf(target, PATH_MAX, "%s/%s_%d",
|
||||
path, prefix, rnd);
|
||||
if (ret < 0) {
|
||||
jthr = newIOException(env, "snprintf error");
|
||||
(*env)->Throw(env, jthr);
|
||||
goto done;
|
||||
} else if (ret >= PATH_MAX) {
|
||||
jthr = newIOException(env, "computed path was too long.");
|
||||
(*env)->Throw(env, jthr);
|
||||
goto done;
|
||||
}
|
||||
fd = open(target, O_CREAT | O_EXCL | O_RDWR, 0700);
|
||||
if (fd >= 0) break; // success
|
||||
ret = errno;
|
||||
if (ret == EEXIST) {
|
||||
// Bad luck -- we got a very rare collision here between us and
|
||||
// another DataNode (or process). Try again.
|
||||
continue;
|
||||
} else if (ret == EINTR) {
|
||||
// Most of the time, this error is only possible when opening FIFOs.
|
||||
// But let's be thorough.
|
||||
continue;
|
||||
}
|
||||
jthr = newIOException(env, "open(%s, O_CREAT | O_EXCL | O_RDWR) "
|
||||
"failed: error %d (%s)", target, ret, terror(ret));
|
||||
(*env)->Throw(env, jthr);
|
||||
goto done;
|
||||
}
|
||||
if (unlink(target) < 0) {
|
||||
jthr = newIOException(env, "unlink(%s) failed: error %d (%s)",
|
||||
path, ret, terror(ret));
|
||||
(*env)->Throw(env, jthr);
|
||||
goto done;
|
||||
}
|
||||
if (ftruncate(fd, length) < 0) {
|
||||
jthr = newIOException(env, "ftruncate(%s, %d) failed: error %d (%s)",
|
||||
path, length, ret, terror(ret));
|
||||
(*env)->Throw(env, jthr);
|
||||
goto done;
|
||||
}
|
||||
jret = fd_create(env, fd); // throws exception on error.
|
||||
|
||||
done:
|
||||
if (prefix) {
|
||||
(*env)->ReleaseStringUTFChars(env, jprefix, prefix);
|
||||
}
|
||||
if (path) {
|
||||
(*env)->ReleaseStringUTFChars(env, jpath, path);
|
||||
}
|
||||
if (!jret) {
|
||||
if (fd >= 0) {
|
||||
close(fd);
|
||||
}
|
||||
}
|
||||
return jret;
|
||||
}
|
||||
|
||||
#endif
|
|
@ -0,0 +1,247 @@
|
|||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
* regarding copyright ownership. The ASF licenses this file
|
||||
* to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
#include "config.h"
|
||||
#include "exception.h"
|
||||
#include "org_apache_hadoop.h"
|
||||
#include "org_apache_hadoop_net_unix_DomainSocketWatcher.h"
|
||||
|
||||
#include <errno.h>
|
||||
#include <fcntl.h>
|
||||
#include <jni.h>
|
||||
#include <poll.h>
|
||||
#include <stdint.h>
|
||||
#include <stdio.h>
|
||||
#include <stdlib.h>
|
||||
#include <string.h>
|
||||
#include <sys/select.h>
|
||||
#include <sys/time.h>
|
||||
#include <sys/types.h>
|
||||
#include <unistd.h>
|
||||
|
||||
static jfieldID fd_set_data_fid;
|
||||
|
||||
#define FD_SET_DATA_MIN_SIZE 2
|
||||
|
||||
struct fd_set_data {
|
||||
/**
|
||||
* Number of fds we have allocated space for.
|
||||
*/
|
||||
int alloc_size;
|
||||
|
||||
/**
|
||||
* Number of fds actually in use.
|
||||
*/
|
||||
int used_size;
|
||||
|
||||
/**
|
||||
* Beginning of pollfd data.
|
||||
*/
|
||||
struct pollfd pollfd[0];
|
||||
};
|
||||
|
||||
JNIEXPORT void JNICALL
|
||||
Java_org_apache_hadoop_net_unix_DomainSocketWatcher_anchorNative(
|
||||
JNIEnv *env, jclass clazz)
|
||||
{
|
||||
jclass fd_set_class;
|
||||
|
||||
fd_set_class = (*env)->FindClass(env,
|
||||
"org/apache/hadoop/net/unix/DomainSocketWatcher$FdSet");
|
||||
if (!fd_set_class) return; // exception raised
|
||||
fd_set_data_fid = (*env)->GetFieldID(env, fd_set_class, "data", "J");
|
||||
if (!fd_set_data_fid) return; // exception raised
|
||||
}
|
||||
|
||||
JNIEXPORT jlong JNICALL
|
||||
Java_org_apache_hadoop_net_unix_DomainSocketWatcher_00024FdSet_alloc0(
|
||||
JNIEnv *env, jclass clazz)
|
||||
{
|
||||
struct fd_set_data *sd;
|
||||
|
||||
sd = calloc(1, sizeof(struct fd_set_data) +
|
||||
(sizeof(struct pollfd) * FD_SET_DATA_MIN_SIZE));
|
||||
if (!sd) {
|
||||
(*env)->Throw(env, newRuntimeException(env, "out of memory allocating "
|
||||
"DomainSocketWatcher#FdSet"));
|
||||
return 0L;
|
||||
}
|
||||
sd->alloc_size = FD_SET_DATA_MIN_SIZE;
|
||||
sd->used_size = 0;
|
||||
return (jlong)(intptr_t)sd;
|
||||
}
|
||||
|
||||
JNIEXPORT void JNICALL
|
||||
Java_org_apache_hadoop_net_unix_DomainSocketWatcher_00024FdSet_add(
|
||||
JNIEnv *env, jobject obj, jint fd)
|
||||
{
|
||||
struct fd_set_data *sd, *nd;
|
||||
struct pollfd *pollfd;
|
||||
|
||||
sd = (struct fd_set_data*)(intptr_t)(*env)->
|
||||
GetLongField(env, obj, fd_set_data_fid);
|
||||
if (sd->used_size + 1 > sd->alloc_size) {
|
||||
nd = realloc(sd, sizeof(struct fd_set_data) +
|
||||
(sizeof(struct pollfd) * sd->alloc_size * 2));
|
||||
if (!nd) {
|
||||
(*env)->Throw(env, newRuntimeException(env, "out of memory adding "
|
||||
"another fd to DomainSocketWatcher#FdSet. we have %d already",
|
||||
sd->alloc_size));
|
||||
return;
|
||||
}
|
||||
nd->alloc_size = nd->alloc_size * 2;
|
||||
(*env)->SetLongField(env, obj, fd_set_data_fid, (jlong)(intptr_t)nd);
|
||||
sd = nd;
|
||||
}
|
||||
pollfd = &sd->pollfd[sd->used_size];
|
||||
sd->used_size++;
|
||||
pollfd->fd = fd;
|
||||
pollfd->events = POLLIN;
|
||||
pollfd->revents = 0;
|
||||
}
|
||||
|
||||
JNIEXPORT void JNICALL
|
||||
Java_org_apache_hadoop_net_unix_DomainSocketWatcher_00024FdSet_remove(
|
||||
JNIEnv *env, jobject obj, jint fd)
|
||||
{
|
||||
struct fd_set_data *sd;
|
||||
struct pollfd *pollfd, *last_pollfd;
|
||||
int used_size, i;
|
||||
|
||||
sd = (struct fd_set_data*)(intptr_t)(*env)->
|
||||
GetLongField(env, obj, fd_set_data_fid);
|
||||
used_size = sd->used_size;
|
||||
for (i = 0; i < used_size; i++) {
|
||||
pollfd = sd->pollfd + i;
|
||||
if (pollfd->fd == fd) break;
|
||||
}
|
||||
if (i == used_size) {
|
||||
(*env)->Throw(env, newRuntimeException(env, "failed to remove fd %d "
|
||||
"from the FdSet because it was never present.", fd));
|
||||
return;
|
||||
}
|
||||
last_pollfd = sd->pollfd + (used_size - 1);
|
||||
if (used_size > 1) {
|
||||
// Move last pollfd to the new empty slot if needed
|
||||
pollfd->fd = last_pollfd->fd;
|
||||
pollfd->events = last_pollfd->events;
|
||||
pollfd->revents = last_pollfd->revents;
|
||||
}
|
||||
memset(last_pollfd, 0, sizeof(struct pollfd));
|
||||
sd->used_size--;
|
||||
}
|
||||
|
||||
JNIEXPORT jobject JNICALL
|
||||
Java_org_apache_hadoop_net_unix_DomainSocketWatcher_00024FdSet_getAndClearReadableFds(
|
||||
JNIEnv *env, jobject obj)
|
||||
{
|
||||
int *carr = NULL;
|
||||
jobject jarr = NULL;
|
||||
struct fd_set_data *sd;
|
||||
int used_size, num_readable = 0, i, j;
|
||||
jthrowable jthr = NULL;
|
||||
|
||||
sd = (struct fd_set_data*)(intptr_t)(*env)->
|
||||
GetLongField(env, obj, fd_set_data_fid);
|
||||
used_size = sd->used_size;
|
||||
for (i = 0; i < used_size; i++) {
|
||||
if (sd->pollfd[i].revents & POLLIN) {
|
||||
num_readable++;
|
||||
} else {
|
||||
sd->pollfd[i].revents = 0;
|
||||
}
|
||||
}
|
||||
if (num_readable > 0) {
|
||||
carr = malloc(sizeof(int) * num_readable);
|
||||
if (!carr) {
|
||||
jthr = newRuntimeException(env, "failed to allocate a temporary array "
|
||||
"of %d ints", num_readable);
|
||||
goto done;
|
||||
}
|
||||
j = 0;
|
||||
for (i = 0; ((i < used_size) && (j < num_readable)); i++) {
|
||||
if (sd->pollfd[i].revents & POLLIN) {
|
||||
carr[j] = sd->pollfd[i].fd;
|
||||
j++;
|
||||
sd->pollfd[i].revents = 0;
|
||||
}
|
||||
}
|
||||
if (j != num_readable) {
|
||||
jthr = newRuntimeException(env, "failed to fill entire carr "
|
||||
"array of size %d: only filled %d elements", num_readable, j);
|
||||
goto done;
|
||||
}
|
||||
}
|
||||
jarr = (*env)->NewIntArray(env, num_readable);
|
||||
if (!jarr) {
|
||||
jthr = (*env)->ExceptionOccurred(env);
|
||||
(*env)->ExceptionClear(env);
|
||||
goto done;
|
||||
}
|
||||
if (num_readable > 0) {
|
||||
(*env)->SetIntArrayRegion(env, jarr, 0, num_readable, carr);
|
||||
jthr = (*env)->ExceptionOccurred(env);
|
||||
if (jthr) {
|
||||
(*env)->ExceptionClear(env);
|
||||
goto done;
|
||||
}
|
||||
}
|
||||
|
||||
done:
|
||||
free(carr);
|
||||
if (jthr) {
|
||||
(*env)->DeleteLocalRef(env, jarr);
|
||||
jarr = NULL;
|
||||
}
|
||||
return jarr;
|
||||
}
|
||||
|
||||
JNIEXPORT void JNICALL
|
||||
Java_org_apache_hadoop_net_unix_DomainSocketWatcher_00024FdSet_close(
|
||||
JNIEnv *env, jobject obj)
|
||||
{
|
||||
struct fd_set_data *sd;
|
||||
|
||||
sd = (struct fd_set_data*)(intptr_t)(*env)->
|
||||
GetLongField(env, obj, fd_set_data_fid);
|
||||
if (sd) {
|
||||
free(sd);
|
||||
(*env)->SetLongField(env, obj, fd_set_data_fid, 0L);
|
||||
}
|
||||
}
|
||||
|
||||
JNIEXPORT jint JNICALL
|
||||
Java_org_apache_hadoop_net_unix_DomainSocketWatcher_doPoll0(
|
||||
JNIEnv *env, jclass clazz, jint checkMs, jobject fdSet)
|
||||
{
|
||||
struct fd_set_data *sd;
|
||||
int ret, err;
|
||||
|
||||
sd = (struct fd_set_data*)(intptr_t)(*env)->
|
||||
GetLongField(env, fdSet, fd_set_data_fid);
|
||||
ret = poll(sd->pollfd, sd->used_size, checkMs);
|
||||
if (ret >= 0) {
|
||||
return ret;
|
||||
}
|
||||
err = errno;
|
||||
if (err != EINTR) { // treat EINTR as 0 fds ready
|
||||
(*env)->Throw(env, newIOException(env,
|
||||
"poll(2) failed with error code %d: %s", err, terror(err)));
|
||||
}
|
||||
return 0;
|
||||
}
|
|
@@ -352,7 +352,8 @@ Configuration for <<<conf/core-site.xml>>>
| | | This value is deprecated. Use dfs.http.policy |
*-------------------------+-------------------------+------------------------+
| <<<dfs.http.policy>>> | <HTTP_ONLY> or <HTTPS_ONLY> or <HTTP_AND_HTTPS> | |
| | | HTTPS_ONLY turns off http access |
| | | HTTPS_ONLY turns off http access. This option takes precedence over |
| | | the deprecated configuration dfs.https.enable and hadoop.ssl.enabled. |
*-------------------------+-------------------------+------------------------+
| <<<dfs.namenode.https-address>>> | <nn_host_fqdn:50470> | |
*-------------------------+-------------------------+------------------------+
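As a hedged illustration of the precedence documented above (the class name is hypothetical; only the dfs.* property names come from this change, and it assumes hadoop-common's Configuration is on the classpath):

import org.apache.hadoop.conf.Configuration;

public class HttpPolicyExample {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // the deprecated flag may still be present in older configs
    conf.setBoolean("dfs.https.enable", true);
    // an explicit dfs.http.policy is what HDFS now honors first
    conf.set("dfs.http.policy", "HTTPS_ONLY");
    // prints the configured policy value, here HTTPS_ONLY
    System.out.println("configured policy: " + conf.get("dfs.http.policy"));
  }
}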
@@ -41,7 +41,7 @@ public class TestKeyShell {

@Test
public void testKeySuccessfulKeyLifecycle() throws Exception {
outContent.flush();
outContent.reset();
String[] args1 = {"create", "key1", "--provider",
"jceks://file" + tmpDir + "/keystore.jceks"};
int rc = 0;
@@ -52,14 +52,14 @@ public class TestKeyShell {
assertTrue(outContent.toString().contains("key1 has been successfully " +
"created."));

outContent.flush();
outContent.reset();
String[] args2 = {"list", "--provider",
"jceks://file" + tmpDir + "/keystore.jceks"};
rc = ks.run(args2);
assertEquals(0, rc);
assertTrue(outContent.toString().contains("key1"));

outContent.flush();
outContent.reset();
String[] args3 = {"roll", "key1", "--provider",
"jceks://file" + tmpDir + "/keystore.jceks"};
rc = ks.run(args3);
@@ -67,7 +67,7 @@ public class TestKeyShell {
assertTrue(outContent.toString().contains("key1 has been successfully " +
"rolled."));

outContent.flush();
outContent.reset();
String[] args4 = {"delete", "key1", "--provider",
"jceks://file" + tmpDir + "/keystore.jceks"};
rc = ks.run(args4);
@@ -75,12 +75,12 @@ public class TestKeyShell {
assertTrue(outContent.toString().contains("key1 has been successfully " +
"deleted."));

outContent.flush();
outContent.reset();
String[] args5 = {"list", "--provider",
"jceks://file" + tmpDir + "/keystore.jceks"};
rc = ks.run(args5);
assertEquals(0, rc);
assertTrue(outContent.toString().contains("key1"));
assertFalse(outContent.toString(), outContent.toString().contains("key1"));
}

@Test
@@ -165,7 +165,7 @@ public class TestKeyShell {
assertTrue(outContent.toString().contains("key1 has been successfully " +
"created."));

outContent.flush();
outContent.reset();
String[] args2 = {"delete", "key1", "--provider",
"jceks://file" + tmpDir + "/keystore.jceks"};
rc = ks.run(args2);
@@ -0,0 +1,82 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.io.nativeio;

import java.io.File;
import java.io.FileInputStream;
import java.io.FileOutputStream;

import org.junit.Assert;
import org.junit.Assume;
import org.junit.Test;
import org.apache.commons.lang.SystemUtils;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.fs.Path;

public class TestSharedFileDescriptorFactory {
static final Log LOG = LogFactory.getLog(TestSharedFileDescriptorFactory.class);

private static final File TEST_BASE =
new File(System.getProperty("test.build.data", "/tmp"));

@Test(timeout=10000)
public void testReadAndWrite() throws Exception {
Assume.assumeTrue(NativeIO.isAvailable());
Assume.assumeTrue(SystemUtils.IS_OS_UNIX);
File path = new File(TEST_BASE, "testReadAndWrite");
path.mkdirs();
SharedFileDescriptorFactory factory =
new SharedFileDescriptorFactory("woot_", path.getAbsolutePath());
FileInputStream inStream = factory.createDescriptor(4096);
FileOutputStream outStream = new FileOutputStream(inStream.getFD());
outStream.write(101);
inStream.getChannel().position(0);
Assert.assertEquals(101, inStream.read());
inStream.close();
outStream.close();
FileUtil.fullyDelete(path);
}

static private void createTempFile(String path) throws Exception {
FileOutputStream fos = new FileOutputStream(path);
fos.write(101);
fos.close();
}

@Test(timeout=10000)
public void testCleanupRemainders() throws Exception {
Assume.assumeTrue(NativeIO.isAvailable());
Assume.assumeTrue(SystemUtils.IS_OS_UNIX);
File path = new File(TEST_BASE, "testCleanupRemainders");
path.mkdirs();
String remainder1 = path.getAbsolutePath() +
Path.SEPARATOR + "woot2_remainder1";
String remainder2 = path.getAbsolutePath() +
Path.SEPARATOR + "woot2_remainder2";
createTempFile(remainder1);
createTempFile(remainder2);
new SharedFileDescriptorFactory("woot2_", path.getAbsolutePath());
// creating the SharedFileDescriptorFactory should have removed
// the remainders
Assert.assertFalse(new File(remainder1).exists());
Assert.assertFalse(new File(remainder2).exists());
FileUtil.fullyDelete(path);
}
}
|
|
@ -0,0 +1,150 @@
|
|||
/**
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
* regarding copyright ownership. The ASF licenses this file
|
||||
* to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
package org.apache.hadoop.net.unix;
|
||||
|
||||
import java.util.ArrayList;
|
||||
import java.util.Random;
|
||||
import java.util.concurrent.CountDownLatch;
|
||||
import java.util.concurrent.atomic.AtomicInteger;
|
||||
import java.util.concurrent.locks.ReentrantLock;
|
||||
|
||||
import org.apache.commons.logging.Log;
|
||||
import org.apache.commons.logging.LogFactory;
|
||||
import org.junit.Assume;
|
||||
import org.junit.Before;
|
||||
import org.junit.Test;
|
||||
|
||||
import com.google.common.util.concurrent.Uninterruptibles;
|
||||
|
||||
public class TestDomainSocketWatcher {
|
||||
static final Log LOG = LogFactory.getLog(TestDomainSocketWatcher.class);
|
||||
|
||||
@Before
|
||||
public void before() {
|
||||
Assume.assumeTrue(DomainSocket.getLoadingFailureReason() == null);
|
||||
}
|
||||
|
||||
/**
|
||||
* Test that we can create a DomainSocketWatcher and then shut it down.
|
||||
*/
|
||||
@Test(timeout=60000)
|
||||
public void testCreateShutdown() throws Exception {
|
||||
DomainSocketWatcher watcher = new DomainSocketWatcher(10000000);
|
||||
watcher.close();
|
||||
}
|
||||
|
||||
/**
|
||||
* Test that we can get notifications out a DomainSocketWatcher.
|
||||
*/
|
||||
@Test(timeout=180000)
|
||||
public void testDeliverNotifications() throws Exception {
|
||||
DomainSocketWatcher watcher = new DomainSocketWatcher(10000000);
|
||||
DomainSocket pair[] = DomainSocket.socketpair();
|
||||
final CountDownLatch latch = new CountDownLatch(1);
|
||||
watcher.add(pair[1], new DomainSocketWatcher.Handler() {
|
||||
@Override
|
||||
public boolean handle(DomainSocket sock) {
|
||||
latch.countDown();
|
||||
return true;
|
||||
}
|
||||
});
|
||||
pair[0].close();
|
||||
latch.await();
|
||||
watcher.close();
|
||||
}
|
||||
|
||||
/**
|
||||
* Test that a java interruption can stop the watcher thread
|
||||
*/
|
||||
@Test(timeout=60000)
|
||||
public void testInterruption() throws Exception {
|
||||
DomainSocketWatcher watcher = new DomainSocketWatcher(10);
|
||||
watcher.interrupt();
|
||||
Uninterruptibles.joinUninterruptibly(watcher);
|
||||
}
|
||||
|
||||
@Test(timeout=300000)
|
||||
public void testStress() throws Exception {
|
||||
final int SOCKET_NUM = 250;
|
||||
final ReentrantLock lock = new ReentrantLock();
|
||||
final DomainSocketWatcher watcher = new DomainSocketWatcher(10000000);
|
||||
final ArrayList<DomainSocket[]> pairs = new ArrayList<DomainSocket[]>();
|
||||
final AtomicInteger handled = new AtomicInteger(0);
|
||||
|
||||
final Thread adderThread = new Thread(new Runnable() {
|
||||
@Override
|
||||
public void run() {
|
||||
try {
|
||||
for (int i = 0; i < SOCKET_NUM; i++) {
|
||||
DomainSocket pair[] = DomainSocket.socketpair();
|
||||
watcher.add(pair[1], new DomainSocketWatcher.Handler() {
|
||||
@Override
|
||||
public boolean handle(DomainSocket sock) {
|
||||
handled.incrementAndGet();
|
||||
return true;
|
||||
}
|
||||
});
|
||||
lock.lock();
|
||||
try {
|
||||
pairs.add(pair);
|
||||
} finally {
|
||||
lock.unlock();
|
||||
}
|
||||
}
|
||||
} catch (Throwable e) {
|
||||
LOG.error(e);
|
||||
throw new RuntimeException(e);
|
||||
}
|
||||
}
|
||||
});
|
||||
|
||||
final Thread removerThread = new Thread(new Runnable() {
|
||||
@Override
|
||||
public void run() {
|
||||
final Random random = new Random();
|
||||
try {
|
||||
while (handled.get() != SOCKET_NUM) {
|
||||
lock.lock();
|
||||
try {
|
||||
if (!pairs.isEmpty()) {
|
||||
int idx = random.nextInt(pairs.size());
|
||||
DomainSocket pair[] = pairs.remove(idx);
|
||||
if (random.nextBoolean()) {
|
||||
pair[0].close();
|
||||
} else {
|
||||
watcher.remove(pair[1]);
|
||||
}
|
||||
}
|
||||
} finally {
|
||||
lock.unlock();
|
||||
}
|
||||
}
|
||||
} catch (Throwable e) {
|
||||
LOG.error(e);
|
||||
throw new RuntimeException(e);
|
||||
}
|
||||
}
|
||||
});
|
||||
|
||||
adderThread.start();
|
||||
removerThread.start();
|
||||
Uninterruptibles.joinUninterruptibly(adderThread);
|
||||
Uninterruptibles.joinUninterruptibly(removerThread);
|
||||
watcher.close();
|
||||
}
|
||||
}
|
|
@ -16,26 +16,20 @@
|
|||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package org.apache.hadoop.yarn.util;
|
||||
package org.apache.hadoop.service;
|
||||
|
||||
import org.apache.commons.logging.Log;
|
||||
import org.apache.commons.logging.LogFactory;
|
||||
import org.apache.hadoop.conf.Configuration;
|
||||
import org.apache.hadoop.service.Service.STATE;
|
||||
import org.junit.Before;
|
||||
import org.junit.Test;
|
||||
|
||||
import static org.junit.Assert.assertEquals;
|
||||
import static org.junit.Assert.assertFalse;
|
||||
import static org.junit.Assert.assertTrue;
|
||||
import static org.junit.Assert.fail;
|
||||
|
||||
import org.apache.commons.logging.Log;
|
||||
import org.apache.commons.logging.LogFactory;
|
||||
import org.apache.hadoop.conf.Configuration;
|
||||
import org.apache.hadoop.service.AbstractService;
|
||||
import org.apache.hadoop.service.BreakableService;
|
||||
import org.apache.hadoop.service.CompositeService;
|
||||
import org.apache.hadoop.service.Service;
|
||||
import org.apache.hadoop.service.ServiceStateException;
|
||||
import org.apache.hadoop.service.Service.STATE;
|
||||
import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
|
||||
import org.junit.Before;
|
||||
import org.junit.Test;
|
||||
|
||||
public class TestCompositeService {
|
||||
|
||||
private static final int NUM_OF_SERVICES = 5;
|
||||
|
@ -156,7 +150,7 @@ public class TestCompositeService {
|
|||
try {
|
||||
serviceManager.start();
|
||||
fail("Exception should have been thrown due to startup failure of last service");
|
||||
} catch (YarnRuntimeException e) {
|
||||
} catch (ServiceTestRuntimeException e) {
|
||||
for (int i = 0; i < NUM_OF_SERVICES - 1; i++) {
|
||||
if (i >= FAILED_SERVICE_SEQ_NUMBER && STOP_ONLY_STARTED_SERVICES) {
|
||||
// Failed service state should be INITED
|
||||
|
@ -197,7 +191,7 @@ public class TestCompositeService {
|
|||
// Stop the composite service
|
||||
try {
|
||||
serviceManager.stop();
|
||||
} catch (YarnRuntimeException e) {
|
||||
} catch (ServiceTestRuntimeException e) {
|
||||
}
|
||||
assertInState(STATE.STOPPED, services);
|
||||
}
|
||||
|
@ -335,7 +329,41 @@ public class TestCompositeService {
|
|||
|
||||
testService.init(new Configuration());
|
||||
assertEquals("Incorrect number of services",
|
||||
1, testService.getServices().size());
|
||||
1, testService.getServices().size());
|
||||
}
|
||||
|
||||
@Test(timeout = 1000)
|
||||
public void testAddInitedSiblingInInit() throws Throwable {
|
||||
CompositeService parent = new CompositeService("parent");
|
||||
BreakableService sibling = new BreakableService();
|
||||
sibling.init(new Configuration());
|
||||
parent.addService(new AddSiblingService(parent,
|
||||
sibling,
|
||||
STATE.INITED));
|
||||
parent.init(new Configuration());
|
||||
parent.start();
|
||||
parent.stop();
|
||||
assertEquals("Incorrect number of services",
|
||||
2, parent.getServices().size());
|
||||
}
|
||||
|
||||
@Test(timeout = 1000)
|
||||
public void testAddUninitedSiblingInInit() throws Throwable {
|
||||
CompositeService parent = new CompositeService("parent");
|
||||
BreakableService sibling = new BreakableService();
|
||||
parent.addService(new AddSiblingService(parent,
|
||||
sibling,
|
||||
STATE.INITED));
|
||||
parent.init(new Configuration());
|
||||
try {
|
||||
parent.start();
|
||||
fail("Expected an exception, got " + parent);
|
||||
} catch (ServiceStateException e) {
|
||||
//expected
|
||||
}
|
||||
parent.stop();
|
||||
assertEquals("Incorrect number of services",
|
||||
2, parent.getServices().size());
|
||||
}
|
||||
|
||||
@Test
|
||||
|
@ -365,6 +393,105 @@ public class TestCompositeService {
|
|||
2, testService.getServices().size());
|
||||
}
|
||||
|
||||
@Test(timeout = 1000)
|
||||
public void testAddStartedChildBeforeInit() throws Throwable {
|
||||
CompositeService parent = new CompositeService("parent");
|
||||
BreakableService child = new BreakableService();
|
||||
child.init(new Configuration());
|
||||
child.start();
|
||||
AddSiblingService.addChildToService(parent, child);
|
||||
try {
|
||||
parent.init(new Configuration());
|
||||
fail("Expected an exception, got " + parent);
|
||||
} catch (ServiceStateException e) {
|
||||
//expected
|
||||
}
|
||||
parent.stop();
|
||||
}
|
||||
|
||||
@Test(timeout = 1000)
|
||||
public void testAddStoppedChildBeforeInit() throws Throwable {
|
||||
CompositeService parent = new CompositeService("parent");
|
||||
BreakableService child = new BreakableService();
|
||||
child.init(new Configuration());
|
||||
child.start();
|
||||
child.stop();
|
||||
AddSiblingService.addChildToService(parent, child);
|
||||
try {
|
||||
parent.init(new Configuration());
|
||||
fail("Expected an exception, got " + parent);
|
||||
} catch (ServiceStateException e) {
|
||||
//expected
|
||||
}
|
||||
parent.stop();
|
||||
}
|
||||
|
||||
@Test(timeout = 1000)
|
||||
public void testAddStartedSiblingInStart() throws Throwable {
|
||||
CompositeService parent = new CompositeService("parent");
|
||||
BreakableService sibling = new BreakableService();
|
||||
sibling.init(new Configuration());
|
||||
sibling.start();
|
||||
parent.addService(new AddSiblingService(parent,
|
||||
sibling,
|
||||
STATE.STARTED));
|
||||
parent.init(new Configuration());
|
||||
parent.start();
|
||||
parent.stop();
|
||||
assertEquals("Incorrect number of services",
|
||||
2, parent.getServices().size());
|
||||
}
|
||||
|
||||
@Test(timeout = 1000)
|
||||
public void testAddUninitedSiblingInStart() throws Throwable {
|
||||
CompositeService parent = new CompositeService("parent");
|
||||
BreakableService sibling = new BreakableService();
|
||||
parent.addService(new AddSiblingService(parent,
|
||||
sibling,
|
||||
STATE.STARTED));
|
||||
parent.init(new Configuration());
|
||||
assertInState(STATE.NOTINITED, sibling);
|
||||
parent.start();
|
||||
parent.stop();
|
||||
assertEquals("Incorrect number of services",
|
||||
2, parent.getServices().size());
|
||||
}
|
||||
|
||||
@Test(timeout = 1000)
|
||||
public void testAddStartedSiblingInInit() throws Throwable {
|
||||
CompositeService parent = new CompositeService("parent");
|
||||
BreakableService sibling = new BreakableService();
|
||||
sibling.init(new Configuration());
|
||||
sibling.start();
|
||||
parent.addService(new AddSiblingService(parent,
|
||||
sibling,
|
||||
STATE.INITED));
|
||||
parent.init(new Configuration());
|
||||
assertInState(STATE.STARTED, sibling);
|
||||
parent.start();
|
||||
assertInState(STATE.STARTED, sibling);
|
||||
parent.stop();
|
||||
assertEquals("Incorrect number of services",
|
||||
2, parent.getServices().size());
|
||||
assertInState(STATE.STOPPED, sibling);
|
||||
}
|
||||
|
||||
@Test(timeout = 1000)
|
||||
public void testAddStartedSiblingInStop() throws Throwable {
|
||||
CompositeService parent = new CompositeService("parent");
|
||||
BreakableService sibling = new BreakableService();
|
||||
sibling.init(new Configuration());
|
||||
sibling.start();
|
||||
parent.addService(new AddSiblingService(parent,
|
||||
sibling,
|
||||
STATE.STOPPED));
|
||||
parent.init(new Configuration());
|
||||
parent.start();
|
||||
parent.stop();
|
||||
assertEquals("Incorrect number of services",
|
||||
2, parent.getServices().size());
|
||||
}
|
||||
|
||||
public static class CompositeServiceAddingAChild extends CompositeService{
|
||||
Service child;
|
||||
|
||||
|
@ -379,7 +506,18 @@ public class TestCompositeService {
|
|||
super.serviceInit(conf);
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
public static class ServiceTestRuntimeException extends RuntimeException {
|
||||
public ServiceTestRuntimeException(String message) {
|
||||
super(message);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* This is a composite service that keeps a count of the number of lifecycle
|
||||
* events called, and can be set to throw a {@link ServiceTestRuntimeException }
|
||||
* during service start or stop
|
||||
*/
|
||||
public static class CompositeServiceImpl extends CompositeService {
|
||||
|
||||
public static boolean isPolicyToStopOnlyStartedServices() {
|
||||
|
@ -408,7 +546,7 @@ public class TestCompositeService {
|
|||
@Override
|
||||
protected void serviceStart() throws Exception {
|
||||
if (throwExceptionOnStart) {
|
||||
throw new YarnRuntimeException("Fake service start exception");
|
||||
throw new ServiceTestRuntimeException("Fake service start exception");
|
||||
}
|
||||
counter++;
|
||||
callSequenceNumber = counter;
|
||||
|
@ -420,7 +558,7 @@ public class TestCompositeService {
|
|||
counter++;
|
||||
callSequenceNumber = counter;
|
||||
if (throwExceptionOnStop) {
|
||||
throw new YarnRuntimeException("Fake service stop exception");
|
||||
throw new ServiceTestRuntimeException("Fake service stop exception");
|
||||
}
|
||||
super.serviceStop();
|
||||
}
|
||||
|
@ -457,6 +595,9 @@ public class TestCompositeService {
|
|||
|
||||
}
|
||||
|
||||
/**
|
||||
* Composite service that makes the addService method public to all
|
||||
*/
|
||||
public static class ServiceManager extends CompositeService {
|
||||
|
||||
public void addTestService(CompositeService service) {
|
||||
|
@ -468,4 +609,55 @@ public class TestCompositeService {
|
|||
}
|
||||
}
|
||||
|
||||
public static class AddSiblingService extends CompositeService {
|
||||
private final CompositeService parent;
|
||||
private final Service serviceToAdd;
|
||||
private STATE triggerState;
|
||||
|
||||
public AddSiblingService(CompositeService parent,
|
||||
Service serviceToAdd,
|
||||
STATE triggerState) {
|
||||
super("ParentStateManipulatorService");
|
||||
this.parent = parent;
|
||||
this.serviceToAdd = serviceToAdd;
|
||||
this.triggerState = triggerState;
|
||||
}
|
||||
|
||||
/**
|
||||
* Add the serviceToAdd to the parent if this service
|
||||
* is in the state requested
|
||||
*/
|
||||
private void maybeAddSibling() {
|
||||
if (getServiceState() == triggerState) {
|
||||
parent.addService(serviceToAdd);
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
protected void serviceInit(Configuration conf) throws Exception {
|
||||
maybeAddSibling();
|
||||
super.serviceInit(conf);
|
||||
}
|
||||
|
||||
@Override
|
||||
protected void serviceStart() throws Exception {
|
||||
maybeAddSibling();
|
||||
super.serviceStart();
|
||||
}
|
||||
|
||||
@Override
|
||||
protected void serviceStop() throws Exception {
|
||||
maybeAddSibling();
|
||||
super.serviceStop();
|
||||
}
|
||||
|
||||
/**
|
||||
* Expose addService method
|
||||
* @param parent parent service
|
||||
* @param child child to add
|
||||
*/
|
||||
public static void addChildToService(CompositeService parent, Service child) {
|
||||
parent.addService(child);
|
||||
}
|
||||
}
|
||||
}
|
|
@ -50,14 +50,6 @@ public class IdUserGroup {
|
|||
private BiMap<Integer, String> gidNameMap = HashBiMap.create();
|
||||
|
||||
private long lastUpdateTime = 0; // Last time maps were updated
|
||||
|
||||
static public class DuplicateNameOrIdException extends IOException {
|
||||
private static final long serialVersionUID = 1L;
|
||||
|
||||
public DuplicateNameOrIdException(String msg) {
|
||||
super(msg);
|
||||
}
|
||||
}
|
||||
|
||||
public IdUserGroup() throws IOException {
|
||||
updateMaps();
|
||||
|
@ -80,7 +72,8 @@ public class IdUserGroup {
|
|||
}
|
||||
}
|
||||
|
||||
private static final String DUPLICATE_NAME_ID_DEBUG_INFO = "NFS gateway can't start with duplicate name or id on the host system.\n"
|
||||
private static final String DUPLICATE_NAME_ID_DEBUG_INFO =
|
||||
"NFS gateway could have problem starting with duplicate name or id on the host system.\n"
|
||||
+ "This is because HDFS (non-kerberos cluster) uses name as the only way to identify a user or group.\n"
|
||||
+ "The host system with duplicated user/group name or id might work fine most of the time by itself.\n"
|
||||
+ "However when NFS gateway talks to HDFS, HDFS accepts only user and group name.\n"
|
||||
|
@ -88,6 +81,16 @@ public class IdUserGroup {
|
|||
+ "<getent passwd | cut -d: -f1,3> and <getent group | cut -d: -f1,3> on Linux systms,\n"
|
||||
+ "<dscl . -list /Users UniqueID> and <dscl . -list /Groups PrimaryGroupID> on MacOS.";
|
||||
|
||||
private static void reportDuplicateEntry(final String header,
|
||||
final Integer key, final String value,
|
||||
final Integer ekey, final String evalue) {
|
||||
LOG.warn("\n" + header + String.format(
|
||||
"new entry (%d, %s), existing entry: (%d, %s).\n%s\n%s",
|
||||
key, value, ekey, evalue,
|
||||
"The new entry is to be ignored for the following reason.",
|
||||
DUPLICATE_NAME_ID_DEBUG_INFO));
|
||||
}
|
||||
|
||||
/**
|
||||
* Get the whole list of users and groups and save them in the maps.
|
||||
* @throws IOException
|
||||
|
@ -108,22 +111,27 @@ public class IdUserGroup {
|
|||
}
|
||||
LOG.debug("add to " + mapName + "map:" + nameId[0] + " id:" + nameId[1]);
|
||||
// HDFS can't differentiate duplicate names with simple authentication
|
||||
Integer key = Integer.valueOf(nameId[1]);
|
||||
String value = nameId[0];
|
||||
final Integer key = Integer.valueOf(nameId[1]);
|
||||
final String value = nameId[0];
|
||||
if (map.containsKey(key)) {
|
||||
LOG.error(String.format(
|
||||
"Got duplicate id:(%d, %s), existing entry: (%d, %s).\n%s", key,
|
||||
value, key, map.get(key), DUPLICATE_NAME_ID_DEBUG_INFO));
|
||||
throw new DuplicateNameOrIdException("Got duplicate id.");
|
||||
final String prevValue = map.get(key);
|
||||
if (value.equals(prevValue)) {
|
||||
// silently ignore equivalent entries
|
||||
continue;
|
||||
}
|
||||
reportDuplicateEntry(
|
||||
"Got multiple names associated with the same id: ",
|
||||
key, value, key, prevValue);
|
||||
continue;
|
||||
}
|
||||
if (map.containsValue(nameId[0])) {
|
||||
LOG.error(String.format(
|
||||
"Got duplicate name:(%d, %s), existing entry: (%d, %s) \n%s",
|
||||
key, value, map.inverse().get(value), value,
|
||||
DUPLICATE_NAME_ID_DEBUG_INFO));
|
||||
throw new DuplicateNameOrIdException("Got duplicate name");
|
||||
if (map.containsValue(value)) {
|
||||
final Integer prevKey = map.inverse().get(value);
|
||||
reportDuplicateEntry(
|
||||
"Got multiple ids associated with the same name: ",
|
||||
key, value, prevKey, value);
|
||||
continue;
|
||||
}
|
||||
map.put(Integer.valueOf(nameId[1]), nameId[0]);
|
||||
map.put(key, value);
|
||||
}
|
||||
LOG.info("Updated " + mapName + " map size:" + map.size());
|
||||
|
||||
|
|
|
@ -17,11 +17,10 @@
|
|||
*/
|
||||
package org.apache.hadoop.nfs.nfs3;
|
||||
|
||||
import static org.junit.Assert.fail;
|
||||
|
||||
import static org.junit.Assert.assertEquals;
|
||||
import static org.junit.Assert.assertTrue;
|
||||
import java.io.IOException;
|
||||
|
||||
import org.apache.hadoop.nfs.nfs3.IdUserGroup.DuplicateNameOrIdException;
|
||||
import org.junit.Test;
|
||||
|
||||
import com.google.common.collect.BiMap;
|
||||
|
@ -33,24 +32,36 @@ public class TestIdUserGroup {
|
|||
public void testDuplicates() throws IOException {
|
||||
String GET_ALL_USERS_CMD = "echo \"root:x:0:0:root:/root:/bin/bash\n"
|
||||
+ "hdfs:x:11501:10787:Grid Distributed File System:/home/hdfs:/bin/bash\n"
|
||||
+ "hdfs:x:11502:10788:Grid Distributed File System:/home/hdfs:/bin/bash\""
|
||||
+ "hdfs:x:11502:10788:Grid Distributed File System:/home/hdfs:/bin/bash\n"
|
||||
+ "hdfs1:x:11501:10787:Grid Distributed File System:/home/hdfs:/bin/bash\n"
|
||||
+ "hdfs2:x:11502:10787:Grid Distributed File System:/home/hdfs:/bin/bash\n"
|
||||
+ "bin:x:2:2:bin:/bin:/bin/sh\n"
|
||||
+ "bin:x:1:1:bin:/bin:/sbin/nologin\n"
|
||||
+ "daemon:x:1:1:daemon:/usr/sbin:/bin/sh\n"
|
||||
+ "daemon:x:2:2:daemon:/sbin:/sbin/nologin\""
|
||||
+ " | cut -d: -f1,3";
|
||||
String GET_ALL_GROUPS_CMD = "echo \"hdfs:*:11501:hrt_hdfs\n"
|
||||
+ "mapred:x:497\n" + "mapred2:x:497\"" + " | cut -d: -f1,3";
|
||||
+ "mapred:x:497\n"
|
||||
+ "mapred2:x:497\n"
|
||||
+ "mapred:x:498\n"
|
||||
+ "mapred3:x:498\""
|
||||
+ " | cut -d: -f1,3";
|
||||
// Maps for id to name map
|
||||
BiMap<Integer, String> uMap = HashBiMap.create();
|
||||
BiMap<Integer, String> gMap = HashBiMap.create();
|
||||
|
||||
try {
|
||||
IdUserGroup.updateMapInternal(uMap, "user", GET_ALL_USERS_CMD, ":");
|
||||
fail("didn't detect the duplicate name");
|
||||
} catch (DuplicateNameOrIdException e) {
|
||||
}
|
||||
IdUserGroup.updateMapInternal(uMap, "user", GET_ALL_USERS_CMD, ":");
|
||||
assertTrue(uMap.size() == 5);
|
||||
assertEquals(uMap.get(0), "root");
|
||||
assertEquals(uMap.get(11501), "hdfs");
|
||||
assertEquals(uMap.get(11502), "hdfs2");
|
||||
assertEquals(uMap.get(2), "bin");
|
||||
assertEquals(uMap.get(1), "daemon");
|
||||
|
||||
try {
|
||||
IdUserGroup.updateMapInternal(gMap, "group", GET_ALL_GROUPS_CMD, ":");
|
||||
fail("didn't detect the duplicate id");
|
||||
} catch (DuplicateNameOrIdException e) {
|
||||
}
|
||||
IdUserGroup.updateMapInternal(gMap, "group", GET_ALL_GROUPS_CMD, ":");
|
||||
assertTrue(gMap.size() == 3);
|
||||
assertEquals(gMap.get(11501), "hdfs");
|
||||
assertEquals(gMap.get(497), "mapred");
|
||||
assertEquals(gMap.get(498), "mapred3");
|
||||
}
|
||||
}
|
||||
|
|
|
@ -23,6 +23,7 @@ import static org.junit.Assert.assertFalse;
|
|||
import static org.junit.Assert.assertTrue;
|
||||
|
||||
import java.nio.ByteBuffer;
|
||||
import java.util.Random;
|
||||
|
||||
import org.apache.hadoop.oncrpc.RpcUtil.RpcFrameDecoder;
|
||||
import org.apache.hadoop.oncrpc.security.CredentialsNone;
|
||||
|
@ -31,17 +32,17 @@ import org.jboss.netty.buffer.ByteBufferBackedChannelBuffer;
|
|||
import org.jboss.netty.buffer.ChannelBuffer;
|
||||
import org.jboss.netty.buffer.ChannelBuffers;
|
||||
import org.jboss.netty.channel.Channel;
|
||||
import org.jboss.netty.channel.ChannelException;
|
||||
import org.jboss.netty.channel.ChannelHandlerContext;
|
||||
import org.junit.Test;
|
||||
import org.mockito.Mockito;
|
||||
|
||||
public class TestFrameDecoder {
|
||||
|
||||
private static int port = 12345; // some random server port
|
||||
private static int resultSize;
|
||||
|
||||
static void testRequest(XDR request) {
|
||||
SimpleTcpClient tcpClient = new SimpleTcpClient("localhost", port, request,
|
||||
static void testRequest(XDR request, int serverPort) {
|
||||
SimpleTcpClient tcpClient = new SimpleTcpClient("localhost", serverPort, request,
|
||||
true);
|
||||
tcpClient.run();
|
||||
}
|
||||
|
@ -148,10 +149,25 @@ public class TestFrameDecoder {
|
|||
@Test
|
||||
public void testFrames() {
|
||||
|
||||
RpcProgram program = new TestFrameDecoder.TestRpcProgram("TestRpcProgram",
|
||||
"localhost", port, 100000, 1, 2);
|
||||
SimpleTcpServer tcpServer = new SimpleTcpServer(port, program, 1);
|
||||
tcpServer.run();
|
||||
Random rand = new Random();
|
||||
int serverPort = 30000 + rand.nextInt(10000);
|
||||
int retries = 10; // A few retries in case initial choice is in use.
|
||||
|
||||
while (true) {
|
||||
try {
|
||||
RpcProgram program = new TestFrameDecoder.TestRpcProgram("TestRpcProgram",
|
||||
"localhost", serverPort, 100000, 1, 2);
|
||||
SimpleTcpServer tcpServer = new SimpleTcpServer(serverPort, program, 1);
|
||||
tcpServer.run();
|
||||
break; // Successfully bound a port, break out.
|
||||
} catch (ChannelException ce) {
|
||||
if (retries-- > 0) {
|
||||
serverPort += rand.nextInt(20); // Port in use? Try another.
|
||||
} else {
|
||||
throw ce; // Out of retries.
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
XDR xdrOut = createGetportMount();
|
||||
int headerSize = xdrOut.size();
|
||||
|
@ -161,7 +177,7 @@ public class TestFrameDecoder {
|
|||
int requestSize = xdrOut.size() - headerSize;
|
||||
|
||||
// Send the request to the server
|
||||
testRequest(xdrOut);
|
||||
testRequest(xdrOut, serverPort);
|
||||
|
||||
// Verify the server got the request with right size
|
||||
assertEquals(requestSize, resultSize);
|
||||
|
|
|
@@ -303,6 +303,14 @@ Release 2.4.0 - UNRELEASED
HDFS-5804. HDFS NFS Gateway fails to mount and proxy when using Kerberos.
(Abin Shahab via jing9)

HDFS-5859. DataNode#checkBlockToken should check block tokens even if
security is not enabled. (cmccabe)

HDFS-5746. Add ShortCircuitSharedMemorySegment (cmccabe)

HDFS-4911. Reduce PeerCache timeout to be commensurate with
dfs.datanode.socket.reuse.keepalive (cmccabe)

OPTIMIZATIONS

HDFS-5790. LeaseManager.findPath is very slow when many leases need recovery
@@ -319,6 +327,32 @@ Release 2.4.0 - UNRELEASED
HDFS-5856. DataNode.checkDiskError might throw NPE.
(Josh Elser via suresh)

HDFS-5828. BlockPlacementPolicyWithNodeGroup can place multiple replicas on
the same node group when dfs.namenode.avoid.write.stale.datanode is true.
(Buddy via junping_du)

HDFS-5767. NFS implementation assumes userName userId mapping to be unique,
which is not true sometimes (Yongjun Zhang via brandonli)

HDFS-5791. TestHttpsFileSystem should use a random port to avoid binding
error during testing (Haohui Mai via brandonli)

HDFS-5709. Improve NameNode upgrade with existing reserved paths and path
components. (Andrew Wang via atm)

HDFS-5881. Fix skip() of the short-circuit local reader(legacy). (kihwal)

HDFS-5895. HDFS cacheadmin -listPools has exit_code of 1 when the command
returns 0 result. (Tassapol Athiapinya via cnauroth)

HDFS-5807. TestBalancerWithNodeGroup.testBalancerWithNodeGroup fails
intermittently. (Chen He via kihwal)

HDFS-5882. TestAuditLogs is flaky (jxiang via cmccabe)

HDFS-5900. Cannot set cache pool limit of "unlimited" via CacheAdmin.
(wang)

Release 2.3.0 - UNRELEASED

INCOMPATIBLE CHANGES
@@ -848,6 +882,15 @@ Release 2.3.0 - UNRELEASED
HDFS-5842. Cannot create hftp filesystem when using a proxy user ugi and a doAs
on a secure cluster. (jing9)

HDFS-5399. Revisit SafeModeException and corresponding retry policies.
(Jing Zhao via todd)

HDFS-5876. SecureDataNodeStarter does not pick up configuration in
hdfs-site.xml. (Haohui Mai via jing9)

HDFS-5873. dfs.http.policy should have higher precedence over dfs.https.enable.
(Haohui Mai via jing9)

BREAKDOWN OF HDFS-2832 SUBTASKS AND RELATED JIRAS

HDFS-4985. Add storage type to the protocol and expose it in block report
@@ -629,7 +629,7 @@ class BlockReaderLocalLegacy implements BlockReader {
skipBuf = new byte[bytesPerChecksum];
}
int ret = read(skipBuf, 0, (int)(n - remaining));
return ret;
return (remaining + ret);
}

// optimize for big gap: discard the current buffer, skip to
@@ -660,9 +660,9 @@ class BlockReaderLocalLegacy implements BlockReader {
int ret = read(skipBuf, 0, myOffsetFromChunkBoundary);

if (ret == -1) { // EOS
return toskip;
return (toskip + remaining);
} else {
return (toskip + ret);
return (toskip + remaining + ret);
}
}
@@ -36,6 +36,8 @@ import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_FAILOVER_SLEEPTIME
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_FAILOVER_SLEEPTIME_BASE_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_FAILOVER_SLEEPTIME_MAX_DEFAULT;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_FAILOVER_SLEEPTIME_MAX_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_RETRY_MAX_ATTEMPTS_DEFAULT;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_RETRY_MAX_ATTEMPTS_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_MAX_BLOCK_ACQUIRE_FAILURES_DEFAULT;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_MAX_BLOCK_ACQUIRE_FAILURES_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_READ_PREFETCH_SIZE_KEY;
@@ -261,6 +263,7 @@ public class DFSClient implements java.io.Closeable {
public static class Conf {
final int hdfsTimeout; // timeout value for a DFS operation.
final int maxFailoverAttempts;
final int maxRetryAttempts;
final int failoverSleepBaseMillis;
final int failoverSleepMaxMillis;
final int maxBlockAcquireFailures;
@@ -306,6 +309,9 @@ public class DFSClient implements java.io.Closeable {
maxFailoverAttempts = conf.getInt(
DFS_CLIENT_FAILOVER_MAX_ATTEMPTS_KEY,
DFS_CLIENT_FAILOVER_MAX_ATTEMPTS_DEFAULT);
maxRetryAttempts = conf.getInt(
DFS_CLIENT_RETRY_MAX_ATTEMPTS_KEY,
DFS_CLIENT_RETRY_MAX_ATTEMPTS_DEFAULT);
failoverSleepBaseMillis = conf.getInt(
DFS_CLIENT_FAILOVER_SLEEPTIME_BASE_KEY,
DFS_CLIENT_FAILOVER_SLEEPTIME_BASE_DEFAULT);
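A hedged sketch of how the new retry budget could be tuned from client code (the wrapper class is hypothetical; the key name and its default of 10 come from the DFSConfigKeys hunk just below, and it assumes hadoop-hdfs is on the classpath):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;

public class RetryBudgetExample {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // dfs.client.retry.max.attempts; the change below defaults it to 10
    conf.setInt(DFSConfigKeys.DFS_CLIENT_RETRY_MAX_ATTEMPTS_KEY, 3);
    System.out.println("retry attempts: "
        + conf.getInt(DFSConfigKeys.DFS_CLIENT_RETRY_MAX_ATTEMPTS_KEY, 10));
  }
}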
@@ -84,9 +84,11 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
public static final int DFS_CLIENT_FAILOVER_CONNECTION_RETRIES_DEFAULT = 0;
public static final String DFS_CLIENT_FAILOVER_CONNECTION_RETRIES_ON_SOCKET_TIMEOUTS_KEY = "dfs.client.failover.connection.retries.on.timeouts";
public static final int DFS_CLIENT_FAILOVER_CONNECTION_RETRIES_ON_SOCKET_TIMEOUTS_DEFAULT = 0;
public static final String DFS_CLIENT_RETRY_MAX_ATTEMPTS_KEY = "dfs.client.retry.max.attempts";
public static final int DFS_CLIENT_RETRY_MAX_ATTEMPTS_DEFAULT = 10;

public static final String DFS_CLIENT_SOCKET_CACHE_EXPIRY_MSEC_KEY = "dfs.client.socketcache.expiryMsec";
public static final long DFS_CLIENT_SOCKET_CACHE_EXPIRY_MSEC_DEFAULT = 2 * 60 * 1000;
public static final long DFS_CLIENT_SOCKET_CACHE_EXPIRY_MSEC_DEFAULT = 3000;
public static final String DFS_CLIENT_WRITE_EXCLUDE_NODES_CACHE_EXPIRY_INTERVAL = "dfs.client.write.exclude.nodes.cache.expiry.interval.millis";
public static final long DFS_CLIENT_WRITE_EXCLUDE_NODES_CACHE_EXPIRY_INTERVAL_DEFAULT = 10 * 60 * 1000; // 10 minutes, in ms
public static final String DFS_NAMENODE_BACKUP_ADDRESS_KEY = "dfs.namenode.backup.address";
@@ -217,7 +219,7 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
public static final String DFS_DATANODE_SYNCONCLOSE_KEY = "dfs.datanode.synconclose";
public static final boolean DFS_DATANODE_SYNCONCLOSE_DEFAULT = false;
public static final String DFS_DATANODE_SOCKET_REUSE_KEEPALIVE_KEY = "dfs.datanode.socket.reuse.keepalive";
public static final int DFS_DATANODE_SOCKET_REUSE_KEEPALIVE_DEFAULT = 1000;
public static final int DFS_DATANODE_SOCKET_REUSE_KEEPALIVE_DEFAULT = 4000;

public static final String DFS_NAMENODE_DATANODE_REGISTRATION_IP_HOSTNAME_CHECK_KEY = "dfs.namenode.datanode.registration.ip-hostname-check";
public static final boolean DFS_NAMENODE_DATANODE_REGISTRATION_IP_HOSTNAME_CHECK_DEFAULT = true;
@@ -578,6 +580,8 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
public static final String DFS_HTTP_CLIENT_RETRY_POLICY_SPEC_DEFAULT = "10000,6,60000,10"; //t1,n1,t2,n2,...
public static final String DFS_HTTP_CLIENT_FAILOVER_MAX_ATTEMPTS_KEY = "dfs.http.client.failover.max.attempts";
public static final int DFS_HTTP_CLIENT_FAILOVER_MAX_ATTEMPTS_DEFAULT = 15;
public static final String DFS_HTTP_CLIENT_RETRY_MAX_ATTEMPTS_KEY = "dfs.http.client.retry.max.attempts";
public static final int DFS_HTTP_CLIENT_RETRY_MAX_ATTEMPTS_DEFAULT = 10;
public static final String DFS_HTTP_CLIENT_FAILOVER_SLEEPTIME_BASE_KEY = "dfs.http.client.failover.sleep.base.millis";
public static final int DFS_HTTP_CLIENT_FAILOVER_SLEEPTIME_BASE_DEFAULT = 500;
public static final String DFS_HTTP_CLIENT_FAILOVER_SLEEPTIME_MAX_KEY = "dfs.http.client.failover.sleep.max.millis";
@@ -1345,6 +1345,14 @@ implements ByteBufferReadable, CanSetDropBehind, CanSetReadahead,
pos += blockReader.skip(diff);
if (pos == targetPos) {
done = true;
} else {
// The range was already checked. If the block reader returns
// something unexpected instead of throwing an exception, it is
// most likely a bug.
String errMsg = "BlockReader failed to seek to " +
targetPos + ". Instead, it seeked to " + pos + ".";
DFSClient.LOG.warn(errMsg);
throw new IOException(errMsg);
}
} catch (IOException e) {//make following read to retry
if(DFSClient.LOG.isDebugEnabled()) {
@ -261,6 +261,47 @@ public class DFSUtil {
|
|||
return true;
|
||||
}
|
||||
|
||||
/**
|
||||
* Checks if a string is a valid path component. For instance, components
|
||||
* cannot contain a ":" or "/", and cannot be equal to a reserved component
|
||||
* like ".snapshot".
|
||||
* <p>
|
||||
* The primary use of this method is for validating paths when loading the
|
||||
* FSImage. During normal NN operation, paths are sometimes allowed to
|
||||
* contain reserved components.
|
||||
*
|
||||
* @return If component is valid
|
||||
*/
|
||||
public static boolean isValidNameForComponent(String component) {
|
||||
if (component.equals(".") ||
|
||||
component.equals("..") ||
|
||||
component.indexOf(":") >= 0 ||
|
||||
component.indexOf("/") >= 0) {
|
||||
return false;
|
||||
}
|
||||
return !isReservedPathComponent(component);
|
||||
}
|
||||
|
||||
|
||||
/**
|
||||
* Returns if the component is reserved.
|
||||
*
|
||||
* <p>
|
||||
* Note that some components are only reserved under certain directories, e.g.
|
||||
* "/.reserved" is reserved, while "/hadoop/.reserved" is not.
|
||||
*
|
||||
* @param component
|
||||
* @return if the component is reserved
|
||||
*/
|
||||
public static boolean isReservedPathComponent(String component) {
|
||||
for (String reserved : HdfsConstants.RESERVED_PATH_COMPONENTS) {
|
||||
if (component.equals(reserved)) {
|
||||
return true;
|
||||
}
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
/**
|
||||
* Converts a byte array to a string using UTF8 encoding.
|
||||
*/
|
||||
|
@ -312,7 +353,25 @@ public class DFSUtil {
|
|||
}
|
||||
return result.toString();
|
||||
}
|
||||
|
||||
|
||||
/**
|
||||
* Converts a list of path components into a path using Path.SEPARATOR.
|
||||
*
|
||||
* @param components Path components
|
||||
* @return Combined path as a UTF-8 string
|
||||
*/
|
||||
public static String strings2PathString(String[] components) {
|
||||
if (components.length == 0) {
|
||||
return "";
|
||||
}
|
||||
if (components.length == 1) {
|
||||
if (components[0] == null || components[0].isEmpty()) {
|
||||
return Path.SEPARATOR;
|
||||
}
|
||||
}
|
||||
return Joiner.on(Path.SEPARATOR).join(components);
|
||||
}
|
||||
|
||||
/**
|
||||
* Given a list of path components returns a byte array
|
||||
*/
|
||||
|
@ -1508,31 +1567,34 @@ public class DFSUtil {
|
|||
* configuration settings.
|
||||
*/
|
||||
public static HttpConfig.Policy getHttpPolicy(Configuration conf) {
|
||||
String httpPolicy = conf.get(DFSConfigKeys.DFS_HTTP_POLICY_KEY,
|
||||
DFSConfigKeys.DFS_HTTP_POLICY_DEFAULT);
|
||||
|
||||
HttpConfig.Policy policy = HttpConfig.Policy.fromString(httpPolicy);
|
||||
|
||||
if (policy == HttpConfig.Policy.HTTP_ONLY) {
|
||||
boolean httpsEnabled = conf.getBoolean(
|
||||
DFSConfigKeys.DFS_HTTPS_ENABLE_KEY,
|
||||
String policyStr = conf.get(DFSConfigKeys.DFS_HTTP_POLICY_KEY);
|
||||
if (policyStr == null) {
|
||||
boolean https = conf.getBoolean(DFSConfigKeys.DFS_HTTPS_ENABLE_KEY,
|
||||
DFSConfigKeys.DFS_HTTPS_ENABLE_DEFAULT);
|
||||
|
||||
boolean hadoopSslEnabled = conf.getBoolean(
|
||||
boolean hadoopSsl = conf.getBoolean(
|
||||
CommonConfigurationKeys.HADOOP_SSL_ENABLED_KEY,
|
||||
CommonConfigurationKeys.HADOOP_SSL_ENABLED_DEFAULT);
|
||||
|
||||
if (hadoopSslEnabled) {
|
||||
if (hadoopSsl) {
|
||||
LOG.warn(CommonConfigurationKeys.HADOOP_SSL_ENABLED_KEY
|
||||
+ " is deprecated. Please use "
|
||||
+ DFSConfigKeys.DFS_HTTPS_ENABLE_KEY + ".");
|
||||
policy = HttpConfig.Policy.HTTPS_ONLY;
|
||||
} else if (httpsEnabled) {
|
||||
LOG.warn(DFSConfigKeys.DFS_HTTPS_ENABLE_KEY
|
||||
+ " is deprecated. Please use "
|
||||
+ DFSConfigKeys.DFS_HTTPS_ENABLE_KEY + ".");
|
||||
policy = HttpConfig.Policy.HTTP_AND_HTTPS;
|
||||
+ " is deprecated. Please use " + DFSConfigKeys.DFS_HTTP_POLICY_KEY
|
||||
+ ".");
|
||||
}
|
||||
if (https) {
|
||||
LOG.warn(DFSConfigKeys.DFS_HTTPS_ENABLE_KEY
|
||||
+ " is deprecated. Please use " + DFSConfigKeys.DFS_HTTP_POLICY_KEY
|
||||
+ ".");
|
||||
}
|
||||
|
||||
return (hadoopSsl || https) ? HttpConfig.Policy.HTTP_AND_HTTPS
|
||||
: HttpConfig.Policy.HTTP_ONLY;
|
||||
}
|
||||
|
||||
HttpConfig.Policy policy = HttpConfig.Policy.fromString(policyStr);
|
||||
if (policy == null) {
|
||||
throw new HadoopIllegalArgumentException("Unregonized value '"
|
||||
+ policyStr + "' for " + DFSConfigKeys.DFS_HTTP_POLICY_KEY);
|
||||
}
|
||||
|
||||
conf.set(DFSConfigKeys.DFS_HTTP_POLICY_KEY, policy.name());
|
||||
|
|
|
@@ -24,6 +24,8 @@ import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_FAILOVER_SLEEPTIME
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_FAILOVER_SLEEPTIME_BASE_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_FAILOVER_SLEEPTIME_MAX_DEFAULT;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_FAILOVER_SLEEPTIME_MAX_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_RETRY_MAX_ATTEMPTS_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_RETRY_MAX_ATTEMPTS_DEFAULT;

import java.io.IOException;
import java.lang.reflect.Constructor;
@@ -144,9 +146,10 @@ public class NameNodeProxies {
.createFailoverProxyProvider(conf, failoverProxyProviderClass, xface,
nameNodeUri);
Conf config = new Conf(conf);
T proxy = (T) RetryProxy.create(xface, failoverProxyProvider, RetryPolicies
.failoverOnNetworkException(RetryPolicies.TRY_ONCE_THEN_FAIL,
config.maxFailoverAttempts, config.failoverSleepBaseMillis,
T proxy = (T) RetryProxy.create(xface, failoverProxyProvider,
RetryPolicies.failoverOnNetworkException(
RetryPolicies.TRY_ONCE_THEN_FAIL, config.maxFailoverAttempts,
config.maxRetryAttempts, config.failoverSleepBaseMillis,
config.failoverSleepMaxMillis));

Text dtService = HAUtil.buildTokenServiceForLogicalUri(nameNodeUri);
@@ -192,11 +195,14 @@ public class NameNodeProxies {
int maxFailoverAttempts = config.getInt(
DFS_CLIENT_FAILOVER_MAX_ATTEMPTS_KEY,
DFS_CLIENT_FAILOVER_MAX_ATTEMPTS_DEFAULT);
int maxRetryAttempts = config.getInt(
DFS_CLIENT_RETRY_MAX_ATTEMPTS_KEY,
DFS_CLIENT_RETRY_MAX_ATTEMPTS_DEFAULT);
InvocationHandler dummyHandler = new LossyRetryInvocationHandler<T>(
numResponseToDrop, failoverProxyProvider,
RetryPolicies.failoverOnNetworkException(
RetryPolicies.TRY_ONCE_THEN_FAIL,
Math.max(numResponseToDrop + 1, maxFailoverAttempts), delay,
RetryPolicies.TRY_ONCE_THEN_FAIL, maxFailoverAttempts,
Math.max(numResponseToDrop + 1, maxRetryAttempts), delay,
maxCap));

T proxy = (T) Proxy.newProxyInstance(
@@ -18,6 +18,7 @@

package org.apache.hadoop.hdfs;

import java.io.IOException;
import java.util.Iterator;
import java.util.List;
import java.util.Map.Entry;

@@ -25,6 +26,7 @@ import java.util.Map.Entry;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Preconditions;
import com.google.common.collect.LinkedListMultimap;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hdfs.protocol.DatanodeID;

@@ -118,6 +120,11 @@ class PeerCache {
    return instance;
  }

  @VisibleForTesting
  public static synchronized void setInstance(int c, long e) {
    instance = new PeerCache(c, e);
  }

  private boolean isDaemonStarted() {
    return (daemon == null)? false: true;
  }
@@ -171,8 +178,17 @@ class PeerCache {
    while (iter.hasNext()) {
      Value candidate = iter.next();
      iter.remove();
      if (!candidate.getPeer().isClosed()) {
        return candidate.getPeer();
      long ageMs = Time.monotonicNow() - candidate.getTime();
      Peer peer = candidate.getPeer();
      if (ageMs >= expiryPeriod) {
        try {
          peer.close();
        } catch (IOException e) {
          LOG.warn("got IOException closing stale peer " + peer +
              ", which is " + ageMs + " ms old");
        }
      } else if (!peer.isClosed()) {
        return peer;
      }
    }
    return null;
@ -0,0 +1,302 @@
|
|||
/**
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
* regarding copyright ownership. The ASF licenses this file
|
||||
* to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
package org.apache.hadoop.hdfs.client;
|
||||
|
||||
import java.io.Closeable;
|
||||
import java.io.FileInputStream;
|
||||
import java.io.IOException;
|
||||
import java.lang.reflect.Field;
|
||||
|
||||
import org.apache.commons.logging.Log;
|
||||
import org.apache.commons.logging.LogFactory;
|
||||
import org.apache.hadoop.io.IOUtils;
|
||||
import org.apache.hadoop.io.nativeio.NativeIO;
|
||||
import org.apache.hadoop.io.nativeio.NativeIO.POSIX;
|
||||
import org.apache.hadoop.util.CloseableReferenceCount;
|
||||
import org.apache.hadoop.util.Shell;
|
||||
|
||||
import com.google.common.base.Preconditions;
|
||||
import com.google.common.primitives.Ints;
|
||||
|
||||
import sun.misc.Unsafe;
|
||||
|
||||
public class ShortCircuitSharedMemorySegment implements Closeable {
|
||||
private static final Log LOG =
|
||||
LogFactory.getLog(ShortCircuitSharedMemorySegment.class);
|
||||
|
||||
private static final int BYTES_PER_SLOT = 64;
|
||||
|
||||
private static final Unsafe unsafe;
|
||||
|
||||
static {
|
||||
Unsafe theUnsafe = null;
|
||||
try {
|
||||
Field f = Unsafe.class.getDeclaredField("theUnsafe");
|
||||
f.setAccessible(true);
|
||||
theUnsafe = (Unsafe)f.get(null);
|
||||
} catch (Throwable e) {
|
||||
LOG.error("failed to load misc.Unsafe", e);
|
||||
}
|
||||
unsafe = theUnsafe;
|
||||
}
|
||||
|
||||
/**
|
||||
* A slot containing information about a replica.
|
||||
*
|
||||
* The format is:
|
||||
* word 0
|
||||
* bit 0:32 Slot flags (see below).
|
||||
* bit 33:63 Anchor count.
|
||||
* word 1:7
|
||||
* Reserved for future use, such as statistics.
|
||||
* Padding is also useful for avoiding false sharing.
|
||||
*
|
||||
* Little-endian versus big-endian is not relevant here since both the client
|
||||
* and the server reside on the same computer and use the same orientation.
|
||||
*/
|
||||
public class Slot implements Closeable {
|
||||
/**
|
||||
* Flag indicating that the slot is in use.
|
||||
*/
|
||||
private static final long SLOT_IN_USE_FLAG = 1L<<63;
|
||||
|
||||
/**
|
||||
* Flag indicating that the slot can be anchored.
|
||||
*/
|
||||
private static final long ANCHORABLE_FLAG = 1L<<62;
|
||||
|
||||
private long slotAddress;
|
||||
|
||||
Slot(long slotAddress) {
|
||||
this.slotAddress = slotAddress;
|
||||
}
|
||||
|
||||
/**
|
||||
* Make a given slot anchorable.
|
||||
*/
|
||||
public void makeAnchorable() {
|
||||
Preconditions.checkState(slotAddress != 0,
|
||||
"Called makeAnchorable on a slot that was closed.");
|
||||
long prev;
|
||||
do {
|
||||
prev = unsafe.getLongVolatile(null, this.slotAddress);
|
||||
if ((prev & ANCHORABLE_FLAG) != 0) {
|
||||
return;
|
||||
}
|
||||
} while (!unsafe.compareAndSwapLong(null, this.slotAddress,
|
||||
prev, prev | ANCHORABLE_FLAG));
|
||||
}
|
||||
|
||||
/**
|
||||
* Make a given slot unanchorable.
|
||||
*/
|
||||
public void makeUnanchorable() {
|
||||
Preconditions.checkState(slotAddress != 0,
|
||||
"Called makeUnanchorable on a slot that was closed.");
|
||||
long prev;
|
||||
do {
|
||||
prev = unsafe.getLongVolatile(null, this.slotAddress);
|
||||
if ((prev & ANCHORABLE_FLAG) == 0) {
|
||||
return;
|
||||
}
|
||||
} while (!unsafe.compareAndSwapLong(null, this.slotAddress,
|
||||
prev, prev & (~ANCHORABLE_FLAG)));
|
||||
}
|
||||
|
||||
/**
|
||||
* Try to add an anchor for a given slot.
|
||||
*
|
||||
* When a slot is anchored, we know that the block it refers to is resident
|
||||
* in memory.
|
||||
*
|
||||
* @return True if the slot is anchored.
|
||||
*/
|
||||
public boolean addAnchor() {
|
||||
long prev;
|
||||
do {
|
||||
prev = unsafe.getLongVolatile(null, this.slotAddress);
|
||||
if ((prev & 0x7fffffff) == 0x7fffffff) {
|
||||
// Too many other threads have anchored the slot (2 billion?)
|
||||
return false;
|
||||
}
|
||||
if ((prev & ANCHORABLE_FLAG) == 0) {
|
||||
// Slot can't be anchored right now.
|
||||
return false;
|
||||
}
|
||||
} while (!unsafe.compareAndSwapLong(null, this.slotAddress,
|
||||
prev, prev + 1));
|
||||
return true;
|
||||
}
|
||||
|
||||
/**
|
||||
* Remove an anchor for a given slot.
|
||||
*/
|
||||
public void removeAnchor() {
|
||||
long prev;
|
||||
do {
|
||||
prev = unsafe.getLongVolatile(null, this.slotAddress);
|
||||
Preconditions.checkState((prev & 0x7fffffff) != 0,
|
||||
"Tried to remove anchor for slot " + slotAddress +", which was " +
|
||||
"not anchored.");
|
||||
} while (!unsafe.compareAndSwapLong(null, this.slotAddress,
|
||||
prev, prev - 1));
|
||||
}
|
||||
|
||||
/**
|
||||
* @return The index of this slot.
|
||||
*/
|
||||
public int getIndex() {
|
||||
Preconditions.checkState(slotAddress != 0);
|
||||
return Ints.checkedCast(
|
||||
(slotAddress - baseAddress) / BYTES_PER_SLOT);
|
||||
}
|
||||
|
||||
@Override
|
||||
public void close() throws IOException {
|
||||
if (slotAddress == 0) return;
|
||||
long prev;
|
||||
do {
|
||||
prev = unsafe.getLongVolatile(null, this.slotAddress);
|
||||
Preconditions.checkState((prev & SLOT_IN_USE_FLAG) != 0,
|
||||
"tried to close slot that wasn't open");
|
||||
} while (!unsafe.compareAndSwapLong(null, this.slotAddress,
|
||||
prev, 0));
|
||||
slotAddress = 0;
|
||||
if (ShortCircuitSharedMemorySegment.this.refCount.unreference()) {
|
||||
ShortCircuitSharedMemorySegment.this.free();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* The stream that we're going to use to create this shared memory segment.
|
||||
*
|
||||
* Although this is a FileInputStream, we are going to assume that the
|
||||
* underlying file descriptor is writable as well as readable.
|
||||
* It would be more appropriate to use a RandomAccessFile here, but that class
|
||||
* does not have any public accessor which returns a FileDescriptor, unlike
|
||||
* FileInputStream.
|
||||
*/
|
||||
private final FileInputStream stream;
|
||||
|
||||
/**
|
||||
* Length of the shared memory segment.
|
||||
*/
|
||||
private final int length;
|
||||
|
||||
/**
|
||||
* The base address of the memory-mapped file.
|
||||
*/
|
||||
private final long baseAddress;
|
||||
|
||||
/**
|
||||
* Reference count and 'closed' status.
|
||||
*/
|
||||
private final CloseableReferenceCount refCount = new CloseableReferenceCount();
|
||||
|
||||
public ShortCircuitSharedMemorySegment(FileInputStream stream)
|
||||
throws IOException {
|
||||
if (!NativeIO.isAvailable()) {
|
||||
throw new UnsupportedOperationException("NativeIO is not available.");
|
||||
}
|
||||
if (Shell.WINDOWS) {
|
||||
throw new UnsupportedOperationException(
|
||||
"ShortCircuitSharedMemorySegment is not yet implemented " +
|
||||
"for Windows.");
|
||||
}
|
||||
if (unsafe == null) {
|
||||
throw new UnsupportedOperationException(
|
||||
"can't use ShortCircuitSharedMemorySegment because we failed to " +
|
||||
"load misc.Unsafe.");
|
||||
}
|
||||
this.refCount.reference();
|
||||
this.stream = stream;
|
||||
this.length = getEffectiveLength(stream);
|
||||
this.baseAddress = POSIX.mmap(this.stream.getFD(),
|
||||
POSIX.MMAP_PROT_READ | POSIX.MMAP_PROT_WRITE, true, this.length);
|
||||
}
|
||||
|
||||
/**
|
||||
* Calculate the effective usable size of the shared memory segment.
|
||||
* We round down to a multiple of the slot size and do some validation.
|
||||
*
|
||||
* @param stream The stream we're using.
|
||||
* @return The effective usable size of the shared memory segment.
|
||||
*/
|
||||
private static int getEffectiveLength(FileInputStream stream)
|
||||
throws IOException {
|
||||
int intSize = Ints.checkedCast(stream.getChannel().size());
|
||||
int slots = intSize / BYTES_PER_SLOT;
|
||||
Preconditions.checkState(slots > 0, "size of shared memory segment was " +
|
||||
intSize + ", but that is not enough to hold even one slot.");
|
||||
return slots * BYTES_PER_SLOT;
|
||||
}
|
||||
|
||||
private boolean allocateSlot(long address) {
|
||||
long prev;
|
||||
do {
|
||||
prev = unsafe.getLongVolatile(null, address);
|
||||
if ((prev & Slot.SLOT_IN_USE_FLAG) != 0) {
|
||||
return false;
|
||||
}
|
||||
} while (!unsafe.compareAndSwapLong(null, address,
|
||||
prev, prev | Slot.SLOT_IN_USE_FLAG));
|
||||
return true;
|
||||
}
|
||||
|
||||
/**
|
||||
* Allocate a new Slot in this shared memory segment.
|
||||
*
|
||||
* @return A newly allocated Slot, or null if there were no available
|
||||
* slots.
|
||||
*/
|
||||
public Slot allocateNextSlot() throws IOException {
|
||||
ShortCircuitSharedMemorySegment.this.refCount.reference();
|
||||
Slot slot = null;
|
||||
try {
|
||||
final int numSlots = length / BYTES_PER_SLOT;
|
||||
for (int i = 0; i < numSlots; i++) {
|
||||
long address = this.baseAddress + (i * BYTES_PER_SLOT);
|
||||
if (allocateSlot(address)) {
|
||||
slot = new Slot(address);
|
||||
break;
|
||||
}
|
||||
}
|
||||
} finally {
|
||||
if (slot == null) {
|
||||
if (refCount.unreference()) {
|
||||
free();
|
||||
}
|
||||
}
|
||||
}
|
||||
return slot;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void close() throws IOException {
|
||||
refCount.setClosed();
|
||||
if (refCount.unreference()) {
|
||||
free();
|
||||
}
|
||||
}
|
||||
|
||||
void free() throws IOException {
|
||||
IOUtils.cleanup(LOG, stream);
|
||||
POSIX.munmap(baseAddress, length);
|
||||
}
|
||||
}
|
|
@@ -22,6 +22,7 @@ import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.server.namenode.FSDirectory;

/************************************
 * Some handy constants

@@ -108,7 +109,17 @@ public class HdfsConstants {
   */
  public static final int LAYOUT_VERSION = LayoutVersion
      .getCurrentLayoutVersion();


  /**
   * Path components that are reserved in HDFS.
   * <p>
   * .reserved is only reserved under root ("/").
   */
  public static final String[] RESERVED_PATH_COMPONENTS = new String[] {
    HdfsConstants.DOT_SNAPSHOT_DIR,
    FSDirectory.DOT_RESERVED_STRING
  };

  /**
   * A special path component contained in the path for a snapshot file/dir
   */
@@ -317,7 +317,7 @@ public class BlockPlacementPolicyDefault extends BlockPlacementPolicy {
      // We need to additionally exclude the nodes that were added to the
      // result list in the successful calls to choose*() above.
      for (DatanodeStorageInfo resultStorage : results) {
        oldExcludedNodes.add(resultStorage.getDatanodeDescriptor());
        addToExcludedNodes(resultStorage.getDatanodeDescriptor(), oldExcludedNodes);
      }
      // Set numOfReplicas, since it can get out of sync with the result list
      // if the NotEnoughReplicasException was thrown in chooseRandom().
@@ -59,7 +59,8 @@ public final class HdfsServerConstants {
    INITIALIZESHAREDEDITS("-initializeSharedEdits"),
    RECOVER  ("-recover"),
    FORCE("-force"),
    NONINTERACTIVE("-nonInteractive");
    NONINTERACTIVE("-nonInteractive"),
    RENAMERESERVED("-renameReserved");

    private final String name;

@@ -362,13 +362,13 @@ public class DataNode extends Configured
        .setConf(conf).setACL(new AccessControlList(conf.get(DFS_ADMIN, " ")));

    HttpConfig.Policy policy = DFSUtil.getHttpPolicy(conf);
    InetSocketAddress infoSocAddr = DataNode.getInfoAddr(conf);
    String infoHost = infoSocAddr.getHostName();

    if (policy.isHttpEnabled()) {
      if (secureResources == null) {
        InetSocketAddress infoSocAddr = DataNode.getInfoAddr(conf);
        int port = infoSocAddr.getPort();
        builder.addEndpoint(URI.create("http://" + infoHost + ":" + port));
        builder.addEndpoint(URI.create("http://"
            + NetUtils.getHostPortString(infoSocAddr)));
        if (port == 0) {
          builder.setFindPort(true);
        }

@@ -381,7 +381,7 @@ public class DataNode extends Configured

    if (policy.isHttpsEnabled()) {
      InetSocketAddress secInfoSocAddr = NetUtils.createSocketAddr(conf.get(
          DFS_DATANODE_HTTPS_ADDRESS_KEY, infoHost + ":" + 0));
          DFS_DATANODE_HTTPS_ADDRESS_KEY, DFS_DATANODE_HTTPS_ADDRESS_DEFAULT));

      Configuration sslConf = DFSUtil.loadSslConfiguration(conf);
      DFSUtil.loadSslConfToHttpServerBuilder(builder, sslConf);

@@ -390,7 +390,8 @@ public class DataNode extends Configured
      if (port == 0) {
        builder.setFindPort(true);
      }
      builder.addEndpoint(URI.create("https://" + infoHost + ":" + port));
      builder.addEndpoint(URI.create("https://"
          + NetUtils.getHostPortString(secInfoSocAddr)));
    }

    this.infoServer = builder.build();

@@ -1194,7 +1195,7 @@ public class DataNode extends Configured

  private void checkBlockToken(ExtendedBlock block, Token<BlockTokenIdentifier> token,
      AccessMode accessMode) throws IOException {
    if (isBlockTokenEnabled && UserGroupInformation.isSecurityEnabled()) {
    if (isBlockTokenEnabled) {
      BlockTokenIdentifier id = new BlockTokenIdentifier();
      ByteArrayInputStream buf = new ByteArrayInputStream(token.getIdentifier());
      DataInputStream in = new DataInputStream(buf);
@@ -25,6 +25,7 @@ import org.apache.commons.daemon.DaemonContext;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
import org.apache.hadoop.http.HttpConfig;
import org.apache.hadoop.http.HttpServer2;

@@ -62,7 +63,9 @@ public class SecureDataNodeStarter implements Daemon {
  @Override
  public void init(DaemonContext context) throws Exception {
    System.err.println("Initializing secure datanode resources");
    Configuration conf = new Configuration();
    // Create a new HdfsConfiguration object to ensure that the configuration in
    // hdfs-site.xml is picked up.
    Configuration conf = new HdfsConfiguration();

    // Stash command-line arguments for regular datanode
    args = context.getArguments();
@ -17,6 +17,7 @@
|
|||
*/
|
||||
package org.apache.hadoop.hdfs.server.namenode;
|
||||
|
||||
import static org.apache.hadoop.hdfs.server.namenode.FSImageFormat.renameReservedPathsOnUpgrade;
|
||||
import static org.apache.hadoop.util.Time.now;
|
||||
|
||||
import java.io.FilterInputStream;
|
||||
|
@ -296,8 +297,10 @@ public class FSEditLogLoader {
|
|||
if (addCloseOp.aclEntries != null) {
|
||||
fsNamesys.getAclConfigFlag().checkForEditLog();
|
||||
}
|
||||
final String path =
|
||||
renameReservedPathsOnUpgrade(addCloseOp.path, logVersion);
|
||||
if (FSNamesystem.LOG.isDebugEnabled()) {
|
||||
FSNamesystem.LOG.debug(op.opCode + ": " + addCloseOp.path +
|
||||
FSNamesystem.LOG.debug(op.opCode + ": " + path +
|
||||
" numblocks : " + addCloseOp.blocks.length +
|
||||
" clientHolder " + addCloseOp.clientName +
|
||||
" clientMachine " + addCloseOp.clientMachine);
|
||||
|
@ -308,9 +311,9 @@ public class FSEditLogLoader {
|
|||
// 3. OP_ADD to open file for append
|
||||
|
||||
// See if the file already exists (persistBlocks call)
|
||||
final INodesInPath iip = fsDir.getLastINodeInPath(addCloseOp.path);
|
||||
final INodesInPath iip = fsDir.getLastINodeInPath(path);
|
||||
final INodeFile oldFile = INodeFile.valueOf(
|
||||
iip.getINode(0), addCloseOp.path, true);
|
||||
iip.getINode(0), path, true);
|
||||
INodeFile newFile = oldFile;
|
||||
if (oldFile == null) { // this is OP_ADD on a new file (case 1)
|
||||
// versions > 0 support per file replication
|
||||
|
@ -323,11 +326,11 @@ public class FSEditLogLoader {
|
|||
inodeId = getAndUpdateLastInodeId(addCloseOp.inodeId, logVersion,
|
||||
lastInodeId);
|
||||
newFile = fsDir.unprotectedAddFile(inodeId,
|
||||
addCloseOp.path, addCloseOp.permissions, addCloseOp.aclEntries,
|
||||
path, addCloseOp.permissions, addCloseOp.aclEntries,
|
||||
replication, addCloseOp.mtime, addCloseOp.atime,
|
||||
addCloseOp.blockSize, true, addCloseOp.clientName,
|
||||
addCloseOp.clientMachine);
|
||||
fsNamesys.leaseManager.addLease(addCloseOp.clientName, addCloseOp.path);
|
||||
fsNamesys.leaseManager.addLease(addCloseOp.clientName, path);
|
||||
|
||||
// add the op into retry cache if necessary
|
||||
if (toAddRetryCache) {
|
||||
|
@ -343,11 +346,11 @@ public class FSEditLogLoader {
|
|||
FSNamesystem.LOG.debug("Reopening an already-closed file " +
|
||||
"for append");
|
||||
}
|
||||
LocatedBlock lb = fsNamesys.prepareFileForWrite(addCloseOp.path,
|
||||
LocatedBlock lb = fsNamesys.prepareFileForWrite(path,
|
||||
oldFile, addCloseOp.clientName, addCloseOp.clientMachine, null,
|
||||
false, iip.getLatestSnapshotId(), false);
|
||||
newFile = INodeFile.valueOf(fsDir.getINode(addCloseOp.path),
|
||||
addCloseOp.path, true);
|
||||
newFile = INodeFile.valueOf(fsDir.getINode(path),
|
||||
path, true);
|
||||
|
||||
// add the op into retry cache is necessary
|
||||
if (toAddRetryCache) {
|
||||
|
@ -368,16 +371,17 @@ public class FSEditLogLoader {
|
|||
}
|
||||
case OP_CLOSE: {
|
||||
AddCloseOp addCloseOp = (AddCloseOp)op;
|
||||
|
||||
final String path =
|
||||
renameReservedPathsOnUpgrade(addCloseOp.path, logVersion);
|
||||
if (FSNamesystem.LOG.isDebugEnabled()) {
|
||||
FSNamesystem.LOG.debug(op.opCode + ": " + addCloseOp.path +
|
||||
FSNamesystem.LOG.debug(op.opCode + ": " + path +
|
||||
" numblocks : " + addCloseOp.blocks.length +
|
||||
" clientHolder " + addCloseOp.clientName +
|
||||
" clientMachine " + addCloseOp.clientMachine);
|
||||
}
|
||||
|
||||
final INodesInPath iip = fsDir.getLastINodeInPath(addCloseOp.path);
|
||||
final INodeFile file = INodeFile.valueOf(iip.getINode(0), addCloseOp.path);
|
||||
final INodesInPath iip = fsDir.getLastINodeInPath(path);
|
||||
final INodeFile file = INodeFile.valueOf(iip.getINode(0), path);
|
||||
|
||||
// Update the salient file attributes.
|
||||
file.setAccessTime(addCloseOp.atime, Snapshot.CURRENT_STATE_ID);
|
||||
|
@ -391,24 +395,26 @@ public class FSEditLogLoader {
|
|||
// could show up twice in a row. But after that version, this
|
||||
// should be fixed, so we should treat it as an error.
|
||||
throw new IOException(
|
||||
"File is not under construction: " + addCloseOp.path);
|
||||
"File is not under construction: " + path);
|
||||
}
|
||||
// One might expect that you could use removeLease(holder, path) here,
|
||||
// but OP_CLOSE doesn't serialize the holder. So, remove by path.
|
||||
if (file.isUnderConstruction()) {
|
||||
fsNamesys.leaseManager.removeLeaseWithPrefixPath(addCloseOp.path);
|
||||
fsNamesys.leaseManager.removeLeaseWithPrefixPath(path);
|
||||
file.toCompleteFile(file.getModificationTime());
|
||||
}
|
||||
break;
|
||||
}
|
||||
case OP_UPDATE_BLOCKS: {
|
||||
UpdateBlocksOp updateOp = (UpdateBlocksOp)op;
|
||||
final String path =
|
||||
renameReservedPathsOnUpgrade(updateOp.path, logVersion);
|
||||
if (FSNamesystem.LOG.isDebugEnabled()) {
|
||||
FSNamesystem.LOG.debug(op.opCode + ": " + updateOp.path +
|
||||
FSNamesystem.LOG.debug(op.opCode + ": " + path +
|
||||
" numblocks : " + updateOp.blocks.length);
|
||||
}
|
||||
INodeFile oldFile = INodeFile.valueOf(fsDir.getINode(updateOp.path),
|
||||
updateOp.path);
|
||||
INodeFile oldFile = INodeFile.valueOf(fsDir.getINode(path),
|
||||
path);
|
||||
// Update in-memory data structures
|
||||
updateBlocks(fsDir, updateOp, oldFile);
|
||||
|
||||
|
@ -419,7 +425,7 @@ public class FSEditLogLoader {
|
|||
}
|
||||
case OP_ADD_BLOCK: {
|
||||
AddBlockOp addBlockOp = (AddBlockOp) op;
|
||||
String path = addBlockOp.getPath();
|
||||
String path = renameReservedPathsOnUpgrade(addBlockOp.getPath(), logVersion);
|
||||
if (FSNamesystem.LOG.isDebugEnabled()) {
|
||||
FSNamesystem.LOG.debug(op.opCode + ": " + path +
|
||||
" new block id : " + addBlockOp.getLastBlock().getBlockId());
|
||||
|
@ -433,14 +439,20 @@ public class FSEditLogLoader {
|
|||
SetReplicationOp setReplicationOp = (SetReplicationOp)op;
|
||||
short replication = fsNamesys.getBlockManager().adjustReplication(
|
||||
setReplicationOp.replication);
|
||||
fsDir.unprotectedSetReplication(setReplicationOp.path,
|
||||
fsDir.unprotectedSetReplication(
|
||||
renameReservedPathsOnUpgrade(setReplicationOp.path, logVersion),
|
||||
replication, null);
|
||||
break;
|
||||
}
|
||||
case OP_CONCAT_DELETE: {
|
||||
ConcatDeleteOp concatDeleteOp = (ConcatDeleteOp)op;
|
||||
fsDir.unprotectedConcat(concatDeleteOp.trg, concatDeleteOp.srcs,
|
||||
concatDeleteOp.timestamp);
|
||||
String trg = renameReservedPathsOnUpgrade(concatDeleteOp.trg, logVersion);
|
||||
String[] srcs = new String[concatDeleteOp.srcs.length];
|
||||
for (int i=0; i<srcs.length; i++) {
|
||||
srcs[i] =
|
||||
renameReservedPathsOnUpgrade(concatDeleteOp.srcs[i], logVersion);
|
||||
}
|
||||
fsDir.unprotectedConcat(trg, srcs, concatDeleteOp.timestamp);
|
||||
|
||||
if (toAddRetryCache) {
|
||||
fsNamesys.addCacheEntry(concatDeleteOp.rpcClientId,
|
||||
|
@ -450,7 +462,9 @@ public class FSEditLogLoader {
|
|||
}
|
||||
case OP_RENAME_OLD: {
|
||||
RenameOldOp renameOp = (RenameOldOp)op;
|
||||
fsDir.unprotectedRenameTo(renameOp.src, renameOp.dst,
|
||||
final String src = renameReservedPathsOnUpgrade(renameOp.src, logVersion);
|
||||
final String dst = renameReservedPathsOnUpgrade(renameOp.dst, logVersion);
|
||||
fsDir.unprotectedRenameTo(src, dst,
|
||||
renameOp.timestamp);
|
||||
|
||||
if (toAddRetryCache) {
|
||||
|
@ -460,7 +474,9 @@ public class FSEditLogLoader {
|
|||
}
|
||||
case OP_DELETE: {
|
||||
DeleteOp deleteOp = (DeleteOp)op;
|
||||
fsDir.unprotectedDelete(deleteOp.path, deleteOp.timestamp);
|
||||
fsDir.unprotectedDelete(
|
||||
renameReservedPathsOnUpgrade(deleteOp.path, logVersion),
|
||||
deleteOp.timestamp);
|
||||
|
||||
if (toAddRetryCache) {
|
||||
fsNamesys.addCacheEntry(deleteOp.rpcClientId, deleteOp.rpcCallId);
|
||||
|
@ -474,8 +490,9 @@ public class FSEditLogLoader {
|
|||
}
|
||||
inodeId = getAndUpdateLastInodeId(mkdirOp.inodeId, logVersion,
|
||||
lastInodeId);
|
||||
fsDir.unprotectedMkdir(inodeId, mkdirOp.path, mkdirOp.permissions,
|
||||
mkdirOp.aclEntries, mkdirOp.timestamp);
|
||||
fsDir.unprotectedMkdir(inodeId,
|
||||
renameReservedPathsOnUpgrade(mkdirOp.path, logVersion),
|
||||
mkdirOp.permissions, mkdirOp.aclEntries, mkdirOp.timestamp);
|
||||
break;
|
||||
}
|
||||
case OP_SET_GENSTAMP_V1: {
|
||||
|
@ -485,53 +502,56 @@ public class FSEditLogLoader {
|
|||
}
|
||||
case OP_SET_PERMISSIONS: {
|
||||
SetPermissionsOp setPermissionsOp = (SetPermissionsOp)op;
|
||||
fsDir.unprotectedSetPermission(setPermissionsOp.src,
|
||||
setPermissionsOp.permissions);
|
||||
fsDir.unprotectedSetPermission(
|
||||
renameReservedPathsOnUpgrade(setPermissionsOp.src, logVersion),
|
||||
setPermissionsOp.permissions);
|
||||
break;
|
||||
}
|
||||
case OP_SET_OWNER: {
|
||||
SetOwnerOp setOwnerOp = (SetOwnerOp)op;
|
||||
fsDir.unprotectedSetOwner(setOwnerOp.src, setOwnerOp.username,
|
||||
setOwnerOp.groupname);
|
||||
fsDir.unprotectedSetOwner(
|
||||
renameReservedPathsOnUpgrade(setOwnerOp.src, logVersion),
|
||||
setOwnerOp.username, setOwnerOp.groupname);
|
||||
break;
|
||||
}
|
||||
case OP_SET_NS_QUOTA: {
|
||||
SetNSQuotaOp setNSQuotaOp = (SetNSQuotaOp)op;
|
||||
fsDir.unprotectedSetQuota(setNSQuotaOp.src,
|
||||
setNSQuotaOp.nsQuota,
|
||||
HdfsConstants.QUOTA_DONT_SET);
|
||||
fsDir.unprotectedSetQuota(
|
||||
renameReservedPathsOnUpgrade(setNSQuotaOp.src, logVersion),
|
||||
setNSQuotaOp.nsQuota, HdfsConstants.QUOTA_DONT_SET);
|
||||
break;
|
||||
}
|
||||
case OP_CLEAR_NS_QUOTA: {
|
||||
ClearNSQuotaOp clearNSQuotaOp = (ClearNSQuotaOp)op;
|
||||
fsDir.unprotectedSetQuota(clearNSQuotaOp.src,
|
||||
HdfsConstants.QUOTA_RESET,
|
||||
HdfsConstants.QUOTA_DONT_SET);
|
||||
fsDir.unprotectedSetQuota(
|
||||
renameReservedPathsOnUpgrade(clearNSQuotaOp.src, logVersion),
|
||||
HdfsConstants.QUOTA_RESET, HdfsConstants.QUOTA_DONT_SET);
|
||||
break;
|
||||
}
|
||||
|
||||
case OP_SET_QUOTA:
|
||||
SetQuotaOp setQuotaOp = (SetQuotaOp)op;
|
||||
fsDir.unprotectedSetQuota(setQuotaOp.src,
|
||||
setQuotaOp.nsQuota,
|
||||
setQuotaOp.dsQuota);
|
||||
fsDir.unprotectedSetQuota(
|
||||
renameReservedPathsOnUpgrade(setQuotaOp.src, logVersion),
|
||||
setQuotaOp.nsQuota, setQuotaOp.dsQuota);
|
||||
break;
|
||||
|
||||
case OP_TIMES: {
|
||||
TimesOp timesOp = (TimesOp)op;
|
||||
|
||||
fsDir.unprotectedSetTimes(timesOp.path,
|
||||
timesOp.mtime,
|
||||
timesOp.atime, true);
|
||||
fsDir.unprotectedSetTimes(
|
||||
renameReservedPathsOnUpgrade(timesOp.path, logVersion),
|
||||
timesOp.mtime, timesOp.atime, true);
|
||||
break;
|
||||
}
|
||||
case OP_SYMLINK: {
|
||||
SymlinkOp symlinkOp = (SymlinkOp)op;
|
||||
inodeId = getAndUpdateLastInodeId(symlinkOp.inodeId, logVersion,
|
||||
lastInodeId);
|
||||
fsDir.unprotectedAddSymlink(inodeId, symlinkOp.path,
|
||||
symlinkOp.value, symlinkOp.mtime,
|
||||
symlinkOp.atime, symlinkOp.permissionStatus);
|
||||
fsDir.unprotectedAddSymlink(inodeId,
|
||||
renameReservedPathsOnUpgrade(symlinkOp.path, logVersion),
|
||||
symlinkOp.value, symlinkOp.mtime, symlinkOp.atime,
|
||||
symlinkOp.permissionStatus);
|
||||
|
||||
if (toAddRetryCache) {
|
||||
fsNamesys.addCacheEntry(symlinkOp.rpcClientId, symlinkOp.rpcCallId);
|
||||
|
@ -540,8 +560,10 @@ public class FSEditLogLoader {
|
|||
}
|
||||
case OP_RENAME: {
|
||||
RenameOp renameOp = (RenameOp)op;
|
||||
fsDir.unprotectedRenameTo(renameOp.src, renameOp.dst,
|
||||
renameOp.timestamp, renameOp.options);
|
||||
fsDir.unprotectedRenameTo(
|
||||
renameReservedPathsOnUpgrade(renameOp.src, logVersion),
|
||||
renameReservedPathsOnUpgrade(renameOp.dst, logVersion),
|
||||
renameOp.timestamp, renameOp.options);
|
||||
|
||||
if (toAddRetryCache) {
|
||||
fsNamesys.addCacheEntry(renameOp.rpcClientId, renameOp.rpcCallId);
|
||||
|
@ -584,10 +606,12 @@ public class FSEditLogLoader {
|
|||
|
||||
Lease lease = fsNamesys.leaseManager.getLease(
|
||||
reassignLeaseOp.leaseHolder);
|
||||
INodeFile pendingFile = fsDir.getINode(reassignLeaseOp.path).asFile();
|
||||
final String path =
|
||||
renameReservedPathsOnUpgrade(reassignLeaseOp.path, logVersion);
|
||||
INodeFile pendingFile = fsDir.getINode(path).asFile();
|
||||
Preconditions.checkState(pendingFile.isUnderConstruction());
|
||||
fsNamesys.reassignLeaseInternal(lease,
|
||||
reassignLeaseOp.path, reassignLeaseOp.newHolder, pendingFile);
|
||||
path, reassignLeaseOp.newHolder, pendingFile);
|
||||
break;
|
||||
}
|
||||
case OP_START_LOG_SEGMENT:
|
||||
|
@ -597,8 +621,11 @@ public class FSEditLogLoader {
|
|||
}
|
||||
case OP_CREATE_SNAPSHOT: {
|
||||
CreateSnapshotOp createSnapshotOp = (CreateSnapshotOp) op;
|
||||
final String snapshotRoot =
|
||||
renameReservedPathsOnUpgrade(createSnapshotOp.snapshotRoot,
|
||||
logVersion);
|
||||
String path = fsNamesys.getSnapshotManager().createSnapshot(
|
||||
createSnapshotOp.snapshotRoot, createSnapshotOp.snapshotName);
|
||||
snapshotRoot, createSnapshotOp.snapshotName);
|
||||
if (toAddRetryCache) {
|
||||
fsNamesys.addCacheEntryWithPayload(createSnapshotOp.rpcClientId,
|
||||
createSnapshotOp.rpcCallId, path);
|
||||
|
@ -609,8 +636,11 @@ public class FSEditLogLoader {
|
|||
DeleteSnapshotOp deleteSnapshotOp = (DeleteSnapshotOp) op;
|
||||
BlocksMapUpdateInfo collectedBlocks = new BlocksMapUpdateInfo();
|
||||
List<INode> removedINodes = new ChunkedArrayList<INode>();
|
||||
final String snapshotRoot =
|
||||
renameReservedPathsOnUpgrade(deleteSnapshotOp.snapshotRoot,
|
||||
logVersion);
|
||||
fsNamesys.getSnapshotManager().deleteSnapshot(
|
||||
deleteSnapshotOp.snapshotRoot, deleteSnapshotOp.snapshotName,
|
||||
snapshotRoot, deleteSnapshotOp.snapshotName,
|
||||
collectedBlocks, removedINodes);
|
||||
fsNamesys.removeBlocksAndUpdateSafemodeTotal(collectedBlocks);
|
||||
collectedBlocks.clear();
|
||||
|
@ -625,8 +655,11 @@ public class FSEditLogLoader {
|
|||
}
|
||||
case OP_RENAME_SNAPSHOT: {
|
||||
RenameSnapshotOp renameSnapshotOp = (RenameSnapshotOp) op;
|
||||
final String snapshotRoot =
|
||||
renameReservedPathsOnUpgrade(renameSnapshotOp.snapshotRoot,
|
||||
logVersion);
|
||||
fsNamesys.getSnapshotManager().renameSnapshot(
|
||||
renameSnapshotOp.snapshotRoot, renameSnapshotOp.snapshotOldName,
|
||||
snapshotRoot, renameSnapshotOp.snapshotOldName,
|
||||
renameSnapshotOp.snapshotNewName);
|
||||
|
||||
if (toAddRetryCache) {
|
||||
|
@ -637,14 +670,19 @@ public class FSEditLogLoader {
|
|||
}
|
||||
case OP_ALLOW_SNAPSHOT: {
|
||||
AllowSnapshotOp allowSnapshotOp = (AllowSnapshotOp) op;
|
||||
final String snapshotRoot =
|
||||
renameReservedPathsOnUpgrade(allowSnapshotOp.snapshotRoot, logVersion);
|
||||
fsNamesys.getSnapshotManager().setSnapshottable(
|
||||
allowSnapshotOp.snapshotRoot, false);
|
||||
snapshotRoot, false);
|
||||
break;
|
||||
}
|
||||
case OP_DISALLOW_SNAPSHOT: {
|
||||
DisallowSnapshotOp disallowSnapshotOp = (DisallowSnapshotOp) op;
|
||||
final String snapshotRoot =
|
||||
renameReservedPathsOnUpgrade(disallowSnapshotOp.snapshotRoot,
|
||||
logVersion);
|
||||
fsNamesys.getSnapshotManager().resetSnapshottable(
|
||||
disallowSnapshotOp.snapshotRoot);
|
||||
snapshotRoot);
|
||||
break;
|
||||
}
|
||||
case OP_SET_GENSTAMP_V2: {
|
||||
|
|
|
@ -32,12 +32,13 @@ import java.security.DigestOutputStream;
|
|||
import java.security.MessageDigest;
|
||||
import java.util.ArrayList;
|
||||
import java.util.Arrays;
|
||||
import java.util.Collection;
|
||||
import java.util.HashMap;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
import java.util.TreeMap;
|
||||
|
||||
import org.apache.commons.logging.Log;
|
||||
import org.apache.hadoop.HadoopIllegalArgumentException;
|
||||
import org.apache.hadoop.classification.InterfaceAudience;
|
||||
import org.apache.hadoop.classification.InterfaceStability;
|
||||
import org.apache.hadoop.conf.Configuration;
|
||||
|
@ -45,7 +46,9 @@ import org.apache.hadoop.fs.Path;
|
|||
import org.apache.hadoop.fs.PathIsNotDirectoryException;
|
||||
import org.apache.hadoop.fs.UnresolvedLinkException;
|
||||
import org.apache.hadoop.fs.permission.PermissionStatus;
|
||||
import org.apache.hadoop.hdfs.DFSUtil;
|
||||
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
|
||||
import org.apache.hadoop.hdfs.protocol.LayoutFlags;
|
||||
import org.apache.hadoop.hdfs.protocol.LayoutVersion;
|
||||
import org.apache.hadoop.hdfs.protocol.LayoutVersion.Feature;
|
||||
import org.apache.hadoop.hdfs.protocol.proto.AclProtos.AclFsImageProto;
|
||||
|
@ -54,6 +57,7 @@ import org.apache.hadoop.hdfs.protocol.LayoutFlags;
|
|||
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
|
||||
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoUnderConstruction;
|
||||
import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
|
||||
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
|
||||
import org.apache.hadoop.hdfs.server.common.InconsistentFSStateException;
|
||||
import org.apache.hadoop.hdfs.server.namenode.snapshot.DirectoryWithSnapshotFeature;
|
||||
import org.apache.hadoop.hdfs.server.namenode.snapshot.FileDiffList;
|
||||
|
@ -69,6 +73,10 @@ import org.apache.hadoop.hdfs.server.namenode.startupprogress.StepType;
|
|||
import org.apache.hadoop.hdfs.util.ReadOnlyList;
|
||||
import org.apache.hadoop.io.MD5Hash;
|
||||
import org.apache.hadoop.io.Text;
|
||||
import org.apache.hadoop.util.StringUtils;
|
||||
|
||||
import com.google.common.annotations.VisibleForTesting;
|
||||
import com.google.common.base.Preconditions;
|
||||
|
||||
/**
|
||||
* Contains inner classes for reading or writing the on-disk format for
|
||||
|
@ -415,7 +423,8 @@ public class FSImageFormat {
|
|||
}
|
||||
|
||||
/**
|
||||
* load fsimage files assuming only local names are stored
|
||||
* load fsimage files assuming only local names are stored. Used when
|
||||
* snapshots are not supported by the layout version.
|
||||
*
|
||||
* @param numFiles number of files expected to be read
|
||||
* @param in image input stream
|
||||
|
@ -531,6 +540,8 @@ public class FSImageFormat {
|
|||
*/
|
||||
private int loadDirectory(DataInput in, Counter counter) throws IOException {
|
||||
String parentPath = FSImageSerialization.readString(in);
|
||||
// Rename .snapshot paths if we're doing an upgrade
|
||||
parentPath = renameReservedPathsOnUpgrade(parentPath, getLayoutVersion());
|
||||
final INodeDirectory parent = INodeDirectory.valueOf(
|
||||
namesystem.dir.rootDir.getNode(parentPath, true), parentPath);
|
||||
return loadChildren(parent, in, counter);
|
||||
|
@ -590,11 +601,9 @@ public class FSImageFormat {
|
|||
*/
|
||||
private void addToParent(INodeDirectory parent, INode child) {
|
||||
FSDirectory fsDir = namesystem.dir;
|
||||
if (parent == fsDir.rootDir && FSDirectory.isReservedName(child)) {
|
||||
throw new HadoopIllegalArgumentException("File name \""
|
||||
+ child.getLocalName() + "\" is reserved. Please "
|
||||
+ " change the name of the existing file or directory to another "
|
||||
+ "name before upgrading to this release.");
|
||||
if (parent == fsDir.rootDir) {
|
||||
child.setLocalName(renameReservedRootComponentOnUpgrade(
|
||||
child.getLocalNameBytes(), getLayoutVersion()));
|
||||
}
|
||||
// NOTE: This does not update space counts for parents
|
||||
if (!parent.addChild(child)) {
|
||||
|
@ -631,7 +640,9 @@ public class FSImageFormat {
|
|||
public INode loadINodeWithLocalName(boolean isSnapshotINode,
|
||||
DataInput in, boolean updateINodeMap, Counter counter)
|
||||
throws IOException {
|
||||
final byte[] localName = FSImageSerialization.readLocalName(in);
|
||||
byte[] localName = FSImageSerialization.readLocalName(in);
|
||||
localName =
|
||||
renameReservedComponentOnUpgrade(localName, getLayoutVersion());
|
||||
INode inode = loadINode(localName, isSnapshotINode, in, counter);
|
||||
if (updateINodeMap
|
||||
&& LayoutVersion.supports(Feature.ADD_INODE_ID, getLayoutVersion())) {
|
||||
|
@ -968,7 +979,156 @@ public class FSImageFormat {
|
|||
return snapshotMap.get(in.readInt());
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@VisibleForTesting
|
||||
public static TreeMap<String, String> renameReservedMap =
|
||||
new TreeMap<String, String>();
|
||||
|
||||
/**
|
||||
* Use the default key-value pairs that will be used to determine how to
|
||||
* rename reserved paths on upgrade.
|
||||
*/
|
||||
@VisibleForTesting
|
||||
public static void useDefaultRenameReservedPairs() {
|
||||
renameReservedMap.clear();
|
||||
for (String key: HdfsConstants.RESERVED_PATH_COMPONENTS) {
|
||||
renameReservedMap.put(
|
||||
key,
|
||||
key + "." + LayoutVersion.getCurrentLayoutVersion() + "."
|
||||
+ "UPGRADE_RENAMED");
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Set the key-value pairs that will be used to determine how to rename
|
||||
* reserved paths on upgrade.
|
||||
*/
|
||||
@VisibleForTesting
|
||||
public static void setRenameReservedPairs(String renameReserved) {
|
||||
// Clear and set the default values
|
||||
useDefaultRenameReservedPairs();
|
||||
// Overwrite with provided values
|
||||
setRenameReservedMapInternal(renameReserved);
|
||||
}
|
||||
|
||||
private static void setRenameReservedMapInternal(String renameReserved) {
|
||||
Collection<String> pairs =
|
||||
StringUtils.getTrimmedStringCollection(renameReserved);
|
||||
for (String p : pairs) {
|
||||
String[] pair = StringUtils.split(p, '/', '=');
|
||||
Preconditions.checkArgument(pair.length == 2,
|
||||
"Could not parse key-value pair " + p);
|
||||
String key = pair[0];
|
||||
String value = pair[1];
|
||||
Preconditions.checkArgument(DFSUtil.isReservedPathComponent(key),
|
||||
"Unknown reserved path " + key);
|
||||
Preconditions.checkArgument(DFSUtil.isValidNameForComponent(value),
|
||||
"Invalid rename path for " + key + ": " + value);
|
||||
LOG.info("Will rename reserved path " + key + " to " + value);
|
||||
renameReservedMap.put(key, value);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* When upgrading from an old version, the filesystem could contain paths
|
||||
* that are now reserved in the new version (e.g. .snapshot). This renames
|
||||
* these new reserved paths to a user-specified value to avoid collisions
|
||||
* with the reserved name.
|
||||
*
|
||||
* @param path Old path potentially containing a reserved path
|
||||
* @return New path with reserved path components renamed to user value
|
||||
*/
|
||||
static String renameReservedPathsOnUpgrade(String path,
|
||||
final int layoutVersion) {
|
||||
final String oldPath = path;
|
||||
// If any known LVs aren't supported, we're doing an upgrade
|
||||
if (!LayoutVersion.supports(Feature.ADD_INODE_ID, layoutVersion)) {
|
||||
String[] components = INode.getPathNames(path);
|
||||
// Only need to worry about the root directory
|
||||
if (components.length > 1) {
|
||||
components[1] = DFSUtil.bytes2String(
|
||||
renameReservedRootComponentOnUpgrade(
|
||||
DFSUtil.string2Bytes(components[1]),
|
||||
layoutVersion));
|
||||
path = DFSUtil.strings2PathString(components);
|
||||
}
|
||||
}
|
||||
if (!LayoutVersion.supports(Feature.SNAPSHOT, layoutVersion)) {
|
||||
String[] components = INode.getPathNames(path);
|
||||
// Special case the root path
|
||||
if (components.length == 0) {
|
||||
return path;
|
||||
}
|
||||
for (int i=0; i<components.length; i++) {
|
||||
components[i] = DFSUtil.bytes2String(
|
||||
renameReservedComponentOnUpgrade(
|
||||
DFSUtil.string2Bytes(components[i]),
|
||||
layoutVersion));
|
||||
}
|
||||
path = DFSUtil.strings2PathString(components);
|
||||
}
|
||||
|
||||
if (!path.equals(oldPath)) {
|
||||
LOG.info("Upgrade process renamed reserved path " + oldPath + " to "
|
||||
+ path);
|
||||
}
|
||||
return path;
|
||||
}
|
||||
|
||||
private final static String RESERVED_ERROR_MSG =
|
||||
FSDirectory.DOT_RESERVED_PATH_PREFIX + " is a reserved path and "
|
||||
+ HdfsConstants.DOT_SNAPSHOT_DIR + " is a reserved path component in"
|
||||
+ " this version of HDFS. Please rollback and delete or rename"
|
||||
+ " this path, or upgrade with the "
|
||||
+ StartupOption.RENAMERESERVED.getName()
|
||||
+ " [key-value pairs]"
|
||||
+ " option to automatically rename these paths during upgrade.";
|
||||
|
||||
/**
|
||||
* Same as {@link #renameReservedPathsOnUpgrade(String)}, but for a single
|
||||
* byte array path component.
|
||||
*/
|
||||
private static byte[] renameReservedComponentOnUpgrade(byte[] component,
|
||||
final int layoutVersion) {
|
||||
// If the LV doesn't support snapshots, we're doing an upgrade
|
||||
if (!LayoutVersion.supports(Feature.SNAPSHOT, layoutVersion)) {
|
||||
if (Arrays.equals(component, HdfsConstants.DOT_SNAPSHOT_DIR_BYTES)) {
|
||||
Preconditions.checkArgument(
|
||||
renameReservedMap != null &&
|
||||
renameReservedMap.containsKey(HdfsConstants.DOT_SNAPSHOT_DIR),
|
||||
RESERVED_ERROR_MSG);
|
||||
component =
|
||||
DFSUtil.string2Bytes(renameReservedMap
|
||||
.get(HdfsConstants.DOT_SNAPSHOT_DIR));
|
||||
}
|
||||
}
|
||||
return component;
|
||||
}
|
||||
|
||||
/**
|
||||
* Same as {@link #renameReservedPathsOnUpgrade(String)}, but for a single
|
||||
* byte array path component.
|
||||
*/
|
||||
private static byte[] renameReservedRootComponentOnUpgrade(byte[] component,
|
||||
final int layoutVersion) {
|
||||
// If the LV doesn't support inode IDs, we're doing an upgrade
|
||||
if (!LayoutVersion.supports(Feature.ADD_INODE_ID, layoutVersion)) {
|
||||
if (Arrays.equals(component, FSDirectory.DOT_RESERVED)) {
|
||||
Preconditions.checkArgument(
|
||||
renameReservedMap != null &&
|
||||
renameReservedMap.containsKey(FSDirectory.DOT_RESERVED_STRING),
|
||||
RESERVED_ERROR_MSG);
|
||||
final String renameString = renameReservedMap
|
||||
.get(FSDirectory.DOT_RESERVED_STRING);
|
||||
component =
|
||||
DFSUtil.string2Bytes(renameString);
|
||||
LOG.info("Renamed root path " + FSDirectory.DOT_RESERVED_STRING
|
||||
+ " to " + renameString);
|
||||
}
|
||||
}
|
||||
return component;
|
||||
}
|
||||
|
||||
/**
|
||||
* A one-shot class responsible for writing an image file.
|
||||
* The write() function should be called once, after which the getter
|
||||
|
|
|
@@ -1166,7 +1166,8 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
    if (isInSafeMode()) {
      SafeModeException se = new SafeModeException(errorMsg, safeMode);
      if (haEnabled && haContext != null
          && haContext.getState().getServiceState() == HAServiceState.ACTIVE) {
          && haContext.getState().getServiceState() == HAServiceState.ACTIVE
          && shouldRetrySafeMode(this.safeMode)) {
        throw new RetriableException(se);
      } else {
        throw se;

@@ -1174,6 +1175,18 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
      }
    }

  /**
   * We already know that the safemode is on. We will throw a RetriableException
   * if the safemode is not manual or caused by low resource.
   */
  private boolean shouldRetrySafeMode(SafeModeInfo safeMode) {
    if (safeMode == null) {
      return false;
    } else {
      return !safeMode.isManual() && !safeMode.areResourcesLow();
    }
  }

  public static Collection<URI> getNamespaceDirs(Configuration conf) {
    return getStorageDirs(conf, DFS_NAMENODE_NAME_DIR_KEY);
  }
@@ -212,7 +212,9 @@ public class NameNode implements NameNodeStatusMXBean {
      + StartupOption.CLUSTERID.getName() + " cid ] ["
      + StartupOption.FORCE.getName() + "] ["
      + StartupOption.NONINTERACTIVE.getName() + "] ] | ["
      + StartupOption.UPGRADE.getName() + "] | ["
      + StartupOption.UPGRADE.getName() +
        " [" + StartupOption.CLUSTERID.getName() + " cid]" +
        " [" + StartupOption.RENAMERESERVED.getName() + "<k-v pairs>] ] | ["
      + StartupOption.ROLLBACK.getName() + "] | ["
      + StartupOption.FINALIZE.getName() + "] | ["
      + StartupOption.IMPORT.getName() + "] | ["

@@ -1056,7 +1058,8 @@ public class NameNode implements NameNodeStatusMXBean {
    out.println(USAGE + "\n");
  }

  private static StartupOption parseArguments(String args[]) {
  @VisibleForTesting
  static StartupOption parseArguments(String args[]) {
    int argsLen = (args == null) ? 0 : args.length;
    StartupOption startOpt = StartupOption.REGULAR;
    for(int i=0; i < argsLen; i++) {

@@ -1103,11 +1106,33 @@ public class NameNode implements NameNodeStatusMXBean {
        startOpt = StartupOption.CHECKPOINT;
      } else if (StartupOption.UPGRADE.getName().equalsIgnoreCase(cmd)) {
        startOpt = StartupOption.UPGRADE;
        // might be followed by two args
        if (i + 2 < argsLen
            && args[i + 1].equalsIgnoreCase(StartupOption.CLUSTERID.getName())) {
          i += 2;
          startOpt.setClusterId(args[i]);
        /* Can be followed by CLUSTERID with a required parameter or
         * RENAMERESERVED with an optional parameter
         */
        while (i + 1 < argsLen) {
          String flag = args[i + 1];
          if (flag.equalsIgnoreCase(StartupOption.CLUSTERID.getName())) {
            if (i + 2 < argsLen) {
              i += 2;
              startOpt.setClusterId(args[i]);
            } else {
              LOG.fatal("Must specify a valid cluster ID after the "
                  + StartupOption.CLUSTERID.getName() + " flag");
              return null;
            }
          } else if (flag.equalsIgnoreCase(StartupOption.RENAMERESERVED
              .getName())) {
            if (i + 2 < argsLen) {
              FSImageFormat.setRenameReservedPairs(args[i + 2]);
              i += 2;
            } else {
              FSImageFormat.useDefaultRenameReservedPairs();
              i += 1;
            }
          } else {
            LOG.fatal("Unknown upgrade flag " + flag);
            return null;
          }
        }
      } else if (StartupOption.ROLLBACK.getName().equalsIgnoreCase(cmd)) {
        startOpt = StartupOption.ROLLBACK;
@@ -140,6 +140,18 @@ public class CacheAdmin extends Configured implements Tool {
    return maxTtl;
  }

  private static Long parseLimitString(String limitString) {
    Long limit = null;
    if (limitString != null) {
      if (limitString.equalsIgnoreCase("unlimited")) {
        limit = CachePoolInfo.LIMIT_UNLIMITED;
      } else {
        limit = Long.parseLong(limitString);
      }
    }
    return limit;
  }

  private static Expiration parseExpirationString(String ttlString)
      throws IOException {
    Expiration ex = null;

@@ -650,8 +662,8 @@ public class CacheAdmin extends Configured implements Tool {
        info.setMode(new FsPermission(mode));
      }
      String limitString = StringUtils.popOptionWithArgument("-limit", args);
      if (limitString != null) {
        long limit = Long.parseLong(limitString);
      Long limit = parseLimitString(limitString);
      if (limit != null) {
        info.setLimit(limit);
      }
      String maxTtlString = StringUtils.popOptionWithArgument("-maxTtl", args);

@@ -726,8 +738,7 @@ public class CacheAdmin extends Configured implements Tool {
      Integer mode = (modeString == null) ?
          null : Integer.parseInt(modeString, 8);
      String limitString = StringUtils.popOptionWithArgument("-limit", args);
      Long limit = (limitString == null) ?
          null : Long.parseLong(limitString);
      Long limit = parseLimitString(limitString);
      String maxTtlString = StringUtils.popOptionWithArgument("-maxTtl", args);
      Long maxTtl = null;
      try {

@@ -962,9 +973,8 @@ public class CacheAdmin extends Configured implements Tool {
      if (numResults > 0) {
        System.out.print(listing);
      }
      // If there are no results, we return 1 (failure exit code);
      // otherwise we return 0 (success exit code).
      return (numResults == 0) ? 1 : 0;
      // If list pools succeed, we return 0 (success exit code)
      return 0;
    }
  }
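
With the new parseLimitString helper above, the cacheadmin CLI accepts the literal string "unlimited" wherever a -limit value is expected. A minimal usage sketch (the pool name is hypothetical; the flags shown are the ones parsed by the code above):

    # create a cache pool with no byte limit, then set an explicit limit later
    hdfs cacheadmin -addPool demo-pool -limit unlimited
    hdfs cacheadmin -modifyPool demo-pool -limit 1073741824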
@@ -191,6 +191,9 @@ public class WebHdfsFileSystem extends FileSystem
    int maxFailoverAttempts = conf.getInt(
        DFSConfigKeys.DFS_HTTP_CLIENT_FAILOVER_MAX_ATTEMPTS_KEY,
        DFSConfigKeys.DFS_HTTP_CLIENT_FAILOVER_MAX_ATTEMPTS_DEFAULT);
    int maxRetryAttempts = conf.getInt(
        DFSConfigKeys.DFS_HTTP_CLIENT_RETRY_MAX_ATTEMPTS_KEY,
        DFSConfigKeys.DFS_HTTP_CLIENT_RETRY_MAX_ATTEMPTS_DEFAULT);
    int failoverSleepBaseMillis = conf.getInt(
        DFSConfigKeys.DFS_HTTP_CLIENT_FAILOVER_SLEEPTIME_BASE_KEY,
        DFSConfigKeys.DFS_HTTP_CLIENT_FAILOVER_SLEEPTIME_BASE_DEFAULT);

@@ -200,7 +203,7 @@ public class WebHdfsFileSystem extends FileSystem

    this.retryPolicy = RetryPolicies
        .failoverOnNetworkException(RetryPolicies.TRY_ONCE_THEN_FAIL,
            maxFailoverAttempts, failoverSleepBaseMillis,
            maxFailoverAttempts, maxRetryAttempts, failoverSleepBaseMillis,
            failoverSleepMaxMillis);
  }

@@ -435,7 +435,7 @@ HDFS Users Guide
   state it was in before the upgrade. HDFS upgrade is described in more
   detail in {{{http://wiki.apache.org/hadoop/Hadoop_Upgrade}Hadoop Upgrade}}
   Wiki page. HDFS can have one such backup at a time. Before upgrading,
   administrators need to remove existing backupusing bin/hadoop dfsadmin
   administrators need to remove existing backup using bin/hadoop dfsadmin
   <<<-finalizeUpgrade>>> command. The following briefly describes the
   typical upgrade procedure:

@@ -459,6 +459,33 @@ HDFS Users Guide

    * start the cluster with rollback option. (<<<bin/start-dfs.sh -rollback>>>).

  When upgrading to a new version of HDFS, it is necessary to rename or
  delete any paths that are reserved in the new version of HDFS. If the
  NameNode encounters a reserved path during upgrade, it will print an
  error like the following:

  <<< /.reserved is a reserved path and .snapshot is a
  reserved path component in this version of HDFS. Please rollback and delete
  or rename this path, or upgrade with the -renameReserved [key-value pairs]
  option to automatically rename these paths during upgrade.>>>

  Specifying <<<-upgrade -renameReserved [optional key-value pairs]>>> causes
  the NameNode to automatically rename any reserved paths found during
  startup. For example, to rename all paths named <<<.snapshot>>> to
  <<<.my-snapshot>>> and <<<.reserved>>> to <<<.my-reserved>>>, a user would
  specify <<<-upgrade -renameReserved
  .snapshot=.my-snapshot,.reserved=.my-reserved>>>.

  If no key-value pairs are specified with <<<-renameReserved>>>, the
  NameNode will then suffix reserved paths with
  <<<.<LAYOUT-VERSION>.UPGRADE_RENAMED>>>, e.g.
  <<<.snapshot.-51.UPGRADE_RENAMED>>>.

  There are some caveats to this renaming process. It's recommended,
  if possible, to first <<<hdfs dfsadmin -saveNamespace>>> before upgrading.
  This is because data inconsistency can result if an edit log operation
  refers to the destination of an automatically renamed file.
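
  As an illustrative sketch only (the rename pairs below repeat the example
  values used above, and starting the NameNode directly in the foreground is
  just one way to pass these startup options):

    # start the upgraded NameNode, renaming reserved paths as it goes
    hdfs namenode -upgrade -renameReserved .snapshot=.my-snapshot,.reserved=.my-reserved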

* File Permissions and Security

  The file permissions are designed to be similar to file permissions on
@@ -20,7 +20,7 @@
  xsi:schemaLocation="http://maven.apache.org/XDOC/2.0 http://maven.apache.org/xsd/xdoc-2.0.xsd">

  <properties>
    <title>HFDS Snapshots</title>
    <title>HDFS Snapshots</title>
  </properties>

  <body>
@@ -99,15 +99,22 @@
    <li>Copying a file from snapshot <code>s0</code>:
      <source>hdfs dfs -cp /foo/.snapshot/s0/bar /tmp</source></li>
  </ul>
  <p>
    <b>Note</b> that the name ".snapshot" is now a reserved file name in HDFS
    so that users cannot create a file/directory with ".snapshot" as the name.
    If ".snapshot" is used in a previous version of HDFS, it must be renamed before upgrade;
    otherwise, upgrade will fail.
  </p>
  </subsection>
  </section>

  <section name="Upgrading to a version of HDFS with snapshots" id="Upgrade">

  <p>
    The HDFS snapshot feature introduces a new reserved path name used to
    interact with snapshots: <tt>.snapshot</tt>. When upgrading from an
    older version of HDFS, existing paths named <tt>.snapshot</tt> need
    to first be renamed or deleted to avoid conflicting with the reserved path.
    See the upgrade section in
    <a href="HdfsUserGuide.html#Upgrade_and_Rollback">the HDFS user guide</a>
    for more information. </p>
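
  <!-- Illustrative sketch only: the path shown below is hypothetical. -->
  <p>
    For example, a user-created <tt>.snapshot</tt> directory could be renamed
    on the old cluster, before the upgrade, with a command such as:
  </p>
  <source>hadoop fs -mv /user/alice/.snapshot /user/alice/snapshot-legacy</source>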

  </section>

  <section name="Snapshot Operations" id="SnapshotOperations">
  <subsection name="Administrator Operations" id="AdministratorOperations">
  <p>
@ -27,6 +27,7 @@ import java.io.File;
|
|||
import java.io.FileOutputStream;
|
||||
import java.io.FileReader;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Iterator;
import java.util.LinkedList;
import java.util.TreeMap;

@@ -43,7 +44,9 @@ import org.apache.hadoop.hdfs.protocol.DirectoryListing;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
import org.apache.hadoop.hdfs.server.namenode.FSImageFormat;
import org.apache.hadoop.hdfs.server.namenode.FSImageTestUtil;
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.hadoop.util.StringUtils;
import org.apache.log4j.Logger;
import org.junit.Test;

@@ -67,6 +70,7 @@ public class TestDFSUpgradeFromImage {
  private static final String HADOOP_DFS_DIR_TXT = "hadoop-dfs-dir.txt";
  private static final String HADOOP22_IMAGE = "hadoop-22-dfs-dir.tgz";
  private static final String HADOOP1_BBW_IMAGE = "hadoop1-bbw.tgz";
  private static final String HADOOP2_RESERVED_IMAGE = "hadoop-2-reserved.tgz";

  private static class ReferenceFileInfo {
    String path;

@@ -320,6 +324,87 @@ public class TestDFSUpgradeFromImage {
      assertEquals("Upgrade did not fail with bad MD5", 1, md5failures);
    }
  }

  /**
   * Test upgrade from 2.0 image with a variety of .snapshot and .reserved
   * paths to test renaming on upgrade
   */
  @Test
  public void testUpgradeFromRel2ReservedImage() throws IOException {
    unpackStorage(HADOOP2_RESERVED_IMAGE);
    MiniDFSCluster cluster = null;
    // Try it once without setting the upgrade flag to ensure it fails
    try {
      cluster =
          new MiniDFSCluster.Builder(new Configuration())
              .format(false)
              .startupOption(StartupOption.UPGRADE)
              .numDataNodes(0).build();
    } catch (IllegalArgumentException e) {
      GenericTestUtils.assertExceptionContains(
          "reserved path component in this version",
          e);
    } finally {
      if (cluster != null) {
        cluster.shutdown();
      }
    }
    // Try it again with a custom rename string
    try {
      FSImageFormat.setRenameReservedPairs(
          ".snapshot=.user-snapshot," +
          ".reserved=.my-reserved");
      cluster =
          new MiniDFSCluster.Builder(new Configuration())
              .format(false)
              .startupOption(StartupOption.UPGRADE)
              .numDataNodes(0).build();
      // Make sure the paths were renamed as expected
      DistributedFileSystem dfs = cluster.getFileSystem();
      ArrayList<Path> toList = new ArrayList<Path>();
      ArrayList<String> found = new ArrayList<String>();
      toList.add(new Path("/"));
      while (!toList.isEmpty()) {
        Path p = toList.remove(0);
        FileStatus[] statuses = dfs.listStatus(p);
        for (FileStatus status: statuses) {
          final String path = status.getPath().toUri().getPath();
          System.out.println("Found path " + path);
          found.add(path);
          if (status.isDirectory()) {
            toList.add(status.getPath());
          }
        }
      }
      String[] expected = new String[] {
          "/edits",
          "/edits/.reserved",
          "/edits/.user-snapshot",
          "/edits/.user-snapshot/editsdir",
          "/edits/.user-snapshot/editsdir/editscontents",
          "/edits/.user-snapshot/editsdir/editsdir2",
          "/image",
          "/image/.reserved",
          "/image/.user-snapshot",
          "/image/.user-snapshot/imagedir",
          "/image/.user-snapshot/imagedir/imagecontents",
          "/image/.user-snapshot/imagedir/imagedir2",
          "/.my-reserved",
          "/.my-reserved/edits-touch",
          "/.my-reserved/image-touch"
      };

      for (String s: expected) {
        assertTrue("Did not find expected path " + s, found.contains(s));
      }
      assertEquals("Found an unexpected path while listing filesystem",
          found.size(), expected.length);
    } finally {
      if (cluster != null) {
        cluster.shutdown();
      }
    }
  }

  static void recoverAllLeases(DFSClient dfs,
      Path path) throws IOException {
@@ -19,16 +19,19 @@ package org.apache.hadoop.hdfs;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_MAX_BLOCK_ACQUIRE_FAILURES_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_SOCKET_REUSE_KEEPALIVE_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_SOCKET_REUSE_KEEPALIVE_DEFAULT;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_SOCKET_WRITE_TIMEOUT_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_SOCKET_CACHE_EXPIRY_MSEC_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_SOCKET_CACHE_CAPACITY_DEFAULT;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;

import com.google.common.base.Supplier;

import java.io.InputStream;
import java.io.PrintWriter;
import java.net.InetSocketAddress;
import java.net.Socket;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;

@@ -37,10 +40,8 @@ import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.MiniDFSCluster.DataNodeProperties;
import org.apache.hadoop.hdfs.net.Peer;
import org.apache.hadoop.hdfs.server.datanode.DataNode;
import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.hadoop.util.ReflectionUtils;
import org.junit.After;
import org.junit.Before;

@@ -51,10 +52,7 @@ import com.google.common.io.NullOutputStream;
public class TestDataTransferKeepalive {
  Configuration conf = new HdfsConfiguration();
  private MiniDFSCluster cluster;
  private FileSystem fs;
  private InetSocketAddress dnAddr;
  private DataNode dn;
  private DFSClient dfsClient;
  private static Path TEST_FILE = new Path("/test");

  private static final int KEEPALIVE_TIMEOUT = 1000;

@@ -69,15 +67,7 @@ public class TestDataTransferKeepalive {

    cluster = new MiniDFSCluster.Builder(conf)
        .numDataNodes(1).build();
    fs = cluster.getFileSystem();
    dfsClient = ((DistributedFileSystem)fs).dfs;
    dfsClient.peerCache.clear();

    String poolId = cluster.getNamesystem().getBlockPoolId();
    dn = cluster.getDataNodes().get(0);
    DatanodeRegistration dnReg = DataNodeTestUtils.getDNRegistrationForBP(
        dn, poolId);
    dnAddr = NetUtils.createSocketAddr(dnReg.getXferAddr());
  }

  @After

@@ -90,34 +80,86 @@ public class TestDataTransferKeepalive {
   * its configured keepalive timeout.
   */
  @Test(timeout=30000)
  public void testKeepaliveTimeouts() throws Exception {
  public void testDatanodeRespectsKeepAliveTimeout() throws Exception {
    Configuration clientConf = new Configuration(conf);
    // Set a client socket cache expiry time much longer than
    // the datanode-side expiration time.
    final long CLIENT_EXPIRY_MS = 60000L;
    clientConf.setLong(DFS_CLIENT_SOCKET_CACHE_EXPIRY_MSEC_KEY, CLIENT_EXPIRY_MS);
    PeerCache.setInstance(DFS_CLIENT_SOCKET_CACHE_CAPACITY_DEFAULT, CLIENT_EXPIRY_MS);
    DistributedFileSystem fs =
        (DistributedFileSystem)FileSystem.get(cluster.getURI(),
            clientConf);

    DFSTestUtil.createFile(fs, TEST_FILE, 1L, (short)1, 0L);

    // Clients that write aren't currently re-used.
    assertEquals(0, dfsClient.peerCache.size());
    assertEquals(0, fs.dfs.peerCache.size());
    assertXceiverCount(0);

    // Reads the file, so we should get a
    // cached socket, and should have an xceiver on the other side.
    DFSTestUtil.readFile(fs, TEST_FILE);
    assertEquals(1, dfsClient.peerCache.size());
    assertEquals(1, fs.dfs.peerCache.size());
    assertXceiverCount(1);

    // Sleep for a bit longer than the keepalive timeout
    // and make sure the xceiver died.
    Thread.sleep(KEEPALIVE_TIMEOUT * 2);
    Thread.sleep(DFS_DATANODE_SOCKET_REUSE_KEEPALIVE_DEFAULT + 1);
    assertXceiverCount(0);

    // The socket is still in the cache, because we don't
    // notice that it's closed until we try to read
    // from it again.
    assertEquals(1, dfsClient.peerCache.size());
    assertEquals(1, fs.dfs.peerCache.size());

    // Take it out of the cache - reading should
    // give an EOF.
    Peer peer = dfsClient.peerCache.get(dn.getDatanodeId(), false);
    Peer peer = fs.dfs.peerCache.get(dn.getDatanodeId(), false);
    assertNotNull(peer);
    assertEquals(-1, peer.getInputStream().read());
    PeerCache.setInstance(DFS_CLIENT_SOCKET_CACHE_CAPACITY_DEFAULT,
        DFS_DATANODE_SOCKET_REUSE_KEEPALIVE_DEFAULT);
  }

  /**
   * Test that the client respects its keepalive timeout.
   */
  @Test(timeout=30000)
  public void testClientResponsesKeepAliveTimeout() throws Exception {
    Configuration clientConf = new Configuration(conf);
    // Set a client socket cache expiry time much shorter than
    // the datanode-side expiration time.
    final long CLIENT_EXPIRY_MS = 10L;
    clientConf.setLong(DFS_CLIENT_SOCKET_CACHE_EXPIRY_MSEC_KEY, CLIENT_EXPIRY_MS);
    PeerCache.setInstance(DFS_CLIENT_SOCKET_CACHE_CAPACITY_DEFAULT, CLIENT_EXPIRY_MS);
    DistributedFileSystem fs =
        (DistributedFileSystem)FileSystem.get(cluster.getURI(),
            clientConf);

    DFSTestUtil.createFile(fs, TEST_FILE, 1L, (short)1, 0L);

    // Clients that write aren't currently re-used.
    assertEquals(0, fs.dfs.peerCache.size());
    assertXceiverCount(0);

    // Reads the file, so we should get a
    // cached socket, and should have an xceiver on the other side.
    DFSTestUtil.readFile(fs, TEST_FILE);
    assertEquals(1, fs.dfs.peerCache.size());
    assertXceiverCount(1);

    // Sleep for a bit longer than the client keepalive timeout.
    Thread.sleep(CLIENT_EXPIRY_MS + 1);

    // Taking out a peer which is expired should give a null.
    Peer peer = fs.dfs.peerCache.get(dn.getDatanodeId(), false);
    assertTrue(peer == null);

    // The socket cache is now empty.
    assertEquals(0, fs.dfs.peerCache.size());
    PeerCache.setInstance(DFS_CLIENT_SOCKET_CACHE_CAPACITY_DEFAULT,
        DFS_DATANODE_SOCKET_REUSE_KEEPALIVE_DEFAULT);
  }

  /**

@@ -125,8 +167,17 @@ public class TestDataTransferKeepalive {
   * read bytes off the stream quickly. The datanode should time out sending the
   * chunks and the transceiver should die, even if it has a long keepalive.
   */
  @Test(timeout=30000)
  @Test(timeout=300000)
  public void testSlowReader() throws Exception {
    // Set a client socket cache expiry time much longer than
    // the datanode-side expiration time.
    final long CLIENT_EXPIRY_MS = 600000L;
    Configuration clientConf = new Configuration(conf);
    clientConf.setLong(DFS_CLIENT_SOCKET_CACHE_EXPIRY_MSEC_KEY, CLIENT_EXPIRY_MS);
    PeerCache.setInstance(DFS_CLIENT_SOCKET_CACHE_CAPACITY_DEFAULT, CLIENT_EXPIRY_MS);
    DistributedFileSystem fs =
        (DistributedFileSystem)FileSystem.get(cluster.getURI(),
            clientConf);
    // Restart the DN with a shorter write timeout.
    DataNodeProperties props = cluster.stopDataNode(0);
    props.conf.setInt(DFS_DATANODE_SOCKET_WRITE_TIMEOUT_KEY,

@@ -134,38 +185,31 @@ public class TestDataTransferKeepalive {
    props.conf.setInt(DFS_DATANODE_SOCKET_REUSE_KEEPALIVE_KEY,
        120000);
    assertTrue(cluster.restartDataNode(props, true));
    dn = cluster.getDataNodes().get(0);
    // Wait for heartbeats to avoid a startup race where we
    // try to write the block while the DN is still starting.
    cluster.triggerHeartbeats();

    dn = cluster.getDataNodes().get(0);

    DFSTestUtil.createFile(fs, TEST_FILE, 1024*1024*8L, (short)1, 0L);
    FSDataInputStream stm = fs.open(TEST_FILE);
    try {
      stm.read();
      assertXceiverCount(1);
      stm.read();
      assertXceiverCount(1);

      // Poll for 0 running xceivers. Allow up to 5 seconds for some slack.
      long totalSleepTime = 0;
      long sleepTime = WRITE_TIMEOUT + 100;
      while (getXceiverCountWithoutServer() > 0 && totalSleepTime < 5000) {
        Thread.sleep(sleepTime);
        totalSleepTime += sleepTime;
        sleepTime = 100;
      GenericTestUtils.waitFor(new Supplier<Boolean>() {
        public Boolean get() {
          // DN should time out in sendChunks, and this should force
          // the xceiver to exit.
          return getXceiverCountWithoutServer() == 0;
        }
      }, 500, 50000);

      // DN should time out in sendChunks, and this should force
      // the xceiver to exit.
      assertXceiverCount(0);
    } finally {
      IOUtils.closeStream(stm);
    }
    IOUtils.closeStream(stm);
  }

  @Test(timeout=30000)
  public void testManyClosedSocketsInCache() throws Exception {
    // Make a small file
    DistributedFileSystem fs = cluster.getFileSystem();
    DFSTestUtil.createFile(fs, TEST_FILE, 1L, (short)1, 0L);

    // Insert a bunch of dead sockets in the cache, by opening
@@ -0,0 +1,54 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hdfs;

import static org.apache.hadoop.http.HttpConfig.Policy.HTTP_AND_HTTPS;
import static org.apache.hadoop.http.HttpConfig.Policy.HTTP_ONLY;

import org.apache.hadoop.HadoopIllegalArgumentException;
import org.apache.hadoop.conf.Configuration;
import org.junit.Assert;
import org.junit.Test;

public final class TestHttpPolicy {

  @Test(expected = HadoopIllegalArgumentException.class)
  public void testInvalidPolicyValue() {
    Configuration conf = new Configuration();
    conf.set(DFSConfigKeys.DFS_HTTP_POLICY_KEY, "invalid");
    DFSUtil.getHttpPolicy(conf);
  }

  @Test
  public void testDeprecatedConfiguration() {
    Configuration conf = new Configuration(false);
    Assert.assertSame(HTTP_ONLY, DFSUtil.getHttpPolicy(conf));

    conf.setBoolean(DFSConfigKeys.DFS_HTTPS_ENABLE_KEY, true);
    Assert.assertSame(HTTP_AND_HTTPS, DFSUtil.getHttpPolicy(conf));

    conf = new Configuration(false);
    conf.setBoolean(DFSConfigKeys.HADOOP_SSL_ENABLED_KEY, true);
    Assert.assertSame(HTTP_AND_HTTPS, DFSUtil.getHttpPolicy(conf));

    conf = new Configuration(false);
    conf.set(DFSConfigKeys.DFS_HTTP_POLICY_KEY, HTTP_ONLY.name());
    conf.setBoolean(DFSConfigKeys.DFS_HTTPS_ENABLE_KEY, true);
    Assert.assertSame(HTTP_ONLY, DFSUtil.getHttpPolicy(conf));
  }
}
@@ -0,0 +1,104 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hdfs.client;

import java.io.File;
import java.io.FileInputStream;
import java.util.ArrayList;

import org.apache.commons.lang.SystemUtils;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.io.nativeio.NativeIO;
import org.apache.hadoop.io.nativeio.SharedFileDescriptorFactory;
import org.apache.hadoop.hdfs.client.ShortCircuitSharedMemorySegment.Slot;
import org.junit.Assume;
import org.junit.Before;
import org.junit.Test;
import org.junit.Assert;

public class TestShortCircuitSharedMemorySegment {
  public static final Log LOG =
      LogFactory.getLog(TestShortCircuitSharedMemorySegment.class);

  private static final File TEST_BASE =
      new File(System.getProperty("test.build.data", "/tmp"));

  @Before
  public void before() {
    Assume.assumeTrue(NativeIO.isAvailable());
    Assume.assumeTrue(SystemUtils.IS_OS_UNIX);
  }

  @Test(timeout=60000)
  public void testStartupShutdown() throws Exception {
    File path = new File(TEST_BASE, "testStartupShutdown");
    path.mkdirs();
    SharedFileDescriptorFactory factory =
        new SharedFileDescriptorFactory("shm_", path.getAbsolutePath());
    FileInputStream stream = factory.createDescriptor(4096);
    ShortCircuitSharedMemorySegment shm =
        new ShortCircuitSharedMemorySegment(stream);
    shm.close();
    stream.close();
    FileUtil.fullyDelete(path);
  }

  @Test(timeout=60000)
  public void testAllocateSlots() throws Exception {
    File path = new File(TEST_BASE, "testAllocateSlots");
    path.mkdirs();
    SharedFileDescriptorFactory factory =
        new SharedFileDescriptorFactory("shm_", path.getAbsolutePath());
    FileInputStream stream = factory.createDescriptor(4096);
    ShortCircuitSharedMemorySegment shm =
        new ShortCircuitSharedMemorySegment(stream);
    int numSlots = 0;
    ArrayList<Slot> slots = new ArrayList<Slot>();
    while (true) {
      Slot slot = shm.allocateNextSlot();
      if (slot == null) {
        LOG.info("allocated " + numSlots + " slots before running out.");
        break;
      }
      slots.add(slot);
      numSlots++;
    }
    int slotIdx = 0;
    for (Slot slot : slots) {
      Assert.assertFalse(slot.addAnchor());
      Assert.assertEquals(slotIdx++, slot.getIndex());
    }
    for (Slot slot : slots) {
      slot.makeAnchorable();
    }
    for (Slot slot : slots) {
      Assert.assertTrue(slot.addAnchor());
    }
    for (Slot slot : slots) {
      slot.removeAnchor();
    }
    shm.close();
    for (Slot slot : slots) {
      slot.close();
    }
    stream.close();
    FileUtil.fullyDelete(path);
  }
}
@@ -65,7 +65,7 @@ public class TestBalancerWithNodeGroup {

  ClientProtocol client;

  static final long TIMEOUT = 20000L; //msec
  static final long TIMEOUT = 40000L; //msec
  static final double CAPACITY_ALLOWED_VARIANCE = 0.005; // 0.5%
  static final double BALANCE_ALLOWED_VARIANCE = 0.11; // 10%+delta
  static final int DEFAULT_BLOCK_SIZE = 10;

@@ -124,6 +124,8 @@ public class TestReplicationPolicyWithNodeGroup {
    CONF.set(CommonConfigurationKeysPublic.NET_TOPOLOGY_IMPL_KEY,
        NetworkTopologyWithNodeGroup.class.getName());

    CONF.setBoolean(DFSConfigKeys.DFS_NAMENODE_AVOID_STALE_DATANODE_FOR_WRITE_KEY, true);

    File baseDir = PathUtils.getTestDir(TestReplicationPolicyWithNodeGroup.class);

    CONF.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,
@ -28,6 +28,7 @@ import java.io.InputStream;
|
|||
import java.util.ArrayList;
|
||||
import java.util.Collection;
|
||||
import java.util.Collections;
|
||||
import java.util.Enumeration;
|
||||
import java.util.List;
|
||||
import java.util.regex.Pattern;
|
||||
|
||||
|
@ -301,11 +302,18 @@ public class TestAuditLogs {
|
|||
// Turn off the logs
|
||||
Logger logger = ((Log4JLogger) FSNamesystem.auditLog).getLogger();
|
||||
logger.setLevel(Level.OFF);
|
||||
|
||||
|
||||
// Close the appenders and force all logs to be flushed
|
||||
Enumeration<?> appenders = logger.getAllAppenders();
|
||||
while (appenders.hasMoreElements()) {
|
||||
Appender appender = (Appender)appenders.nextElement();
|
||||
appender.close();
|
||||
}
|
||||
|
||||
BufferedReader reader = new BufferedReader(new FileReader(auditLogFile));
|
||||
String line = null;
|
||||
boolean ret = true;
|
||||
|
||||
|
||||
try {
|
||||
for (int i = 0; i < ndupe; i++) {
|
||||
line = reader.readLine();
|
||||
|
|
|
@ -85,6 +85,7 @@ public class TestNameNodeHttpServer {
|
|||
@Test
|
||||
public void testHttpPolicy() throws Exception {
|
||||
conf.set(DFSConfigKeys.DFS_HTTP_POLICY_KEY, policy.name());
|
||||
conf.set(DFSConfigKeys.DFS_NAMENODE_HTTPS_ADDRESS_KEY, "localhost:0");
|
||||
|
||||
InetSocketAddress addr = InetSocketAddress.createUnresolved("localhost", 0);
|
||||
NameNodeHttpServer server = null;
|
||||
|
@ -103,7 +104,9 @@ public class TestNameNodeHttpServer {
|
|||
server.getHttpsAddress() == null));
|
||||
|
||||
} finally {
|
||||
server.stop();
|
||||
if (server != null) {
|
||||
server.stop();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -0,0 +1,105 @@
|
|||
/**
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
* regarding copyright ownership. The ASF licenses this file
|
||||
* to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
package org.apache.hadoop.hdfs.server.namenode;
|
||||
|
||||
import static org.apache.hadoop.test.GenericTestUtils.assertExceptionContains;
|
||||
import static org.junit.Assert.assertEquals;
|
||||
import static org.junit.Assert.assertNull;
|
||||
import static org.junit.Assert.assertTrue;
|
||||
|
||||
import org.apache.hadoop.hdfs.protocol.LayoutVersion;
|
||||
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
|
||||
import org.junit.Test;
|
||||
|
||||
public class TestNameNodeOptionParsing {
|
||||
|
||||
@Test(timeout = 10000)
|
||||
public void testUpgrade() {
|
||||
StartupOption opt = null;
|
||||
// UPGRADE is set, but nothing else
|
||||
opt = NameNode.parseArguments(new String[] {"-upgrade"});
|
||||
assertEquals(opt, StartupOption.UPGRADE);
|
||||
assertNull(opt.getClusterId());
|
||||
assertTrue(FSImageFormat.renameReservedMap.isEmpty());
|
||||
// cluster ID is set
|
||||
opt = NameNode.parseArguments(new String[] { "-upgrade", "-clusterid",
|
||||
"mycid" });
|
||||
assertEquals(StartupOption.UPGRADE, opt);
|
||||
assertEquals("mycid", opt.getClusterId());
|
||||
assertTrue(FSImageFormat.renameReservedMap.isEmpty());
|
||||
// Everything is set
|
||||
opt = NameNode.parseArguments(new String[] { "-upgrade", "-clusterid",
|
||||
"mycid", "-renameReserved",
|
||||
".snapshot=.my-snapshot,.reserved=.my-reserved" });
|
||||
assertEquals(StartupOption.UPGRADE, opt);
|
||||
assertEquals("mycid", opt.getClusterId());
|
||||
assertEquals(".my-snapshot",
|
||||
FSImageFormat.renameReservedMap.get(".snapshot"));
|
||||
assertEquals(".my-reserved",
|
||||
FSImageFormat.renameReservedMap.get(".reserved"));
|
||||
// Reset the map
|
||||
FSImageFormat.renameReservedMap.clear();
|
||||
// Everything is set, but in a different order
|
||||
opt = NameNode.parseArguments(new String[] { "-upgrade", "-renameReserved",
|
||||
".reserved=.my-reserved,.snapshot=.my-snapshot", "-clusterid",
|
||||
"mycid"});
|
||||
assertEquals(StartupOption.UPGRADE, opt);
|
||||
assertEquals("mycid", opt.getClusterId());
|
||||
assertEquals(".my-snapshot",
|
||||
FSImageFormat.renameReservedMap.get(".snapshot"));
|
||||
assertEquals(".my-reserved",
|
||||
FSImageFormat.renameReservedMap.get(".reserved"));
|
||||
// Try the default renameReserved
|
||||
opt = NameNode.parseArguments(new String[] { "-upgrade", "-renameReserved"});
|
||||
assertEquals(StartupOption.UPGRADE, opt);
|
||||
assertEquals(
|
||||
".snapshot." + LayoutVersion.getCurrentLayoutVersion()
|
||||
+ ".UPGRADE_RENAMED",
|
||||
FSImageFormat.renameReservedMap.get(".snapshot"));
|
||||
assertEquals(
|
||||
".reserved." + LayoutVersion.getCurrentLayoutVersion()
|
||||
+ ".UPGRADE_RENAMED",
|
||||
FSImageFormat.renameReservedMap.get(".reserved"));
|
||||
|
||||
// Try some error conditions
|
||||
try {
|
||||
opt =
|
||||
NameNode.parseArguments(new String[] { "-upgrade", "-renameReserved",
|
||||
".reserved=.my-reserved,.not-reserved=.my-not-reserved" });
|
||||
} catch (IllegalArgumentException e) {
|
||||
assertExceptionContains("Unknown reserved path", e);
|
||||
}
|
||||
try {
|
||||
opt =
|
||||
NameNode.parseArguments(new String[] { "-upgrade", "-renameReserved",
|
||||
".reserved=.my-reserved,.snapshot=.snapshot" });
|
||||
} catch (IllegalArgumentException e) {
|
||||
assertExceptionContains("Invalid rename path", e);
|
||||
}
|
||||
try {
|
||||
opt =
|
||||
NameNode.parseArguments(new String[] { "-upgrade", "-renameReserved",
|
||||
".snapshot=.reserved" });
|
||||
} catch (IllegalArgumentException e) {
|
||||
assertExceptionContains("Invalid rename path", e);
|
||||
}
|
||||
opt = NameNode.parseArguments(new String[] { "-upgrade", "-cid"});
|
||||
assertNull(opt);
|
||||
}
|
||||
|
||||
}
|
|
@ -55,6 +55,7 @@ import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
|
|||
import org.apache.hadoop.hdfs.server.blockmanagement.BlockManagerTestUtil;
|
||||
import org.apache.hadoop.hdfs.server.namenode.FSImage;
|
||||
import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
|
||||
import org.apache.hadoop.hdfs.server.namenode.FSNamesystem.SafeModeInfo;
|
||||
import org.apache.hadoop.hdfs.server.namenode.NameNode;
|
||||
import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
|
||||
import org.apache.hadoop.io.IOUtils;
|
||||
|
@ -65,6 +66,7 @@ import org.apache.log4j.Level;
|
|||
import org.junit.After;
|
||||
import org.junit.Before;
|
||||
import org.junit.Test;
|
||||
import org.mockito.internal.util.reflection.Whitebox;
|
||||
|
||||
import com.google.common.base.Supplier;
|
||||
import com.google.common.collect.Lists;
|
||||
|
@ -124,6 +126,9 @@ public class TestHASafeMode {
|
|||
final Path test = new Path("/test");
|
||||
// let nn0 enter safemode
|
||||
NameNodeAdapter.enterSafeMode(nn0, false);
|
||||
SafeModeInfo safeMode = (SafeModeInfo) Whitebox.getInternalState(
|
||||
nn0.getNamesystem(), "safeMode");
|
||||
Whitebox.setInternalState(safeMode, "extension", Integer.valueOf(30000));
|
||||
LOG.info("enter safemode");
|
||||
new Thread() {
|
||||
@Override
|
||||
|
|
|
@ -52,6 +52,7 @@ public class TestHttpsFileSystem {
|
|||
conf.setBoolean(DFSConfigKeys.DFS_WEBHDFS_ENABLED_KEY, true);
|
||||
conf.set(DFSConfigKeys.DFS_HTTP_POLICY_KEY, HttpConfig.Policy.HTTPS_ONLY.name());
|
||||
conf.set(DFSConfigKeys.DFS_NAMENODE_HTTPS_ADDRESS_KEY, "localhost:0");
|
||||
conf.set(DFSConfigKeys.DFS_DATANODE_HTTPS_ADDRESS_KEY, "localhost:0");
|
||||
|
||||
File base = new File(BASEDIR);
|
||||
FileUtil.fullyDelete(base);
|
||||
|
|
Binary file not shown.
|
@ -469,6 +469,8 @@
|
|||
</test-commands>
|
||||
<cleanup-commands>
|
||||
<cache-admin-command>-removePool pool1</cache-admin-command>
|
||||
<cache-admin-command>-removePool pool2</cache-admin-command>
|
||||
<cache-admin-command>-removePool pool3</cache-admin-command>
|
||||
</cleanup-commands>
|
||||
<comparators>
|
||||
<comparator>
|
||||
|
@ -489,5 +491,33 @@
|
|||
</comparator>
|
||||
</comparators>
|
||||
</test>
|
||||
|
||||
<test> <!--Tested -->
|
||||
<description>Testing setting pool unlimited limits</description>
|
||||
<test-commands>
|
||||
<cache-admin-command>-addPool pool1 -limit unlimited -owner andrew -group andrew</cache-admin-command>
|
||||
<cache-admin-command>-addPool pool2 -limit 10 -owner andrew -group andrew</cache-admin-command>
|
||||
<cache-admin-command>-modifyPool pool2 -limit unlimited</cache-admin-command>
|
||||
<cache-admin-command>-listPools</cache-admin-command>
|
||||
</test-commands>
|
||||
<cleanup-commands>
|
||||
<cache-admin-command>-removePool pool1</cache-admin-command>
|
||||
<cache-admin-command>-removePool pool2</cache-admin-command>
|
||||
</cleanup-commands>
|
||||
<comparators>
|
||||
<comparator>
|
||||
<type>SubstringComparator</type>
|
||||
<expected-output>Found 2 results</expected-output>
|
||||
</comparator>
|
||||
<comparator>
|
||||
<type>SubstringComparator</type>
|
||||
<expected-output>pool1 andrew andrew rwxr-xr-x unlimited never</expected-output>
|
||||
</comparator>
|
||||
<comparator>
|
||||
<type>SubstringComparator</type>
|
||||
<expected-output>pool2 andrew andrew rwxr-xr-x unlimited never</expected-output>
|
||||
</comparator>
|
||||
</comparators>
|
||||
</test>
|
||||
</tests>
|
||||
</configuration>
|
||||
|
|
|
@ -153,6 +153,8 @@ Release 2.4.0 - UNRELEASED
|
|||
MAPREDUCE-5732. Report proper queue when job has been automatically placed
|
||||
(Sandy Ryza)
|
||||
|
||||
MAPREDUCE-5699. Allow setting tags on MR jobs (kasha)
|
||||
|
||||
OPTIMIZATIONS
|
||||
|
||||
BUG FIXES
|
||||
|
@ -228,6 +230,10 @@ Release 2.3.0 - UNRELEASED
|
|||
MAPREDUCE-5725. Make explicit that TestNetworkedJob relies on the Capacity
|
||||
Scheduler (Sandy Ryza)
|
||||
|
||||
MAPREDUCE-5744. Job hangs because
|
||||
RMContainerAllocator$AssignedRequests.preemptReduce() violates the
|
||||
comparator contract (Gera Shegalov via kasha)
|
||||
|
||||
OPTIMIZATIONS
|
||||
|
||||
MAPREDUCE-4680. Job history cleaner should only check timestamps of files in
|
||||
|
@ -347,6 +353,9 @@ Release 2.3.0 - UNRELEASED
|
|||
MAPREDUCE-5723. MR AM container log can be truncated or empty.
|
||||
(Mohammad Kamrul Islam via kasha)
|
||||
|
||||
MAPREDUCE-5743. Fixed the test failure in TestRMContainerAllocator.
|
||||
(Ted Yu and Vinod Kumar Vavilapalli via zjshen)
|
||||
|
||||
Release 2.2.0 - 2013-10-13
|
||||
|
||||
INCOMPATIBLE CHANGES
|
||||
|
|
|
@ -1143,9 +1143,9 @@ public class RMContainerAllocator extends RMContainerRequestor
|
|||
new Comparator<TaskAttemptId>() {
|
||||
@Override
|
||||
public int compare(TaskAttemptId o1, TaskAttemptId o2) {
|
||||
float p = getJob().getTask(o1.getTaskId()).getAttempt(o1).getProgress() -
|
||||
getJob().getTask(o2.getTaskId()).getAttempt(o2).getProgress();
|
||||
return p >= 0 ? 1 : -1;
|
||||
return Float.compare(
|
||||
getJob().getTask(o1.getTaskId()).getAttempt(o1).getProgress(),
|
||||
getJob().getTask(o2.getTaskId()).getAttempt(o2).getProgress());
|
||||
}
|
||||
});
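Editor's note: the hunk above replaces a hand-rolled progress comparator that can never return 0 and that ranks two equal attempts as "greater" in both directions, which violates the Comparator contract and is the cause of the hang fixed by MAPREDUCE-5744. The following is an illustrative sketch only (not part of the patch) of the difference between the old and the patched logic:

// Illustrative sketch, not part of the patch: why the old progress comparator
// breaks the Comparator contract and why Float.compare() does not.
import java.util.Comparator;

public class ComparatorContractSketch {
  public static void main(String[] args) {
    final float a = 0.5f, b = 0.5f;

    // Old logic: never returns 0, so equal values compare as "greater" in
    // both directions, violating sgn(compare(x, y)) == -sgn(compare(y, x)).
    Comparator<Float> broken = new Comparator<Float>() {
      public int compare(Float o1, Float o2) {
        float p = o1 - o2;
        return p >= 0 ? 1 : -1;
      }
    };
    System.out.println(broken.compare(a, b)); // 1
    System.out.println(broken.compare(b, a)); // 1 -- contract violated

    // Patched logic: antisymmetric, and returns 0 on ties.
    Comparator<Float> fixed = new Comparator<Float>() {
      public int compare(Float o1, Float o2) {
        return Float.compare(o1, o2);
      }
    };
    System.out.println(fixed.compare(a, b)); // 0
  }
}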
|
||||
|
||||
|
|
|
@ -1652,8 +1652,16 @@ public class TestRMContainerAllocator {
|
|||
RMApp app = rm.submitApp(1024);
|
||||
dispatcher.await();
|
||||
|
||||
// Make a node to register so as to launch the AM.
|
||||
MockNM amNodeManager = rm.registerNode("amNM:1234", 2048);
|
||||
amNodeManager.nodeHeartbeat(true);
|
||||
dispatcher.await();
|
||||
|
||||
ApplicationAttemptId appAttemptId = app.getCurrentAppAttempt()
|
||||
.getAppAttemptId();
|
||||
rm.sendAMLaunched(appAttemptId);
|
||||
dispatcher.await();
|
||||
|
||||
JobId jobId = MRBuilderUtils.newJobId(appAttemptId.getApplicationId(), 0);
|
||||
Job job = mock(Job.class);
|
||||
when(job.getReport()).thenReturn(
|
||||
|
|
|
@ -60,6 +60,8 @@ public interface MRJobConfig {
|
|||
|
||||
public static final String QUEUE_NAME = "mapreduce.job.queuename";
|
||||
|
||||
public static final String JOB_TAGS = "mapreduce.job.tags";
|
||||
|
||||
public static final String JVM_NUMTASKS_TORUN = "mapreduce.job.jvm.numtasks";
|
||||
|
||||
public static final String SPLIT_FILE = "mapreduce.job.splitfile";
|
||||
|
|
|
@ -1,4 +1,5 @@
|
|||
<?xml version="1.0"?>
|
||||
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
|
||||
<!--
|
||||
Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
contributor license agreements. See the NOTICE file distributed with
|
||||
|
@ -15,7 +16,6 @@
|
|||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
-->
|
||||
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
|
||||
|
||||
<!-- Do not modify this file directly. Instead, copy entries that you -->
|
||||
<!-- wish to modify from this file into mapred-site.xml and change them -->
|
||||
|
@ -727,6 +727,14 @@
|
|||
</description>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>mapreduce.job.tags</name>
|
||||
<value></value>
|
||||
<description> Tags for the job that will be passed to YARN at submission
|
||||
time. Queries to YARN for applications can filter on these tags.
|
||||
</description>
|
||||
</property>
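Editor's note: the mapreduce.job.tags property added above is the client-facing half of MAPREDUCE-5699; a hedged sketch of how a job client might set it is below. The tag values are invented for illustration; per the YARNRunner hunk later in this change, the values are read with getTrimmedStringCollection() and copied into the ApplicationSubmissionContext at submit time.

// Illustrative sketch only: setting job tags from client code.
import org.apache.hadoop.conf.Configuration;

public class JobTagsExample {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // Comma-separated tags; YARN application queries can filter on these.
    conf.set("mapreduce.job.tags", "nightly,etl,team-data");
  }
}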
|
||||
|
||||
<property>
|
||||
<name>mapreduce.cluster.local.dir</name>
|
||||
<value>${hadoop.tmp.dir}/mapred/local</value>
|
||||
|
|
|
@ -400,4 +400,10 @@ public class ResourceMgrDelegate extends YarnClient {
|
|||
IOException {
|
||||
return client.getContainers(applicationAttemptId);
|
||||
}
|
||||
|
||||
@Override
|
||||
public void moveApplicationAcrossQueues(ApplicationId appId, String queue)
|
||||
throws YarnException, IOException {
|
||||
client.moveApplicationAcrossQueues(appId, queue);
|
||||
}
|
||||
}
|
||||
|
|
|
@ -21,7 +21,9 @@ package org.apache.hadoop.mapred;
|
|||
import java.io.IOException;
|
||||
import java.nio.ByteBuffer;
|
||||
import java.util.ArrayList;
|
||||
import java.util.Collection;
|
||||
import java.util.HashMap;
|
||||
import java.util.HashSet;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
import java.util.Vector;
|
||||
|
@ -467,6 +469,8 @@ public class YARNRunner implements ClientProtocol {
|
|||
ContainerLaunchContext.newInstance(localResources, environment,
|
||||
vargsFinal, null, securityTokens, acls);
|
||||
|
||||
Collection<String> tagsFromConf =
|
||||
jobConf.getTrimmedStringCollection(MRJobConfig.JOB_TAGS);
|
||||
|
||||
// Set up the ApplicationSubmissionContext
|
||||
ApplicationSubmissionContext appContext =
|
||||
|
@ -486,6 +490,9 @@ public class YARNRunner implements ClientProtocol {
|
|||
MRJobConfig.DEFAULT_MR_AM_MAX_ATTEMPTS));
|
||||
appContext.setResource(capability);
|
||||
appContext.setApplicationType(MRJobConfig.MR_APPLICATION_TYPE);
|
||||
if (tagsFromConf != null && !tagsFromConf.isEmpty()) {
|
||||
appContext.setApplicationTags(new HashSet<String>(tagsFromConf));
|
||||
}
|
||||
return appContext;
|
||||
}
|
||||
|
||||
|
|
|
@ -784,6 +784,12 @@
|
|||
<artifactId>grizzly-http-servlet</artifactId>
|
||||
<version>2.1.2</version>
|
||||
</dependency>
|
||||
|
||||
<dependency>
|
||||
<groupId>org.fusesource.leveldbjni</groupId>
|
||||
<artifactId>leveldbjni-all</artifactId>
|
||||
<version>1.8</version>
|
||||
</dependency>
|
||||
</dependencies>
|
||||
</dependencyManagement>
|
||||
|
||||
|
|
|
@ -54,6 +54,7 @@ import org.apache.hadoop.yarn.api.records.QueueInfo;
|
|||
import org.apache.hadoop.yarn.api.records.QueueUserACLInfo;
|
||||
import org.apache.hadoop.yarn.api.records.Resource;
|
||||
import org.apache.hadoop.yarn.api.records.ResourceRequest;
|
||||
import org.apache.hadoop.yarn.exceptions.YarnException;
|
||||
import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
|
||||
import org.apache.hadoop.yarn.server.resourcemanager.recovery.RMStateStore;
|
||||
import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainer;
|
||||
|
@ -865,4 +866,10 @@ public class ResourceSchedulerWrapper implements
|
|||
public RMContainer getRMContainer(ContainerId containerId) {
|
||||
return null;
|
||||
}
|
||||
|
||||
@Override
|
||||
public String moveApplication(ApplicationId appId, String newQueue)
|
||||
throws YarnException {
|
||||
return scheduler.moveApplication(appId, newQueue);
|
||||
}
|
||||
}
|
||||
|
|
|
@ -12,6 +12,14 @@ Trunk - Unreleased
|
|||
YARN-1498. Common scheduler changes for moving apps between queues (Sandy
|
||||
Ryza)
|
||||
|
||||
YARN-1504. RM changes for moving apps between queues (Sandy Ryza)
|
||||
|
||||
YARN-1499. Fair Scheduler changes for moving apps between queues (Sandy
|
||||
Ryza)
|
||||
|
||||
YARN-1497. Command line additions for moving apps between queues (Sandy
|
||||
Ryza)
|
||||
|
||||
IMPROVEMENTS
|
||||
|
||||
OPTIMIZATIONS
|
||||
|
@ -77,6 +85,40 @@ Release 2.4.0 - UNRELEASED
|
|||
YARN-1413. Implemented serving of aggregated-logs in the ApplicationHistory
|
||||
server. (Mayank Bansal via vinodkv)
|
||||
|
||||
YARN-1633. Defined user-facing entity, entity-info and event objects related
|
||||
to Application Timeline feature. (Zhijie Shen via vinodkv)
|
||||
|
||||
YARN-1611. Introduced the concept of a configuration provider which can be
|
||||
used by ResourceManager to read configuration locally or from remote systems
|
||||
so as to help RM failover. (Xuan Gong via vinodkv)
|
||||
|
||||
YARN-1659. Defined the ApplicationTimelineStore store as an abstraction for
|
||||
implementing different storage impls for storing timeline information.
|
||||
(Billie Rinaldi via vinodkv)
|
||||
|
||||
YARN-1634. Added a testable in-memory implementation of
|
||||
ApplicationTimelineStore. (Zhijie Shen via vinodkv)
|
||||
|
||||
YARN-1461. Added tags for YARN applications and changed RM to handle them.
|
||||
(Karthik Kambatla via zjshen)
|
||||
|
||||
YARN-1636. Augmented Application-history server's web-services to also expose
|
||||
new APIs for retrieving and storing timeline information. (Zhijie Shen via
|
||||
vinodkv)
|
||||
|
||||
YARN-1490. Introduced the ability to make ResourceManager optionally not kill
|
||||
all containers when an ApplicationMaster exits. (Jian He via vinodkv)
|
||||
|
||||
YARN-1041. Added the ApplicationMasterProtocol API for applications to use the
|
||||
ability in ResourceManager to optionally not kill containers when the
|
||||
ApplicationMaster exits. (Jian He via vinodkv)
|
||||
|
||||
YARN-1566. Changed Distributed Shell to retain containers across application
|
||||
attempts. (Jian He via vinodkv)
|
||||
|
||||
YARN-1635. Implemented a Leveldb based ApplicationTimelineStore. (Billie
|
||||
Rinaldi via zjshen)
|
||||
|
||||
IMPROVEMENTS
|
||||
|
||||
YARN-1007. Enhance History Reader interface for Containers. (Mayank Bansal via
|
||||
|
@ -94,6 +136,36 @@ Release 2.4.0 - UNRELEASED
|
|||
YARN-1617. Remove ancient comment and surround LOG.debug in
|
||||
AppSchedulingInfo.allocate (Sandy Ryza)
|
||||
|
||||
YARN-1639. Modified RM HA configuration handling to have a way of not
|
||||
requiring separate configuration files for each RM. (Xuan Gong via vinodkv)
|
||||
|
||||
YARN-1668. Modified RM HA handling of admin-acls to be available across RM
|
||||
failover by making using of a remote configuration-provider. (Xuan Gong via
|
||||
vinodkv)
|
||||
|
||||
YARN-1667. Modified RM HA handling of super users (with proxying ability) to
|
||||
be available across RM failover by making using of a remote
|
||||
configuration-provider. (Xuan Gong via vinodkv)
|
||||
|
||||
YARN-1285. Changed the default value of yarn.acl.enable in yarn-default.xml
|
||||
to be consistent with what exists (false) in the code and documentation.
|
||||
(Kenji Kikushima via vinodkv)
|
||||
|
||||
YARN-1669. Modified RM HA handling of protocol level service-ACLS to
|
||||
be available across RM failover by making using of a remote
|
||||
configuration-provider. (Xuan Gong via vinodkv)
|
||||
|
||||
YARN-1665. Simplify the configuration of RM HA by having better default
|
||||
values. (Xuan Gong via vinodkv)
|
||||
|
||||
YARN-1660. Simplified the RM HA configuration to accept and be able to simply
|
||||
depend just on configuration properties of the form
|
||||
yarn.resourcemanager.hostname.RMID and use the default ports for all service
|
||||
addresses. (Xuan Gong via vinodkv)
|
||||
|
||||
YARN-1493. Changed ResourceManager and Scheduler interfacing to recognize
|
||||
app-attempts separately from apps. (Jian He via vinodkv)
|
||||
|
||||
OPTIMIZATIONS
|
||||
|
||||
BUG FIXES
|
||||
|
@ -143,6 +215,24 @@ Release 2.4.0 - UNRELEASED
|
|||
YARN-1632. TestApplicationMasterServices should be under
|
||||
org.apache.hadoop.yarn.server.resourcemanager package (Chen He via jeagles)
|
||||
|
||||
YARN-1673. Fix option parsing in YARN's application CLI after it is broken
|
||||
by YARN-967. (Mayank Bansal via vinodkv)
|
||||
|
||||
YARN-1684. Fixed history server heap size in yarn script. (Billie Rinaldi
|
||||
via zjshen)
|
||||
|
||||
YARN-1166. Fixed app-specific and attempt-specific QueueMetrics to be
|
||||
triggered by accordingly app event and attempt event.
|
||||
|
||||
YARN-1689. Made RMAppAttempt get killed when RMApp is at ACCEPTED. (Vinod
|
||||
Kumar Vavilapalli via zjshen)
|
||||
|
||||
YARN-1661. Fixed DS ApplicationMaster to write the correct exit log. (Vinod
|
||||
Kumar Vavilapalli via zjshen)
|
||||
|
||||
YARN-1672. YarnConfiguration is missing a default for
|
||||
yarn.nodemanager.log.retain-seconds (Naren Koneru via kasha)
|
||||
|
||||
Release 2.3.0 - UNRELEASED
|
||||
|
||||
INCOMPATIBLE CHANGES
|
||||
|
@ -186,18 +276,8 @@ Release 2.3.0 - UNRELEASED
|
|||
YARN-1029. Added embedded leader election in the ResourceManager. (Karthik
|
||||
Kambatla via vinodkv)
|
||||
|
||||
YARN-1490. Introduced the ability to make ResourceManager optionally not kill
|
||||
all containers when an ApplicationMaster exits. (Jian He via vinodkv)
|
||||
|
||||
YARN-1033. Expose RM active/standby state to Web UI and REST API (kasha)
|
||||
|
||||
YARN-1041. Added the ApplicationMasterProtocol API for applications to use the
|
||||
ability in ResourceManager to optionally not kill containers when the
|
||||
ApplicationMaster exits. (Jian He via vinodkv)
|
||||
|
||||
YARN-1566. Changed Distributed Shell to retain containers across application
|
||||
attempts. (Jian He via vinodkv)
|
||||
|
||||
IMPROVEMENTS
|
||||
|
||||
YARN-305. Fair scheduler logs too many "Node offered to app" messages.
|
||||
|
@ -361,9 +441,6 @@ Release 2.3.0 - UNRELEASED
|
|||
YARN-1541. Changed ResourceManager to invalidate ApplicationMaster host/port
|
||||
information once an AM crashes. (Jian He via vinodkv)
|
||||
|
||||
YARN-1493. Changed ResourceManager and Scheduler interfacing to recognize
|
||||
app-attempts separately from apps. (Jian He via vinodkv)
|
||||
|
||||
YARN-1482. Modified WebApplicationProxy to make it work across ResourceManager
|
||||
fail-over. (Xuan Gong via vinodkv)
|
||||
|
||||
|
@ -549,9 +626,6 @@ Release 2.3.0 - UNRELEASED
|
|||
YARN-1574. RMDispatcher should be reset on transition to standby. (Xuan Gong
|
||||
via kasha)
|
||||
|
||||
YARN-1166. Fixed app-specific and attempt-specific QueueMetrics to be
|
||||
triggered by accordingly app event and attempt event.
|
||||
|
||||
YARN-1598. HA-related rmadmin commands don't work on a secure cluster (kasha)
|
||||
|
||||
YARN-1603. Remove two *.orig files which were unexpectedly committed.
|
||||
|
@ -576,6 +650,9 @@ Release 2.3.0 - UNRELEASED
|
|||
|
||||
YARN-1629. IndexOutOfBoundsException in MaxRunningAppsEnforcer (Sandy Ryza)
|
||||
|
||||
YARN-1628. Fixed the test failure in TestContainerManagerSecurity. (Vinod
|
||||
Kumar Vavilapalli via zjshen)
|
||||
|
||||
Release 2.2.0 - 2013-10-13
|
||||
|
||||
INCOMPATIBLE CHANGES
|
||||
|
|
|
@ -204,7 +204,7 @@ elif [ "$COMMAND" = "historyserver" ] ; then
|
|||
CLASSPATH=${CLASSPATH}:$YARN_CONF_DIR/ahs-config/log4j.properties
|
||||
CLASS='org.apache.hadoop.yarn.server.applicationhistoryservice.ApplicationHistoryServer'
|
||||
YARN_OPTS="$YARN_OPTS $YARN_HISTORYSERVER_OPTS"
|
||||
if [ "$YARN_RESOURCEMANAGER_HEAPSIZE" != "" ]; then
|
||||
if [ "$YARN_HISTORYSERVER_HEAPSIZE" != "" ]; then
|
||||
JAVA_HEAP_MAX="-Xmx""$YARN_HISTORYSERVER_HEAPSIZE""m"
|
||||
fi
|
||||
elif [ "$COMMAND" = "nodemanager" ] ; then
|
||||
|
|
|
@ -207,7 +207,7 @@ goto :eof
|
|||
set CLASSPATH=%CLASSPATH%;%YARN_CONF_DIR%\ahs-config\log4j.properties
|
||||
set CLASS=org.apache.hadoop.yarn.server.applicationhistoryservice.ApplicationHistoryServer
|
||||
set YARN_OPTS=%YARN_OPTS% %HADOOP_HISTORYSERVER_OPTS%
|
||||
if defined YARN_RESOURCEMANAGER_HEAPSIZE (
|
||||
if defined YARN_HISTORYSERVER_HEAPSIZE (
|
||||
set JAVA_HEAP_MAX=-Xmx%YARN_HISTORYSERVER_HEAPSIZE%m
|
||||
)
|
||||
goto :eof
|
||||
|
|
|
@ -0,0 +1,38 @@
|
|||
/**
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
* regarding copyright ownership. The ASF licenses this file
|
||||
* to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package org.apache.hadoop.yarn.api.protocolrecords;
|
||||
|
||||
import org.apache.hadoop.classification.InterfaceAudience;
|
||||
import org.apache.hadoop.classification.InterfaceStability;
|
||||
|
||||
/**
|
||||
* Enumeration that controls the scope of applications fetched
|
||||
*/
|
||||
@InterfaceAudience.Public
|
||||
@InterfaceStability.Unstable
|
||||
public enum ApplicationsRequestScope {
|
||||
/** All jobs */
|
||||
ALL,
|
||||
|
||||
/** Jobs viewable by current user */
|
||||
VIEWABLE,
|
||||
|
||||
/** Jobs owned by current user */
|
||||
OWN
|
||||
}
|
|
@ -21,7 +21,6 @@ package org.apache.hadoop.yarn.api.protocolrecords;
|
|||
import java.util.EnumSet;
|
||||
import java.util.Set;
|
||||
|
||||
import org.apache.commons.collections.buffer.UnboundedFifoBuffer;
|
||||
import org.apache.commons.lang.math.LongRange;
|
||||
import org.apache.hadoop.classification.InterfaceAudience.Private;
|
||||
import org.apache.hadoop.classification.InterfaceAudience.Public;
|
||||
|
@ -49,6 +48,86 @@ public abstract class GetApplicationsRequest {
|
|||
return request;
|
||||
}
|
||||
|
||||
/**
|
||||
* <p>
|
||||
* The request from clients to get a report of Applications matching the
|
||||
* given application types in the cluster from the
|
||||
* <code>ResourceManager</code>.
|
||||
* </p>
|
||||
*
|
||||
* @see ApplicationClientProtocol#getApplications(GetApplicationsRequest)
|
||||
*
|
||||
* <p>Setting any of the parameters to null would just disable that
|
||||
* filter</p>
|
||||
*
|
||||
* @param scope {@link ApplicationsRequestScope} to filter by
|
||||
* @param users list of users to filter by
|
||||
* @param queues list of scheduler queues to filter by
|
||||
* @param applicationTypes types of applications
|
||||
* @param applicationTags application tags to filter by
|
||||
* @param applicationStates application states to filter by
|
||||
* @param startRange range of application start times to filter by
|
||||
* @param finishRange range of application finish times to filter by
|
||||
* @param limit number of applications to limit to
|
||||
* @return {@link GetApplicationsRequest} to be used with
|
||||
* {@link ApplicationClientProtocol#getApplications(GetApplicationsRequest)}
|
||||
*/
|
||||
@Public
|
||||
@Stable
|
||||
public static GetApplicationsRequest newInstance(
|
||||
ApplicationsRequestScope scope,
|
||||
Set<String> users,
|
||||
Set<String> queues,
|
||||
Set<String> applicationTypes,
|
||||
Set<String> applicationTags,
|
||||
EnumSet<YarnApplicationState> applicationStates,
|
||||
LongRange startRange,
|
||||
LongRange finishRange,
|
||||
Long limit) {
|
||||
GetApplicationsRequest request =
|
||||
Records.newRecord(GetApplicationsRequest.class);
|
||||
if (scope != null) {
|
||||
request.setScope(scope);
|
||||
}
|
||||
request.setUsers(users);
|
||||
request.setQueues(queues);
|
||||
request.setApplicationTypes(applicationTypes);
|
||||
request.setApplicationTags(applicationTags);
|
||||
request.setApplicationStates(applicationStates);
|
||||
if (startRange != null) {
|
||||
request.setStartRange(
|
||||
startRange.getMinimumLong(), startRange.getMaximumLong());
|
||||
}
|
||||
if (finishRange != null) {
|
||||
request.setFinishRange(
|
||||
finishRange.getMinimumLong(), finishRange.getMaximumLong());
|
||||
}
|
||||
if (limit != null) {
|
||||
request.setLimit(limit);
|
||||
}
|
||||
return request;
|
||||
}
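Editor's note: a hedged usage sketch of the filtering factory method added above. The filter values are examples only; as the javadoc states, passing null for a parameter simply disables that filter.

// Illustrative only: building a filtered GetApplicationsRequest with the
// newInstance() overload introduced in this change.
import java.util.Collections;
import java.util.EnumSet;
import org.apache.commons.lang.math.LongRange;
import org.apache.hadoop.yarn.api.protocolrecords.ApplicationsRequestScope;
import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationsRequest;
import org.apache.hadoop.yarn.api.records.YarnApplicationState;

public class GetApplicationsRequestExample {
  public static GetApplicationsRequest buildRequest() {
    return GetApplicationsRequest.newInstance(
        ApplicationsRequestScope.OWN,             // only the caller's apps
        null,                                     // users: no filter
        Collections.singleton("default"),         // queue filter
        null,                                     // application types: no filter
        Collections.singleton("nightly"),         // application tags (new in this change)
        EnumSet.of(YarnApplicationState.RUNNING), // state filter
        new LongRange(0L, Long.MAX_VALUE),        // start-time range
        null,                                     // finish-time range: no filter
        10L);                                     // limit
  }
}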
|
||||
|
||||
/**
|
||||
* <p>
|
||||
* The request from clients to get a report of Applications matching the
|
||||
* given application types in the cluster from the
|
||||
* <code>ResourceManager</code>.
|
||||
* </p>
|
||||
*
|
||||
* @param scope {@link ApplicationsRequestScope} to filter by
|
||||
* @see ApplicationClientProtocol#getApplications(GetApplicationsRequest)
|
||||
*/
|
||||
@Public
|
||||
@Stable
|
||||
public static GetApplicationsRequest newInstance(
|
||||
ApplicationsRequestScope scope) {
|
||||
GetApplicationsRequest request =
|
||||
Records.newRecord(GetApplicationsRequest.class);
|
||||
request.setScope(scope);
|
||||
return request;
|
||||
}
|
||||
|
||||
/**
|
||||
* <p>
|
||||
* The request from clients to get a report of Applications matching the
|
||||
|
@ -257,4 +336,40 @@ public abstract class GetApplicationsRequest {
|
|||
@Private
|
||||
@Unstable
|
||||
public abstract void setFinishRange(long begin, long end);
|
||||
|
||||
/**
|
||||
* Get the tags to filter applications on
|
||||
*
|
||||
* @return list of tags to filter on
|
||||
*/
|
||||
@Private
|
||||
@Unstable
|
||||
public abstract Set<String> getApplicationTags();
|
||||
|
||||
/**
|
||||
* Set the list of tags to filter applications on
|
||||
*
|
||||
* @param tags list of tags to filter on
|
||||
*/
|
||||
@Private
|
||||
@Unstable
|
||||
public abstract void setApplicationTags(Set<String> tags);
|
||||
|
||||
/**
|
||||
* Get the {@link ApplicationsRequestScope} of applications to be filtered.
|
||||
*
|
||||
* @return {@link ApplicationsRequestScope} of applications to return.
|
||||
*/
|
||||
@Private
|
||||
@Unstable
|
||||
public abstract ApplicationsRequestScope getScope();
|
||||
|
||||
/**
|
||||
* Set the {@link ApplicationsRequestScope} of applications to filter.
|
||||
*
|
||||
* @param scope scope to use for filtering applications
|
||||
*/
|
||||
@Private
|
||||
@Unstable
|
||||
public abstract void setScope(ApplicationsRequestScope scope);
|
||||
}
|
||||
|
|
|
@ -25,6 +25,8 @@ import org.apache.hadoop.classification.InterfaceStability.Unstable;
|
|||
import org.apache.hadoop.yarn.api.ApplicationClientProtocol;
|
||||
import org.apache.hadoop.yarn.util.Records;
|
||||
|
||||
import java.util.Set;
|
||||
|
||||
/**
|
||||
* <p><code>ApplicationReport</code> is a report of an application.</p>
|
||||
*
|
||||
|
@ -321,6 +323,18 @@ public abstract class ApplicationReport {
|
|||
@Unstable
|
||||
public abstract void setApplicationType(String applicationType);
|
||||
|
||||
/**
|
||||
* Get all tags corresponding to the application
|
||||
* @return Application's tags
|
||||
*/
|
||||
@Public
|
||||
@Stable
|
||||
public abstract Set<String> getApplicationTags();
|
||||
|
||||
@Private
|
||||
@Unstable
|
||||
public abstract void setApplicationTags(Set<String> tags);
|
||||
|
||||
@Private
|
||||
@Stable
|
||||
public abstract void setAMRMToken(Token amRmToken);
|
||||
|
|
|
@ -25,8 +25,11 @@ import org.apache.hadoop.classification.InterfaceStability.Stable;
|
|||
import org.apache.hadoop.classification.InterfaceStability.Unstable;
|
||||
import org.apache.hadoop.yarn.api.ApplicationClientProtocol;
|
||||
import org.apache.hadoop.yarn.api.ApplicationMasterProtocol;
|
||||
import org.apache.hadoop.yarn.conf.YarnConfiguration;
|
||||
import org.apache.hadoop.yarn.util.Records;
|
||||
|
||||
import java.util.Set;
|
||||
|
||||
/**
|
||||
* <p><code>ApplicationSubmissionContext</code> represents all of the
|
||||
* information needed by the <code>ResourceManager</code> to launch
|
||||
|
@ -284,7 +287,6 @@ public abstract class ApplicationSubmissionContext {
|
|||
@Stable
|
||||
public abstract void setApplicationType(String applicationType);
|
||||
|
||||
|
||||
/**
|
||||
* Get the flag which indicates whether to keep containers across application
|
||||
* attempts or not.
|
||||
|
@ -314,4 +316,26 @@ public abstract class ApplicationSubmissionContext {
|
|||
@Stable
|
||||
public abstract void setKeepContainersAcrossApplicationAttempts(
|
||||
boolean keepContainers);
|
||||
|
||||
/**
|
||||
* Get tags for the application
|
||||
*
|
||||
* @return the application tags
|
||||
*/
|
||||
@Public
|
||||
@Stable
|
||||
public abstract Set<String> getApplicationTags();
|
||||
|
||||
/**
|
||||
* Set tags for the application. A maximum of
|
||||
* {@link YarnConfiguration#APPLICATION_MAX_TAGS} are allowed
|
||||
* per application. Each tag can be at most
|
||||
* {@link YarnConfiguration#APPLICATION_MAX_TAG_LENGTH}
|
||||
* characters, and can contain only ASCII characters.
|
||||
*
|
||||
* @param tags tags to set
|
||||
*/
|
||||
@Public
|
||||
@Stable
|
||||
public abstract void setApplicationTags(Set<String> tags);
|
||||
}
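Editor's note: a hedged sketch of tagging an application at submission time with the accessor added above. In real client code the context object would come from YarnClient#createApplication(); that part is assumed here.

// Illustrative only: attaching tags to a YARN application submission.
import java.util.HashSet;
import java.util.Set;
import org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext;

public class TagSubmissionSketch {
  public static void tag(ApplicationSubmissionContext appContext) {
    Set<String> tags = new HashSet<String>();
    tags.add("nightly");
    tags.add("etl");
    // Tags must respect YarnConfiguration#APPLICATION_MAX_TAGS and
    // APPLICATION_MAX_TAG_LENGTH, and may contain only ASCII characters.
    appContext.setApplicationTags(tags);
  }
}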
|
|
@ -0,0 +1,88 @@
|
|||
/**
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
* regarding copyright ownership. The ASF licenses this file
|
||||
* to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package org.apache.hadoop.yarn.api.records.apptimeline;
|
||||
|
||||
import java.util.ArrayList;
|
||||
import java.util.List;
|
||||
|
||||
import javax.xml.bind.annotation.XmlAccessType;
|
||||
import javax.xml.bind.annotation.XmlAccessorType;
|
||||
import javax.xml.bind.annotation.XmlElement;
|
||||
import javax.xml.bind.annotation.XmlRootElement;
|
||||
|
||||
import org.apache.hadoop.classification.InterfaceAudience.Public;
|
||||
import org.apache.hadoop.classification.InterfaceStability.Unstable;
|
||||
|
||||
/**
|
||||
* The class that hosts a list of application timeline entities.
|
||||
*/
|
||||
@XmlRootElement(name = "entities")
|
||||
@XmlAccessorType(XmlAccessType.NONE)
|
||||
@Public
|
||||
@Unstable
|
||||
public class ATSEntities {
|
||||
|
||||
private List<ATSEntity> entities =
|
||||
new ArrayList<ATSEntity>();
|
||||
|
||||
public ATSEntities() {
|
||||
|
||||
}
|
||||
|
||||
/**
|
||||
* Get a list of entities
|
||||
*
|
||||
* @return a list of entities
|
||||
*/
|
||||
@XmlElement(name = "entities")
|
||||
public List<ATSEntity> getEntities() {
|
||||
return entities;
|
||||
}
|
||||
|
||||
/**
|
||||
* Add a single entity into the existing entity list
|
||||
*
|
||||
* @param entity
|
||||
* a single entity
|
||||
*/
|
||||
public void addEntity(ATSEntity entity) {
|
||||
entities.add(entity);
|
||||
}
|
||||
|
||||
/**
|
||||
* Add a list of entities to the existing entity list
|
||||
*
|
||||
* @param entities
|
||||
* a list of entities
|
||||
*/
|
||||
public void addEntities(List<ATSEntity> entities) {
|
||||
this.entities.addAll(entities);
|
||||
}
|
||||
|
||||
/**
|
||||
* Set the entity list to the given list of entities
|
||||
*
|
||||
* @param entities
|
||||
* a list of entities
|
||||
*/
|
||||
public void setEntities(List<ATSEntity> entities) {
|
||||
this.entities = entities;
|
||||
}
|
||||
|
||||
}
|
|
@ -0,0 +1,401 @@
|
|||
/**
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
* regarding copyright ownership. The ASF licenses this file
|
||||
* to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package org.apache.hadoop.yarn.api.records.apptimeline;
|
||||
|
||||
import java.util.ArrayList;
|
||||
import java.util.HashMap;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
import java.util.Map.Entry;
|
||||
|
||||
import javax.xml.bind.annotation.XmlAccessType;
|
||||
import javax.xml.bind.annotation.XmlAccessorType;
|
||||
import javax.xml.bind.annotation.XmlElement;
|
||||
import javax.xml.bind.annotation.XmlRootElement;
|
||||
|
||||
import org.apache.hadoop.classification.InterfaceAudience.Public;
|
||||
import org.apache.hadoop.classification.InterfaceStability.Unstable;
|
||||
|
||||
/**
|
||||
* <p>
|
||||
* The class that contains the meta information of some conceptual entity of
|
||||
* an application and its related events. The entity can be an application, an
|
||||
* application attempt, a container, or any other user-defined object.
|
||||
* </p>
|
||||
*
|
||||
* <p>
|
||||
* Primary filters will be used to index the entities in
|
||||
* <code>ApplicationTimelineStore</code>, such that users should carefully
|
||||
* choose the information they want to store as the primary filters. The
|
||||
* remaining can be stored as other information.
|
||||
* </p>
|
||||
*/
|
||||
@XmlRootElement(name = "entity")
|
||||
@XmlAccessorType(XmlAccessType.NONE)
|
||||
@Public
|
||||
@Unstable
|
||||
public class ATSEntity implements Comparable<ATSEntity> {
|
||||
|
||||
private String entityType;
|
||||
private String entityId;
|
||||
private Long startTime;
|
||||
private List<ATSEvent> events = new ArrayList<ATSEvent>();
|
||||
private Map<String, List<String>> relatedEntities =
|
||||
new HashMap<String, List<String>>();
|
||||
private Map<String, Object> primaryFilters =
|
||||
new HashMap<String, Object>();
|
||||
private Map<String, Object> otherInfo =
|
||||
new HashMap<String, Object>();
|
||||
|
||||
public ATSEntity() {
|
||||
|
||||
}
|
||||
|
||||
/**
|
||||
* Get the entity type
|
||||
*
|
||||
* @return the entity type
|
||||
*/
|
||||
@XmlElement(name = "entitytype")
|
||||
public String getEntityType() {
|
||||
return entityType;
|
||||
}
|
||||
|
||||
/**
|
||||
* Set the entity type
|
||||
*
|
||||
* @param entityType
|
||||
* the entity type
|
||||
*/
|
||||
public void setEntityType(String entityType) {
|
||||
this.entityType = entityType;
|
||||
}
|
||||
|
||||
/**
|
||||
* Get the entity Id
|
||||
*
|
||||
* @return the entity Id
|
||||
*/
|
||||
@XmlElement(name = "entity")
|
||||
public String getEntityId() {
|
||||
return entityId;
|
||||
}
|
||||
|
||||
/**
|
||||
* Set the entity Id
|
||||
*
|
||||
* @param entityId
|
||||
* the entity Id
|
||||
*/
|
||||
public void setEntityId(String entityId) {
|
||||
this.entityId = entityId;
|
||||
}
|
||||
|
||||
/**
|
||||
* Get the start time of the entity
|
||||
*
|
||||
* @return the start time of the entity
|
||||
*/
|
||||
@XmlElement(name = "starttime")
|
||||
public Long getStartTime() {
|
||||
return startTime;
|
||||
}
|
||||
|
||||
/**
|
||||
* Set the start time of the entity
|
||||
*
|
||||
* @param startTime
|
||||
* the start time of the entity
|
||||
*/
|
||||
public void setStartTime(Long startTime) {
|
||||
this.startTime = startTime;
|
||||
}
|
||||
|
||||
/**
|
||||
* Get a list of events related to the entity
|
||||
*
|
||||
* @return a list of events related to the entity
|
||||
*/
|
||||
@XmlElement(name = "events")
|
||||
public List<ATSEvent> getEvents() {
|
||||
return events;
|
||||
}
|
||||
|
||||
/**
|
||||
* Add a single event related to the entity to the existing event list
|
||||
*
|
||||
* @param event
|
||||
* a single event related to the entity
|
||||
*/
|
||||
public void addEvent(ATSEvent event) {
|
||||
events.add(event);
|
||||
}
|
||||
|
||||
/**
|
||||
* Add a list of events related to the entity to the existing event list
|
||||
*
|
||||
* @param events
|
||||
* a list of events related to the entity
|
||||
*/
|
||||
public void addEvents(List<ATSEvent> events) {
|
||||
this.events.addAll(events);
|
||||
}
|
||||
|
||||
/**
|
||||
* Set the event list to the given list of events related to the entity
|
||||
*
|
||||
* @param events
|
||||
* events a list of events related to the entity
|
||||
*/
|
||||
public void setEvents(List<ATSEvent> events) {
|
||||
this.events = events;
|
||||
}
|
||||
|
||||
/**
|
||||
* Get the related entities
|
||||
*
|
||||
* @return the related entities
|
||||
*/
|
||||
@XmlElement(name = "relatedentities")
|
||||
public Map<String, List<String>> getRelatedEntities() {
|
||||
return relatedEntities;
|
||||
}
|
||||
|
||||
/**
|
||||
* Add an entity to the existing related entity map
|
||||
*
|
||||
* @param entityType
|
||||
* the entity type
|
||||
* @param entityId
|
||||
* the entity Id
|
||||
*/
|
||||
public void addRelatedEntity(String entityType, String entityId) {
|
||||
List<String> thisRelatedEntity = relatedEntities.get(entityType);
|
||||
if (thisRelatedEntity == null) {
|
||||
thisRelatedEntity = new ArrayList<String>();
|
||||
relatedEntities.put(entityType, thisRelatedEntity);
|
||||
}
|
||||
thisRelatedEntity.add(entityId);
|
||||
}
|
||||
|
||||
/**
|
||||
* Add a map of related entities to the existing related entity map
|
||||
*
|
||||
* @param relatedEntities
|
||||
* a map of related entities
|
||||
*/
|
||||
public void addRelatedEntities(Map<String, List<String>> relatedEntities) {
|
||||
for (Entry<String, List<String>> relatedEntity :
|
||||
relatedEntities.entrySet()) {
|
||||
List<String> thisRelatedEntity =
|
||||
this.relatedEntities.get(relatedEntity.getKey());
|
||||
if (thisRelatedEntity == null) {
|
||||
this.relatedEntities.put(
|
||||
relatedEntity.getKey(), relatedEntity.getValue());
|
||||
} else {
|
||||
thisRelatedEntity.addAll(relatedEntity.getValue());
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Set the related entity map to the given map of related entities
|
||||
*
|
||||
* @param relatedEntities
|
||||
* a map of related entities
|
||||
*/
|
||||
public void setRelatedEntities(
|
||||
Map<String, List<String>> relatedEntities) {
|
||||
this.relatedEntities = relatedEntities;
|
||||
}
|
||||
|
||||
/**
|
||||
* Get the primary filters
|
||||
*
|
||||
* @return the primary filters
|
||||
*/
|
||||
@XmlElement(name = "primaryfilters")
|
||||
public Map<String, Object> getPrimaryFilters() {
|
||||
return primaryFilters;
|
||||
}
|
||||
|
||||
/**
|
||||
* Add a single piece of primary filter to the existing primary filter map
|
||||
*
|
||||
* @param key
|
||||
* the primary filter key
|
||||
* @param value
|
||||
* the primary filter value
|
||||
*/
|
||||
public void addPrimaryFilter(String key, Object value) {
|
||||
primaryFilters.put(key, value);
|
||||
}
|
||||
|
||||
/**
|
||||
* Add a map of primary filters to the existing primary filter map
|
||||
*
|
||||
* @param primaryFilters
|
||||
* a map of primary filters
|
||||
*/
|
||||
public void addPrimaryFilters(Map<String, Object> primaryFilters) {
|
||||
this.primaryFilters.putAll(primaryFilters);
|
||||
}
|
||||
|
||||
/**
|
||||
* Set the primary filter map to the given map of primary filters
|
||||
*
|
||||
* @param primaryFilters
|
||||
* a map of primary filters
|
||||
*/
|
||||
public void setPrimaryFilters(Map<String, Object> primaryFilters) {
|
||||
this.primaryFilters = primaryFilters;
|
||||
}
|
||||
|
||||
/**
|
||||
* Get the other information of the entity
|
||||
*
|
||||
* @return the other information of the entity
|
||||
*/
|
||||
@XmlElement(name = "otherinfo")
|
||||
public Map<String, Object> getOtherInfo() {
|
||||
return otherInfo;
|
||||
}
|
||||
|
||||
/**
|
||||
* Add one piece of other information of the entity to the existing other info
|
||||
* map
|
||||
*
|
||||
* @param key
|
||||
* the other information key
|
||||
* @param value
|
||||
* the other information value
|
||||
*/
|
||||
public void addOtherInfo(String key, Object value) {
|
||||
this.otherInfo.put(key, value);
|
||||
}
|
||||
|
||||
/**
|
||||
* Add a map of other information of the entity to the existing other info map
|
||||
*
|
||||
* @param otherInfo
|
||||
* a map of other information
|
||||
*/
|
||||
public void addOtherInfo(Map<String, Object> otherInfo) {
|
||||
this.otherInfo.putAll(otherInfo);
|
||||
}
|
||||
|
||||
/**
|
||||
* Set the other info map to the given map of other information
|
||||
*
|
||||
* @param otherInfo
|
||||
* a map of other information
|
||||
*/
|
||||
public void setOtherInfo(Map<String, Object> otherInfo) {
|
||||
this.otherInfo = otherInfo;
|
||||
}
|
||||
|
||||
@Override
|
||||
public int hashCode() {
|
||||
// generated by eclipse
|
||||
final int prime = 31;
|
||||
int result = 1;
|
||||
result = prime * result + ((entityId == null) ? 0 : entityId.hashCode());
|
||||
result =
|
||||
prime * result + ((entityType == null) ? 0 : entityType.hashCode());
|
||||
result = prime * result + ((events == null) ? 0 : events.hashCode());
|
||||
result = prime * result + ((otherInfo == null) ? 0 : otherInfo.hashCode());
|
||||
result =
|
||||
prime * result
|
||||
+ ((primaryFilters == null) ? 0 : primaryFilters.hashCode());
|
||||
result =
|
||||
prime * result
|
||||
+ ((relatedEntities == null) ? 0 : relatedEntities.hashCode());
|
||||
result = prime * result + ((startTime == null) ? 0 : startTime.hashCode());
|
||||
return result;
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean equals(Object obj) {
|
||||
// generated by eclipse
|
||||
if (this == obj)
|
||||
return true;
|
||||
if (obj == null)
|
||||
return false;
|
||||
if (getClass() != obj.getClass())
|
||||
return false;
|
||||
ATSEntity other = (ATSEntity) obj;
|
||||
if (entityId == null) {
|
||||
if (other.entityId != null)
|
||||
return false;
|
||||
} else if (!entityId.equals(other.entityId))
|
||||
return false;
|
||||
if (entityType == null) {
|
||||
if (other.entityType != null)
|
||||
return false;
|
||||
} else if (!entityType.equals(other.entityType))
|
||||
return false;
|
||||
if (events == null) {
|
||||
if (other.events != null)
|
||||
return false;
|
||||
} else if (!events.equals(other.events))
|
||||
return false;
|
||||
if (otherInfo == null) {
|
||||
if (other.otherInfo != null)
|
||||
return false;
|
||||
} else if (!otherInfo.equals(other.otherInfo))
|
||||
return false;
|
||||
if (primaryFilters == null) {
|
||||
if (other.primaryFilters != null)
|
||||
return false;
|
||||
} else if (!primaryFilters.equals(other.primaryFilters))
|
||||
return false;
|
||||
if (relatedEntities == null) {
|
||||
if (other.relatedEntities != null)
|
||||
return false;
|
||||
} else if (!relatedEntities.equals(other.relatedEntities))
|
||||
return false;
|
||||
if (startTime == null) {
|
||||
if (other.startTime != null)
|
||||
return false;
|
||||
} else if (!startTime.equals(other.startTime))
|
||||
return false;
|
||||
return true;
|
||||
}
|
||||
|
||||
@Override
|
||||
public int compareTo(ATSEntity other) {
|
||||
int comparison = entityType.compareTo(other.entityType);
|
||||
if (comparison == 0) {
|
||||
long thisStartTime =
|
||||
startTime == null ? Long.MIN_VALUE : startTime;
|
||||
long otherStartTime =
|
||||
other.startTime == null ? Long.MIN_VALUE : other.startTime;
|
||||
if (thisStartTime > otherStartTime) {
|
||||
return -1;
|
||||
} else if (thisStartTime < otherStartTime) {
|
||||
return 1;
|
||||
} else {
|
||||
return entityId.compareTo(other.entityId);
|
||||
}
|
||||
} else {
|
||||
return comparison;
|
||||
}
|
||||
}
|
||||
|
||||
}
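
A rough sketch of how the mutators above compose; all identifiers are placeholders. Primary filters are what the store indexes on, other info is stored but not indexed, and related entities are grouped by entity type:

ATSEntity entity = new ATSEntity();
entity.setEntityType("EXAMPLE_TYPE");
entity.setEntityId("example_entity_1");
entity.addPrimaryFilter("user", "alice");                 // indexed by the store
entity.addOtherInfo("attempts", 3);                       // stored, not indexed
entity.addRelatedEntity("EXAMPLE_CHILD_TYPE", "child_1"); // grouped by type

ATSEvent event = new ATSEvent();
event.setEventType("EXAMPLE_EVENT");
event.setTimestamp(System.currentTimeMillis());
entity.addEvent(event);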
@@ -0,0 +1,172 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.yarn.api.records.apptimeline;

import java.util.HashMap;
import java.util.Map;

import javax.xml.bind.annotation.XmlAccessType;
import javax.xml.bind.annotation.XmlAccessorType;
import javax.xml.bind.annotation.XmlElement;
import javax.xml.bind.annotation.XmlRootElement;

import org.apache.hadoop.classification.InterfaceAudience.Public;
import org.apache.hadoop.classification.InterfaceStability.Unstable;

/**
 * The class that contains the information of an event that is related to some
 * conceptual entity of an application. Users are free to define what the event
 * means, such as starting an application, getting allocated a container, etc.
 */
@XmlRootElement(name = "event")
@XmlAccessorType(XmlAccessType.NONE)
@Public
@Unstable
public class ATSEvent implements Comparable<ATSEvent> {

  private long timestamp;
  private String eventType;
  private Map<String, Object> eventInfo = new HashMap<String, Object>();

  public ATSEvent() {
  }

  /**
   * Get the timestamp of the event
   *
   * @return the timestamp of the event
   */
  @XmlElement(name = "timestamp")
  public long getTimestamp() {
    return timestamp;
  }

  /**
   * Set the timestamp of the event
   *
   * @param timestamp
   *          the timestamp of the event
   */
  public void setTimestamp(long timestamp) {
    this.timestamp = timestamp;
  }

  /**
   * Get the event type
   *
   * @return the event type
   */
  @XmlElement(name = "eventtype")
  public String getEventType() {
    return eventType;
  }

  /**
   * Set the event type
   *
   * @param eventType
   *          the event type
   */
  public void setEventType(String eventType) {
    this.eventType = eventType;
  }

  /**
   * Get the information of the event
   *
   * @return the information of the event
   */
  @XmlElement(name = "eventinfo")
  public Map<String, Object> getEventInfo() {
    return eventInfo;
  }

  /**
   * Add one piece of the information of the event to the existing information
   * map
   *
   * @param key
   *          the information key
   * @param value
   *          the information value
   */
  public void addEventInfo(String key, Object value) {
    this.eventInfo.put(key, value);
  }

  /**
   * Add a map of the information of the event to the existing information map
   *
   * @param eventInfo
   *          a map of the information of the event
   */
  public void addEventInfo(Map<String, Object> eventInfo) {
    this.eventInfo.putAll(eventInfo);
  }

  /**
   * Set the information map to the given map of the information of the event
   *
   * @param eventInfo
   *          a map of the information of the event
   */
  public void setEventInfo(Map<String, Object> eventInfo) {
    this.eventInfo = eventInfo;
  }

  @Override
  public int compareTo(ATSEvent other) {
    if (timestamp > other.timestamp) {
      return -1;
    } else if (timestamp < other.timestamp) {
      return 1;
    } else {
      return eventType.compareTo(other.eventType);
    }
  }

  @Override
  public boolean equals(Object o) {
    if (this == o)
      return true;
    if (o == null || getClass() != o.getClass())
      return false;

    ATSEvent atsEvent = (ATSEvent) o;

    if (timestamp != atsEvent.timestamp)
      return false;
    if (!eventType.equals(atsEvent.eventType))
      return false;
    if (eventInfo != null ? !eventInfo.equals(atsEvent.eventInfo) :
        atsEvent.eventInfo != null)
      return false;

    return true;
  }

  @Override
  public int hashCode() {
    int result = (int) (timestamp ^ (timestamp >>> 32));
    result = 31 * result + eventType.hashCode();
    result = 31 * result + (eventInfo != null ? eventInfo.hashCode() : 0);
    return result;
  }
}
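
Note that compareTo() inverts the usual order: a larger timestamp sorts first. A small sketch (assuming java.util.Collections is available and an entity populated as in the earlier sketches):

List<ATSEvent> events = new ArrayList<ATSEvent>(entity.getEvents());
Collections.sort(events);
// events.get(0) is now the most recent event of the entity.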
@@ -0,0 +1,189 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.yarn.api.records.apptimeline;

import java.util.ArrayList;
import java.util.List;

import javax.xml.bind.annotation.XmlAccessType;
import javax.xml.bind.annotation.XmlAccessorType;
import javax.xml.bind.annotation.XmlElement;
import javax.xml.bind.annotation.XmlRootElement;

import org.apache.hadoop.classification.InterfaceAudience.Public;
import org.apache.hadoop.classification.InterfaceStability.Unstable;

/**
 * The class that hosts a list of events, which are categorized according to
 * their related entities.
 */
@XmlRootElement(name = "events")
@XmlAccessorType(XmlAccessType.NONE)
@Public
@Unstable
public class ATSEvents {

  private List<ATSEventsOfOneEntity> allEvents =
      new ArrayList<ATSEventsOfOneEntity>();

  public ATSEvents() {

  }

  /**
   * Get a list of {@link ATSEventsOfOneEntity} instances
   *
   * @return a list of {@link ATSEventsOfOneEntity} instances
   */
  @XmlElement(name = "events")
  public List<ATSEventsOfOneEntity> getAllEvents() {
    return allEvents;
  }

  /**
   * Add a single {@link ATSEventsOfOneEntity} instance into the existing list
   *
   * @param eventsOfOneEntity
   *          a single {@link ATSEventsOfOneEntity} instance
   */
  public void addEvent(ATSEventsOfOneEntity eventsOfOneEntity) {
    allEvents.add(eventsOfOneEntity);
  }

  /**
   * Add a list of {@link ATSEventsOfOneEntity} instances into the existing list
   *
   * @param allEvents
   *          a list of {@link ATSEventsOfOneEntity} instances
   */
  public void addEvents(List<ATSEventsOfOneEntity> allEvents) {
    this.allEvents.addAll(allEvents);
  }

  /**
   * Set the list to the given list of {@link ATSEventsOfOneEntity} instances
   *
   * @param allEvents
   *          a list of {@link ATSEventsOfOneEntity} instances
   */
  public void setEvents(List<ATSEventsOfOneEntity> allEvents) {
    this.allEvents.clear();
    this.allEvents.addAll(allEvents);
  }

  /**
   * The class that hosts a list of events that are only related to one entity.
   */
  @XmlRootElement(name = "events")
  @XmlAccessorType(XmlAccessType.NONE)
  @Public
  @Unstable
  public static class ATSEventsOfOneEntity {

    private String entityId;
    private String entityType;
    private List<ATSEvent> events = new ArrayList<ATSEvent>();

    public ATSEventsOfOneEntity() {

    }

    /**
     * Get the entity Id
     *
     * @return the entity Id
     */
    @XmlElement(name = "entity")
    public String getEntityId() {
      return entityId;
    }

    /**
     * Set the entity Id
     *
     * @param entityId
     *          the entity Id
     */
    public void setEntityId(String entityId) {
      this.entityId = entityId;
    }

    /**
     * Get the entity type
     *
     * @return the entity type
     */
    @XmlElement(name = "entitytype")
    public String getEntityType() {
      return entityType;
    }

    /**
     * Set the entity type
     *
     * @param entityType
     *          the entity type
     */
    public void setEntityType(String entityType) {
      this.entityType = entityType;
    }

    /**
     * Get a list of events
     *
     * @return a list of events
     */
    @XmlElement(name = "events")
    public List<ATSEvent> getEvents() {
      return events;
    }

    /**
     * Add a single event to the existing event list
     *
     * @param event
     *          a single event
     */
    public void addEvent(ATSEvent event) {
      events.add(event);
    }

    /**
     * Add a list of events to the existing event list
     *
     * @param events
     *          a list of events
     */
    public void addEvents(List<ATSEvent> events) {
      this.events.addAll(events);
    }

    /**
     * Set the event list to the given list of events
     *
     * @param events
     *          a list of events
     */
    public void setEvents(List<ATSEvent> events) {
      this.events = events;
    }

  }

}
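
A sketch of grouping events per entity with the containers above (ids are placeholders, and event is an ATSEvent built as in the earlier sketch):

ATSEvents.ATSEventsOfOneEntity perEntity = new ATSEvents.ATSEventsOfOneEntity();
perEntity.setEntityType("EXAMPLE_TYPE");
perEntity.setEntityId("example_entity_1");
perEntity.addEvent(event);

ATSEvents response = new ATSEvents();
response.addEvent(perEntity);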
@@ -0,0 +1,175 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.yarn.api.records.apptimeline;

import org.apache.hadoop.classification.InterfaceAudience.Public;
import org.apache.hadoop.classification.InterfaceStability.Unstable;

import javax.xml.bind.annotation.XmlAccessType;
import javax.xml.bind.annotation.XmlAccessorType;
import javax.xml.bind.annotation.XmlElement;
import javax.xml.bind.annotation.XmlRootElement;
import java.util.ArrayList;
import java.util.List;

/**
 * A class that holds a list of put errors. This is the response returned
 * when a list of {@link ATSEntity} objects is added to the application
 * timeline. If there are errors in storing individual entity objects,
 * they will be indicated in the list of errors.
 */
@XmlRootElement(name = "errors")
@XmlAccessorType(XmlAccessType.NONE)
@Public
@Unstable
public class ATSPutErrors {

  private List<ATSPutError> errors = new ArrayList<ATSPutError>();

  public ATSPutErrors() {

  }

  /**
   * Get a list of {@link ATSPutError} instances
   *
   * @return a list of {@link ATSPutError} instances
   */
  @XmlElement(name = "errors")
  public List<ATSPutError> getErrors() {
    return errors;
  }

  /**
   * Add a single {@link ATSPutError} instance into the existing list
   *
   * @param error
   *          a single {@link ATSPutError} instance
   */
  public void addError(ATSPutError error) {
    errors.add(error);
  }

  /**
   * Add a list of {@link ATSPutError} instances into the existing list
   *
   * @param errors
   *          a list of {@link ATSPutError} instances
   */
  public void addErrors(List<ATSPutError> errors) {
    this.errors.addAll(errors);
  }

  /**
   * Set the list to the given list of {@link ATSPutError} instances
   *
   * @param errors
   *          a list of {@link ATSPutError} instances
   */
  public void setErrors(List<ATSPutError> errors) {
    this.errors.clear();
    this.errors.addAll(errors);
  }

  /**
   * A class that holds the error code for one entity.
   */
  @XmlRootElement(name = "error")
  @XmlAccessorType(XmlAccessType.NONE)
  @Public
  @Unstable
  public static class ATSPutError {
    /**
     * Error code returned when no start time can be found when putting an
     * entity. This occurs when the entity does not already exist in the
     * store and it is put with no start time or events specified.
     */
    public static final int NO_START_TIME = 1;
    /**
     * Error code returned if an IOException is encountered when putting an
     * entity.
     */
    public static final int IO_EXCEPTION = 2;

    private String entityId;
    private String entityType;
    private int errorCode;

    /**
     * Get the entity Id
     *
     * @return the entity Id
     */
    @XmlElement(name = "entity")
    public String getEntityId() {
      return entityId;
    }

    /**
     * Set the entity Id
     *
     * @param entityId
     *          the entity Id
     */
    public void setEntityId(String entityId) {
      this.entityId = entityId;
    }

    /**
     * Get the entity type
     *
     * @return the entity type
     */
    @XmlElement(name = "entitytype")
    public String getEntityType() {
      return entityType;
    }

    /**
     * Set the entity type
     *
     * @param entityType
     *          the entity type
     */
    public void setEntityType(String entityType) {
      this.entityType = entityType;
    }

    /**
     * Get the error code
     *
     * @return an error code
     */
    @XmlElement(name = "errorcode")
    public int getErrorCode() {
      return errorCode;
    }

    /**
     * Set the error code to the given error code
     *
     * @param errorCode
     *          an error code
     */
    public void setErrorCode(int errorCode) {
      this.errorCode = errorCode;
    }

  }

}
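
A sketch of how a caller might inspect the result of a put; the errors object is assumed to be the ATSPutErrors returned by whatever store or client performs the write:

for (ATSPutErrors.ATSPutError error : errors.getErrors()) {
  if (error.getErrorCode() == ATSPutErrors.ATSPutError.NO_START_TIME) {
    System.err.println("Entity " + error.getEntityId() + " of type "
        + error.getEntityType() + " was rejected: no start time could be derived");
  }
}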
@@ -0,0 +1,21 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
@InterfaceAudience.Public
package org.apache.hadoop.yarn.api.records.apptimeline;
import org.apache.hadoop.classification.InterfaceAudience;
@@ -0,0 +1,64 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.yarn.conf;

import java.io.IOException;

import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.classification.InterfaceStability.Unstable;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.yarn.exceptions.YarnException;

@Private
@Unstable
/**
 * Base class to implement ConfigurationProvider.
 * Real ConfigurationProvider implementations need to derive from it and
 * implement load methods to actually load the configuration.
 */
public abstract class ConfigurationProvider {

  public void init(Configuration conf) throws Exception {
    initInternal(conf);
  }

  public void close() throws Exception {
    closeInternal();
  }

  /**
   * Get the configuration.
   * @param name The configuration file name
   * @return configuration
   * @throws YarnException
   * @throws IOException
   */
  public abstract Configuration getConfiguration(String name)
      throws YarnException, IOException;

  /**
   * Derived classes initialize themselves using this method.
   */
  public abstract void initInternal(Configuration conf) throws Exception;

  /**
   * Derived classes close themselves using this method.
   */
  public abstract void closeInternal() throws Exception;
}
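
A minimal sketch of a concrete provider, assuming an implementation that simply resolves the requested file name on the local classpath (this class is illustrative only and is not part of the patch):

public class ClasspathConfigurationProvider extends ConfigurationProvider {

  @Override
  public void initInternal(Configuration bootstrapConf) throws Exception {
    // Nothing to set up in this sketch.
  }

  @Override
  public void closeInternal() throws Exception {
    // Nothing to release in this sketch.
  }

  @Override
  public Configuration getConfiguration(String name)
      throws YarnException, IOException {
    Configuration conf = new Configuration(false);
    conf.addResource(name);   // load e.g. capacity-scheduler.xml from the classpath
    return conf;
  }
}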
@@ -0,0 +1,57 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.yarn.conf;

import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.classification.InterfaceStability.Unstable;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.util.ReflectionUtils;
import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;

@Private
@Unstable
/**
 * Factory for {@link ConfigurationProvider} implementations.
 */
public class ConfigurationProviderFactory {
  /**
   * Creates an instance of {@link ConfigurationProvider} using given
   * configuration.
   * @param conf
   * @return configurationProvider
   */
  @SuppressWarnings("unchecked")
  public static ConfigurationProvider
      getConfigurationProvider(Configuration conf) {
    Class<? extends ConfigurationProvider> defaultProviderClass;
    try {
      defaultProviderClass = (Class<? extends ConfigurationProvider>)
          Class.forName(
              YarnConfiguration.DEFAULT_RM_CONFIGURATION_PROVIDER_CLASS);
    } catch (Exception e) {
      throw new YarnRuntimeException(
          "Invalid default configuration provider class "
              + YarnConfiguration.DEFAULT_RM_CONFIGURATION_PROVIDER_CLASS, e);
    }
    ConfigurationProvider configurationProvider = ReflectionUtils.newInstance(
        conf.getClass(YarnConfiguration.RM_CONFIGURATION_PROVIDER_CLASS,
            defaultProviderClass, ConfigurationProvider.class), conf);
    return configurationProvider;
  }
}
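
Usage sketch: the provider class is taken from yarn.resourcemanager.configuration.provider-class (the RM_CONFIGURATION_PROVIDER_CLASS key added to YarnConfiguration later in this patch), falling back to LocalConfigurationProvider:

Configuration conf = new Configuration();
conf.set(YarnConfiguration.RM_CONFIGURATION_PROVIDER_CLASS,
    "org.apache.hadoop.yarn.LocalConfigurationProvider");   // optional override
ConfigurationProvider provider =
    ConfigurationProviderFactory.getConfigurationProvider(conf);
provider.init(conf);
Configuration capacityScheduler =
    provider.getConfiguration(YarnConfiguration.CS_CONFIGURATION_FILE);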
@@ -21,10 +21,13 @@ package org.apache.hadoop.yarn.conf;

import com.google.common.annotations.VisibleForTesting;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.HadoopIllegalArgumentException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;

import java.net.InetSocketAddress;
import java.util.Collection;

@InterfaceAudience.Private

@@ -98,21 +101,7 @@ public class HAUtil {

    for (String id: ids) {
      // verify the RM service addresses configurations for every RMIds
      for (String prefix : YarnConfiguration.RM_SERVICES_ADDRESS_CONF_KEYS) {
        String confKey = null;
        try {
          confKey = addSuffix(prefix, id);
          if (conf.getTrimmed(confKey) == null) {
            throwBadConfigurationException(getNeedToSetValueMessage(confKey));
          }
        } catch (IllegalArgumentException iae) {
          String errmsg = iae.getMessage();
          if (confKey == null) {
            // Error at addSuffix
            errmsg = getInvalidValueMessage(YarnConfiguration.RM_HA_ID,
                getRMHAId(conf));
          }
          throwBadConfigurationException(errmsg);
        }
        checkAndSetRMRPCAddress(prefix, id, conf);
      }
      setValue.append(id);
      setValue.append(",");

@@ -122,10 +111,18 @@ public class HAUtil {

  }

  private static void verifyAndSetCurrentRMHAId(Configuration conf) {
    String rmId = conf.getTrimmed(YarnConfiguration.RM_HA_ID);
    String rmId = getRMHAId(conf);
    if (rmId == null) {
      throwBadConfigurationException(
          getNeedToSetValueMessage(YarnConfiguration.RM_HA_ID));
      StringBuilder msg = new StringBuilder();
      msg.append("Can not find valid RM_HA_ID. None of ");
      for (String id : conf
          .getTrimmedStringCollection(YarnConfiguration.RM_HA_IDS)) {
        msg.append(addSuffix(YarnConfiguration.RM_ADDRESS, id) + " ");
      }
      msg.append(" are matching" +
          " the local address OR " + YarnConfiguration.RM_HA_ID + " is not" +
          " specified in HA Configuration");
      throwBadConfigurationException(msg.toString());
    } else {
      Collection<String> ids = getRMHAIds(conf);
      if (!ids.contains(rmId)) {

@@ -179,7 +176,34 @@ public class HAUtil {

   * @return RM Id on success
   */
  public static String getRMHAId(Configuration conf) {
    return conf.get(YarnConfiguration.RM_HA_ID);
    int found = 0;
    String currentRMId = conf.getTrimmed(YarnConfiguration.RM_HA_ID);
    if(currentRMId == null) {
      for(String rmId : getRMHAIds(conf)) {
        String key = addSuffix(YarnConfiguration.RM_ADDRESS, rmId);
        String addr = conf.get(key);
        if (addr == null) {
          continue;
        }
        InetSocketAddress s;
        try {
          s = NetUtils.createSocketAddr(addr);
        } catch (Exception e) {
          LOG.warn("Exception in creating socket address " + addr, e);
          continue;
        }
        if (!s.isUnresolved() && NetUtils.isLocalAddress(s.getAddress())) {
          currentRMId = rmId.trim();
          found++;
        }
      }
    }
    if (found > 1) { // Only one address must match the local address
      String msg = "The HA Configuration has multiple addresses that match "
          + "local node's address.";
      throw new HadoopIllegalArgumentException(msg);
    }
    return currentRMId;
  }

  @VisibleForTesting

@@ -212,9 +236,13 @@ public class HAUtil {

  @InterfaceAudience.Private
  @VisibleForTesting
  static String getConfKeyForRMInstance(String prefix, Configuration conf) {
    return YarnConfiguration.RM_SERVICES_ADDRESS_CONF_KEYS.contains(prefix)
        ? addSuffix(prefix, getRMHAId(conf))
        : prefix;
    if (!YarnConfiguration.RM_SERVICES_ADDRESS_CONF_KEYS.contains(prefix)) {
      return prefix;
    } else {
      String RMId = getRMHAId(conf);
      checkAndSetRMRPCAddress(prefix, RMId, conf);
      return addSuffix(prefix, RMId);
    }
  }

  public static String getConfValueForRMInstance(String prefix,

@@ -247,4 +275,30 @@ public class HAUtil {

    }
    return key + "." + suffix;
  }

  private static void checkAndSetRMRPCAddress(String prefix, String RMId,
      Configuration conf) {
    String rpcAddressConfKey = null;
    try {
      rpcAddressConfKey = addSuffix(prefix, RMId);
      if (conf.getTrimmed(rpcAddressConfKey) == null) {
        String hostNameConfKey = addSuffix(YarnConfiguration.RM_HOSTNAME, RMId);
        String confVal = conf.getTrimmed(hostNameConfKey);
        if (confVal == null) {
          throwBadConfigurationException(getNeedToSetValueMessage(
              hostNameConfKey + " or " + addSuffix(prefix, RMId)));
        } else {
          conf.set(addSuffix(prefix, RMId), confVal + ":"
              + YarnConfiguration.getRMDefaultPortNumber(prefix));
        }
      }
    } catch (IllegalArgumentException iae) {
      String errmsg = iae.getMessage();
      if (rpcAddressConfKey == null) {
        // Error at addSuffix
        errmsg = getInvalidValueMessage(YarnConfiguration.RM_HA_ID, RMId);
      }
      throwBadConfigurationException(errmsg);
    }
  }
}
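
A sketch of the configuration shape that the new getRMHAId() resolution relies on: per-id RM addresses are configured, and when yarn.resourcemanager.ha.id is left unset the id whose address matches a local interface is chosen (host names are placeholders):

Configuration conf = new Configuration();
conf.set(YarnConfiguration.RM_HA_IDS, "rm1,rm2");
conf.set(YarnConfiguration.RM_ADDRESS + ".rm1", "rm1.example.com:8032");
conf.set(YarnConfiguration.RM_ADDRESS + ".rm2", "rm2.example.com:8032");
// Returns "rm1" or "rm2" if exactly one address resolves to this host,
// null if none matches, and throws if more than one matches.
String localRMId = HAUtil.getRMHAId(conf);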
@@ -23,8 +23,10 @@ import java.util.Arrays;

import java.util.Collections;
import java.util.List;

import org.apache.hadoop.HadoopIllegalArgumentException;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.classification.InterfaceAudience.Public;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.classification.InterfaceStability.Evolving;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeysPublic;

@@ -37,8 +39,26 @@ import org.apache.hadoop.yarn.api.ApplicationConstants;

@Evolving
public class YarnConfiguration extends Configuration {

  @Private
  public static final String CS_CONFIGURATION_FILE = "capacity-scheduler.xml";

  @Private
  public static final String HADOOP_POLICY_CONFIGURATION_FILE =
      "hadoop-policy.xml";

  @Private
  public static final String YARN_SITE_XML_FILE = "yarn-site.xml";

  @Private
  public static final String CORE_SITE_CONFIGURATION_FILE = "core-site.xml";

  @Evolving
  public static final int APPLICATION_MAX_TAGS = 10;

  @Evolving
  public static final int APPLICATION_MAX_TAG_LENGTH = 100;

  private static final String YARN_DEFAULT_XML_FILE = "yarn-default.xml";
  private static final String YARN_SITE_XML_FILE = "yarn-site.xml";

  static {
    Configuration.addDefaultResource(YARN_DEFAULT_XML_FILE);

@@ -89,6 +109,8 @@ public class YarnConfiguration extends Configuration {

  public static final String RM_CLUSTER_ID = RM_PREFIX + "cluster-id";

  public static final String RM_HOSTNAME = RM_PREFIX + "hostname";

  /** The address of the applications manager interface in the RM.*/
  public static final String RM_ADDRESS =
      RM_PREFIX + "address";

@@ -329,6 +351,16 @@ public class YarnConfiguration extends Configuration {

  public static final String RM_HA_IDS = RM_HA_PREFIX + "rm-ids";
  public static final String RM_HA_ID = RM_HA_PREFIX + "id";

  /** Store the related configuration files in File System */
  public static final String FS_BASED_RM_CONF_STORE = RM_PREFIX
      + "configuration.file-system-based-store";
  public static final String DEFAULT_FS_BASED_RM_CONF_STORE = "/yarn/conf";

  public static final String RM_CONFIGURATION_PROVIDER_CLASS = RM_PREFIX
      + "configuration.provider-class";
  public static final String DEFAULT_RM_CONFIGURATION_PROVIDER_CLASS =
      "org.apache.hadoop.yarn.LocalConfigurationProvider";

  @Private
  public static final List<String> RM_SERVICES_ADDRESS_CONF_KEYS =
      Collections.unmodifiableList(Arrays.asList(

@@ -344,11 +376,11 @@ public class YarnConfiguration extends Configuration {

  public static final String AUTO_FAILOVER_ENABLED =
      AUTO_FAILOVER_PREFIX + "enabled";
  public static final boolean DEFAULT_AUTO_FAILOVER_ENABLED = false;
  public static final boolean DEFAULT_AUTO_FAILOVER_ENABLED = true;

  public static final String AUTO_FAILOVER_EMBEDDED =
      AUTO_FAILOVER_PREFIX + "embedded";
  public static final boolean DEFAULT_AUTO_FAILOVER_EMBEDDED = false;
  public static final boolean DEFAULT_AUTO_FAILOVER_EMBEDDED = true;

  public static final String AUTO_FAILOVER_ZK_BASE_PATH =
      AUTO_FAILOVER_PREFIX + "zk-base-path";

@@ -597,6 +629,7 @@ public class YarnConfiguration extends Configuration {

   */
  public static final String NM_LOG_RETAIN_SECONDS = NM_PREFIX
      + "log.retain-seconds";
  public static final long DEFAULT_NM_LOG_RETAIN_SECONDS = 3 * 60 * 60;

  /**
   * Number of threads used in log cleanup. Only applicable if Log aggregation

@@ -999,6 +1032,19 @@ public class YarnConfiguration extends Configuration {

  public static final String AHS_WEBAPP_SPNEGO_KEYTAB_FILE_KEY =
      AHS_PREFIX + "webapp.spnego-keytab-file";

  ////////////////////////////////
  // ATS Configs
  ////////////////////////////////

  public static final String ATS_PREFIX = YARN_PREFIX + "ats.";

  /** ATS store class */
  public static final String ATS_STORE = ATS_PREFIX + "store.class";

  /** ATS leveldb path */
  public static final String ATS_LEVELDB_PATH_PROPERTY =
      ATS_PREFIX + "leveldb-apptimeline-store.path";

  ////////////////////////////////
  // Other Configs
  ////////////////////////////////

@@ -1101,4 +1147,27 @@ public class YarnConfiguration extends Configuration {

    }
    return super.updateConnectAddr(prefix, addr);
  }

  @Private
  public static int getRMDefaultPortNumber(String addressPrefix) {
    if (addressPrefix.equals(YarnConfiguration.RM_ADDRESS)) {
      return YarnConfiguration.DEFAULT_RM_PORT;
    } else if (addressPrefix.equals(YarnConfiguration.RM_SCHEDULER_ADDRESS)) {
      return YarnConfiguration.DEFAULT_RM_SCHEDULER_PORT;
    } else if (addressPrefix.equals(YarnConfiguration.RM_WEBAPP_ADDRESS)) {
      return YarnConfiguration.DEFAULT_RM_WEBAPP_PORT;
    } else if (addressPrefix.equals(YarnConfiguration.RM_WEBAPP_HTTPS_ADDRESS)) {
      return YarnConfiguration.DEFAULT_RM_WEBAPP_HTTPS_PORT;
    } else if (addressPrefix
        .equals(YarnConfiguration.RM_RESOURCE_TRACKER_ADDRESS)) {
      return YarnConfiguration.DEFAULT_RM_RESOURCE_TRACKER_PORT;
    } else if (addressPrefix.equals(YarnConfiguration.RM_ADMIN_ADDRESS)) {
      return YarnConfiguration.DEFAULT_RM_ADMIN_PORT;
    } else {
      throw new HadoopIllegalArgumentException(
          "Invalid RM RPC address Prefix: " + addressPrefix
              + ". The valid value should be one of "
              + YarnConfiguration.RM_SERVICES_ADDRESS_CONF_KEYS);
    }
  }
}
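
A small sketch of the new helper: each RM service-address prefix maps to its default port, and any other prefix is rejected with a HadoopIllegalArgumentException:

int port = YarnConfiguration.getRMDefaultPortNumber(YarnConfiguration.RM_ADDRESS);
// port is DEFAULT_RM_PORT (8032 with the current defaults)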
@@ -190,6 +190,7 @@ message ApplicationReportProto {

  optional float progress = 17;
  optional string applicationType = 18;
  optional hadoop.common.TokenProto am_rm_token = 19;
  repeated string applicationTags = 20;
}

message ApplicationAttemptReportProto {

@@ -287,6 +288,7 @@ message ApplicationSubmissionContextProto {

  optional ResourceProto resource = 9;
  optional string applicationType = 10 [default = "YARN"];
  optional bool keep_containers_across_application_attempts = 11 [default = false];
  repeated string applicationTags = 12;
}

enum ApplicationAccessTypeProto {

@@ -136,6 +136,12 @@ message MoveApplicationAcrossQueuesRequestProto {

message MoveApplicationAcrossQueuesResponseProto {
}

enum ApplicationsRequestScopeProto {
  ALL = 0;
  VIEWABLE = 1;
  OWN = 2;
}

message GetApplicationsRequestProto {
  repeated string application_types = 1;
  repeated YarnApplicationStateProto application_states = 2;

@@ -146,6 +152,8 @@ message GetApplicationsRequestProto {

  optional int64 start_end = 7;
  optional int64 finish_begin = 8;
  optional int64 finish_end = 9;
  repeated string applicationTags = 10;
  optional ApplicationsRequestScopeProto scope = 11 [default = ALL];
}

message GetApplicationsResponseProto {
@@ -232,7 +232,6 @@ public class ApplicationMaster {

  private static final String shellArgsPath = "shellArgs";

  private volatile boolean done;
  private volatile boolean success;

  private ByteBuffer allTokens;

@@ -254,8 +253,8 @@ public class ApplicationMaster {

      if (!doRun) {
        System.exit(0);
      }
      result = appMaster.run();
      appMaster.finish();
      appMaster.run();
      result = appMaster.finish();
    } catch (Throwable t) {
      LOG.fatal("Error running ApplicationMaster", t);
      System.exit(1);

@@ -480,7 +479,7 @@ public class ApplicationMaster {

   * @throws IOException
   */
  @SuppressWarnings({ "unchecked" })
  public boolean run() throws YarnException, IOException {
  public void run() throws YarnException, IOException {
    LOG.info("Starting ApplicationMaster");

    Credentials credentials =

@@ -561,7 +560,6 @@ public class ApplicationMaster {

      amRMClient.addContainerRequest(containerAsk);
    }
    numRequestedContainers.set(numTotalContainersToRequest);
    return success;
  }

  @VisibleForTesting

@@ -569,7 +567,8 @@ public class ApplicationMaster {

    return new NMCallbackHandler(this);
  }

  protected void finish() {
  @VisibleForTesting
  protected boolean finish() {
    // wait for completion.
    while (!done
        && (numCompletedContainers.get() != numTotalContainers)) {

@@ -600,7 +599,7 @@ public class ApplicationMaster {

    FinalApplicationStatus appStatus;
    String appMessage = null;
    success = true;
    boolean success = true;
    if (numFailedContainers.get() == 0 &&
        numCompletedContainers.get() == numTotalContainers) {
      appStatus = FinalApplicationStatus.SUCCEEDED;

@@ -621,6 +620,8 @@ public class ApplicationMaster {

    }

    amRMClient.stop();

    return success;
  }

  private class RMCallbackHandler implements AMRMClientAsync.CallbackHandler {
@@ -18,13 +18,13 @@

package org.apache.hadoop.yarn.applications.distributedshell;

import java.nio.ByteBuffer;
import java.util.Map;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.yarn.api.records.ContainerId;

import java.nio.ByteBuffer;
import java.util.Map;

public class ContainerLaunchFailAppMaster extends ApplicationMaster {

  private static final Log LOG =

@@ -66,8 +66,8 @@ public class ContainerLaunchFailAppMaster extends ApplicationMaster {

      if (!doRun) {
        System.exit(0);
      }
      result = appMaster.run();
      appMaster.finish();
      appMaster.run();
      result = appMaster.finish();
    } catch (Throwable t) {
      LOG.fatal("Error running ApplicationMaster", t);
      System.exit(1);
@@ -29,8 +29,8 @@ public class TestDSFailedAppMaster extends ApplicationMaster {

  private static final Log LOG = LogFactory.getLog(TestDSFailedAppMaster.class);

  @Override
  public boolean run() throws YarnException, IOException {
    boolean res = super.run();
  public void run() throws YarnException, IOException {
    super.run();

    // for the 2nd attempt.
    if (appAttemptID.getAttemptId() == 2) {

@@ -39,11 +39,12 @@ public class TestDSFailedAppMaster extends ApplicationMaster {

      // numRequestedContainers should be set to 0.
      if (numAllocatedContainers.get() != 1
          || numRequestedContainers.get() != 0) {
        LOG.info("Application Master failed. exiting");
        LOG.info("NumAllocatedContainers is " + numAllocatedContainers.get()
            + " and NumRequestedContainers is " + numRequestedContainers.get()
            + ". Application Master failed. exiting");
        System.exit(200);
      }
    }
    return res;
  }

  public static void main(String[] args) {

@@ -54,7 +55,7 @@ public class TestDSFailedAppMaster extends ApplicationMaster {

      if (!doRun) {
        System.exit(0);
      }
      result = appMaster.run();
      appMaster.run();
      if (appMaster.appAttemptID.getAttemptId() == 1) {
        try {
          // sleep some time, wait for the AM to launch a container.

@@ -63,7 +64,7 @@ public class TestDSFailedAppMaster extends ApplicationMaster {

        // fail the first am.
        System.exit(100);
      }
      appMaster.finish();
      result = appMaster.finish();
    } catch (Throwable t) {
      System.exit(1);
    }
@@ -436,4 +436,19 @@ public abstract class YarnClient extends AbstractService {

  public abstract List<ContainerReport> getContainers(
      ApplicationAttemptId applicationAttemptId) throws YarnException,
      IOException;

  /**
   * <p>
   * Attempts to move the given application to the given queue.
   * </p>
   *
   * @param appId
   *          Application to move.
   * @param queue
   *          Queue to place it in to.
   * @throws YarnException
   * @throws IOException
   */
  public abstract void moveApplicationAcrossQueues(ApplicationId appId,
      String queue) throws YarnException, IOException;
}
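
Client-side usage sketch of the new API (the application id and queue name are placeholders):

YarnClient client = YarnClient.createYarnClient();
client.init(new YarnConfiguration());
client.start();
ApplicationId appId = ApplicationId.newInstance(1234, 5);
client.moveApplicationAcrossQueues(appId, "default");
client.stop();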
@@ -48,6 +48,7 @@ import org.apache.hadoop.yarn.api.protocolrecords.GetQueueInfoRequest;

import org.apache.hadoop.yarn.api.protocolrecords.GetQueueUserAclsInfoRequest;
import org.apache.hadoop.yarn.api.protocolrecords.KillApplicationRequest;
import org.apache.hadoop.yarn.api.protocolrecords.KillApplicationResponse;
import org.apache.hadoop.yarn.api.protocolrecords.MoveApplicationAcrossQueuesRequest;
import org.apache.hadoop.yarn.api.protocolrecords.SubmitApplicationRequest;
import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
import org.apache.hadoop.yarn.api.records.ApplicationAttemptReport;

@@ -478,4 +479,12 @@ public class YarnClientImpl extends YarnClient {

    }
    throw new YarnException("History service is not enabled.");
  }

  @Override
  public void moveApplicationAcrossQueues(ApplicationId appId,
      String queue) throws YarnException, IOException {
    MoveApplicationAcrossQueuesRequest request =
        MoveApplicationAcrossQueuesRequest.newInstance(appId, queue);
    rmClient.moveApplicationAcrossQueues(request);
  }
}
@@ -61,6 +61,7 @@ public class ApplicationCLI extends YarnCLI {

  private static final String APP_TYPE_CMD = "appTypes";
  private static final String APP_STATE_CMD = "appStates";
  private static final String ALLSTATES_OPTION = "ALL";
  private static final String QUEUE_CMD = "queue";
  public static final String APPLICATION = "application";
  public static final String APPLICATION_ATTEMPT = "applicationattempt";
  public static final String CONTAINER = "container";

@@ -96,6 +97,10 @@ public class ApplicationCLI extends YarnCLI {

          + "and -appStates to filter applications based on application state");
    }
    opts.addOption(KILL_CMD, true, "Kills the application.");
    opts.addOption(MOVE_TO_QUEUE_CMD, true, "Moves the application to a "
        + "different queue.");
    opts.addOption(QUEUE_CMD, true, "Works with the movetoqueue command to"
        + " specify which queue to move an application to.");
    opts.addOption(HELP_CMD, false, "Displays help for all commands.");
    Option appTypeOpt = new Option(APP_TYPE_CMD, true, "Works with -list to "
        + "filter applications based on "

@@ -112,6 +117,8 @@ public class ApplicationCLI extends YarnCLI {

    appStateOpt.setArgName("States");
    opts.addOption(appStateOpt);
    opts.getOption(KILL_CMD).setArgName("Application ID");
    opts.getOption(MOVE_TO_QUEUE_CMD).setArgName("Application ID");
    opts.getOption(QUEUE_CMD).setArgName("Queue Name");
    opts.getOption(STATUS_CMD).setArgName("Application ID");

    int exitCode = -1;

@@ -197,11 +204,18 @@ public class ApplicationCLI extends YarnCLI {

        listApplications(appTypes, appStates);
      }
    } else if (cliParser.hasOption(KILL_CMD)) {
      if (args.length != 2) {
      if (args.length != 3) {
        printUsage(opts);
        return exitCode;
      }
      killApplication(cliParser.getOptionValue(KILL_CMD));
    } else if (cliParser.hasOption(MOVE_TO_QUEUE_CMD)) {
      if (!cliParser.hasOption(QUEUE_CMD)) {
        printUsage(opts);
        return exitCode;
      }
      moveApplicationAcrossQueues(cliParser.getOptionValue(MOVE_TO_QUEUE_CMD),
          cliParser.getOptionValue(QUEUE_CMD));
    } else if (cliParser.hasOption(HELP_CMD)) {
      printUsage(opts);
      return 0;

@@ -366,6 +380,28 @@ public class ApplicationCLI extends YarnCLI {

      client.killApplication(appId);
    }
  }

  /**
   * Moves the application with the given application id to the given queue
   *
   * @param applicationId
   * @param queue
   * @throws YarnException
   * @throws IOException
   */
  private void moveApplicationAcrossQueues(String applicationId, String queue)
      throws YarnException, IOException {
    ApplicationId appId = ConverterUtils.toApplicationId(applicationId);
    ApplicationReport appReport = client.getApplicationReport(appId);
    if (appReport.getYarnApplicationState() == YarnApplicationState.FINISHED
        || appReport.getYarnApplicationState() == YarnApplicationState.KILLED
        || appReport.getYarnApplicationState() == YarnApplicationState.FAILED) {
      sysout.println("Application " + applicationId + " has already finished ");
    } else {
      sysout.println("Moving application " + applicationId + " to queue " + queue);
      client.moveApplicationAcrossQueues(appId, queue);
      sysout.println("Successfully completed move.");
    }
  }
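
From the shell this path is reached with: yarn application -movetoqueue <Application ID> -queue <Queue Name>. A test-style sketch of the same invocation (the application id and queue name are placeholders):

int exitCode = ToolRunner.run(new YarnConfiguration(), new ApplicationCLI(),
    new String[] { "application", "-movetoqueue", "application_1234_0005",
        "-queue", "default" });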

  /**
   * Prints the application report for an application id.
@@ -33,6 +33,7 @@ public abstract class YarnCLI extends Configured implements Tool {

  public static final String STATUS_CMD = "status";
  public static final String LIST_CMD = "list";
  public static final String KILL_CMD = "kill";
  public static final String MOVE_TO_QUEUE_CMD = "movetoqueue";
  public static final String HELP_CMD = "help";
  protected PrintStream sysout;
  protected PrintStream syserr;
@@ -172,8 +172,6 @@ public class TestRMFailover extends ClientBaseWithFixes {

  @Test
  public void testAutomaticFailover()
      throws YarnException, InterruptedException, IOException {
    conf.setBoolean(YarnConfiguration.AUTO_FAILOVER_ENABLED, true);
    conf.setBoolean(YarnConfiguration.AUTO_FAILOVER_EMBEDDED, true);
    conf.set(YarnConfiguration.RM_CLUSTER_ID, "yarn-test-cluster");
    conf.set(YarnConfiguration.RM_ZK_ADDRESS, hostPort);
    conf.setInt(YarnConfiguration.RM_ZK_TIMEOUT_MS, 2000);

@@ -193,6 +191,7 @@ public class TestRMFailover extends ClientBaseWithFixes {

  @Test
  public void testWebAppProxyInStandAloneMode() throws YarnException,
      InterruptedException, IOException {
    conf.setBoolean(YarnConfiguration.AUTO_FAILOVER_ENABLED, false);
    WebAppProxyServer webAppProxyServer = new WebAppProxyServer();
    try {
      conf.set(YarnConfiguration.PROXY_ADDRESS, "0.0.0.0:9099");

@@ -227,6 +226,7 @@ public class TestRMFailover extends ClientBaseWithFixes {

  @Test
  public void testEmbeddedWebAppProxy() throws YarnException,
      InterruptedException, IOException {
    conf.setBoolean(YarnConfiguration.AUTO_FAILOVER_ENABLED, false);
    cluster.init(conf);
    cluster.start();
    getAdminService(0).transitionToActive(req);
@@ -675,13 +675,14 @@ public class TestYarnCLI {
    int result = spyCli.run(new String[] { "-help" });
    Assert.assertTrue(result == 0);
    verify(spyCli).printUsage(any(Options.class));
    System.err.println(sysOutStream.toString()); //todo sandyt remove this hejfkdsl
    Assert.assertEquals(createApplicationCLIHelpMessage(),
      sysOutStream.toString());

    sysOutStream.reset();
    ApplicationId applicationId = ApplicationId.newInstance(1234, 5);
    result =
        cli.run(new String[] { "-kill", applicationId.toString(), "args" });
        cli.run(new String[] {"application", "-kill", applicationId.toString(), "args" });
    verify(spyCli).printUsage(any(Options.class));
    Assert.assertEquals(createApplicationCLIHelpMessage(),
      sysOutStream.toString());
@@ -717,7 +718,7 @@ public class TestYarnCLI {
        FinalApplicationStatus.SUCCEEDED, null, "N/A", 0.53789f, "YARN", null);
    when(client.getApplicationReport(any(ApplicationId.class))).thenReturn(
        newApplicationReport2);
    int result = cli.run(new String[] { "-kill", applicationId.toString() });
    int result = cli.run(new String[] { "application","-kill", applicationId.toString() });
    assertEquals(0, result);
    verify(client, times(0)).killApplication(any(ApplicationId.class));
    verify(sysOut).println(
@@ -730,7 +731,7 @@ public class TestYarnCLI {
        FinalApplicationStatus.SUCCEEDED, null, "N/A", 0.53789f, "YARN", null);
    when(client.getApplicationReport(any(ApplicationId.class))).thenReturn(
        newApplicationReport);
    result = cli.run(new String[] { "-kill", applicationId.toString() });
    result = cli.run(new String[] { "application","-kill", applicationId.toString() });
    assertEquals(0, result);
    verify(client).killApplication(any(ApplicationId.class));
    verify(sysOut).println("Killing application application_1234_0005");
@@ -740,7 +741,57 @@ public class TestYarnCLI {
        .getApplicationReport(applicationId);
    cli = createAndGetAppCLI();
    try {
      cli.run(new String[] { "-kill", applicationId.toString() });
      cli.run(new String[] { "application","-kill", applicationId.toString() });
      Assert.fail();
    } catch (Exception ex) {
      Assert.assertTrue(ex instanceof ApplicationNotFoundException);
      Assert.assertEquals("Application with id '" + applicationId +
          "' doesn't exist in RM.", ex.getMessage());
    }
  }

  @Test
  public void testMoveApplicationAcrossQueues() throws Exception {
    ApplicationCLI cli = createAndGetAppCLI();
    ApplicationId applicationId = ApplicationId.newInstance(1234, 5);

    ApplicationReport newApplicationReport2 = ApplicationReport.newInstance(
        applicationId, ApplicationAttemptId.newInstance(applicationId, 1),
        "user", "queue", "appname", "host", 124, null,
        YarnApplicationState.FINISHED, "diagnostics", "url", 0, 0,
        FinalApplicationStatus.SUCCEEDED, null, "N/A", 0.53789f, "YARN", null);
    when(client.getApplicationReport(any(ApplicationId.class))).thenReturn(
        newApplicationReport2);
    int result = cli.run(new String[] { "-movetoqueue", applicationId.toString(),
        "-queue", "targetqueue"});
    assertEquals(0, result);
    verify(client, times(0)).moveApplicationAcrossQueues(
        any(ApplicationId.class), any(String.class));
    verify(sysOut).println(
        "Application " + applicationId + " has already finished ");

    ApplicationReport newApplicationReport = ApplicationReport.newInstance(
        applicationId, ApplicationAttemptId.newInstance(applicationId, 1),
        "user", "queue", "appname", "host", 124, null,
        YarnApplicationState.RUNNING, "diagnostics", "url", 0, 0,
        FinalApplicationStatus.SUCCEEDED, null, "N/A", 0.53789f, "YARN", null);
    when(client.getApplicationReport(any(ApplicationId.class))).thenReturn(
        newApplicationReport);
    result = cli.run(new String[] { "-movetoqueue", applicationId.toString(),
        "-queue", "targetqueue"});
    assertEquals(0, result);
    verify(client).moveApplicationAcrossQueues(any(ApplicationId.class),
        any(String.class));
    verify(sysOut).println("Moving application application_1234_0005 to queue targetqueue");
    verify(sysOut).println("Successfully completed move.");

    doThrow(new ApplicationNotFoundException("Application with id '"
        + applicationId + "' doesn't exist in RM.")).when(client)
        .moveApplicationAcrossQueues(applicationId, "targetqueue");
    cli = createAndGetAppCLI();
    try {
      result = cli.run(new String[] { "-movetoqueue", applicationId.toString(),
          "-queue", "targetqueue"});
      Assert.fail();
    } catch (Exception ex) {
      Assert.assertTrue(ex instanceof ApplicationNotFoundException);
@@ -1087,23 +1138,28 @@ public class TestYarnCLI {
    ByteArrayOutputStream baos = new ByteArrayOutputStream();
    PrintWriter pw = new PrintWriter(baos);
    pw.println("usage: application");
    pw.println(" -appStates <States> Works with -list to filter applications based");
    pw.println(" on input comma-separated list of application");
    pw.println(" states. The valid application state can be one");
    pw.println(" of the following:");
    pw.println(" ALL,NEW,NEW_SAVING,SUBMITTED,ACCEPTED,RUNNING,");
    pw.println(" FINISHED,FAILED,KILLED");
    pw.println(" -appTypes <Types> Works with -list to filter applications based");
    pw.println(" on input comma-separated list of application");
    pw.println(" types.");
    pw.println(" -help Displays help for all commands.");
    pw.println(" -kill <Application ID> Kills the application.");
    pw.println(" -list List applications from the RM. Supports");
    pw.println(" optional use of -appTypes to filter");
    pw.println(" applications based on application type, and");
    pw.println(" -appStates to filter applications based on");
    pw.println(" application state");
    pw.println(" -status <Application ID> Prints the status of the application.");
    pw.println(" -appStates <States> Works with -list to filter applications");
    pw.println(" based on input comma-separated list of");
    pw.println(" application states. The valid application");
    pw.println(" state can be one of the following:");
    pw.println(" ALL,NEW,NEW_SAVING,SUBMITTED,ACCEPTED,RUN");
    pw.println(" NING,FINISHED,FAILED,KILLED");
    pw.println(" -appTypes <Types> Works with -list to filter applications");
    pw.println(" based on input comma-separated list of");
    pw.println(" application types.");
    pw.println(" -help Displays help for all commands.");
    pw.println(" -kill <Application ID> Kills the application.");
    pw.println(" -list List applications from the RM. Supports");
    pw.println(" optional use of -appTypes to filter");
    pw.println(" applications based on application type,");
    pw.println(" and -appStates to filter applications");
    pw.println(" based on application state");
    pw.println(" -movetoqueue <Application ID> Moves the application to a different");
    pw.println(" queue.");
    pw.println(" -queue <Queue Name> Works with the movetoqueue command to");
    pw.println(" specify which queue to move an");
    pw.println(" application to.");
    pw.println(" -status <Application ID> Prints the status of the application.");
    pw.close();
    String appsHelpStr = baos.toString("UTF-8");
    return appsHelpStr;
@@ -0,0 +1,72 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.yarn;

import java.io.IOException;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.classification.InterfaceStability.Unstable;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.yarn.conf.ConfigurationProvider;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.exceptions.YarnException;

@Private
@Unstable
public class FileSystemBasedConfigurationProvider
    extends ConfigurationProvider {

  private static final Log LOG = LogFactory
      .getLog(FileSystemBasedConfigurationProvider.class);
  private FileSystem fs;
  private Path configDir;

  @Override
  public synchronized Configuration getConfiguration(String name)
      throws IOException, YarnException {
    Path configPath = new Path(this.configDir, name);
    if (!fs.exists(configPath)) {
      throw new YarnException("Can not find Configuration: " + name + " in "
          + configDir);
    }
    Configuration conf = new Configuration(false);
    conf.addResource(fs.open(configPath));
    return conf;
  }

  @Override
  public synchronized void initInternal(Configuration conf) throws Exception {
    configDir =
        new Path(conf.get(YarnConfiguration.FS_BASED_RM_CONF_STORE,
            YarnConfiguration.DEFAULT_FS_BASED_RM_CONF_STORE));
    fs = configDir.getFileSystem(conf);
    if (!fs.exists(configDir)) {
      fs.mkdirs(configDir);
    }
  }

  @Override
  public synchronized void closeInternal() throws Exception {
    fs.close();
  }
}
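Since initInternal, getConfiguration, and closeInternal are all public on the new provider, a minimal sketch of exercising it against a local store directory might look like the following. The store path and file name are illustrative, and a real deployment would presumably go through whatever public init/close entry points the abstract ConfigurationProvider exposes rather than calling the *Internal methods directly:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.yarn.FileSystemBasedConfigurationProvider;
import org.apache.hadoop.yarn.conf.YarnConfiguration;

public class FsProviderSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new YarnConfiguration();
    // Illustrative local path for the store directory.
    conf.set(YarnConfiguration.FS_BASED_RM_CONF_STORE, "/tmp/yarn-conf-store");

    FileSystemBasedConfigurationProvider provider =
        new FileSystemBasedConfigurationProvider();
    provider.initInternal(conf);   // creates the store directory if it is missing

    // Assumes an administrator has already uploaded this file into the store
    // directory; otherwise getConfiguration throws a YarnException.
    Configuration remoteConf = provider.getConfiguration("capacity-scheduler.xml");
    System.out.println("Loaded " + remoteConf.size() + " properties");

    provider.closeInternal();      // closes the underlying FileSystem
  }
}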
@@ -0,0 +1,48 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.yarn;

import java.io.IOException;

import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.classification.InterfaceStability.Unstable;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.yarn.conf.ConfigurationProvider;
import org.apache.hadoop.yarn.exceptions.YarnException;

@Private
@Unstable
public class LocalConfigurationProvider extends ConfigurationProvider {

  @Override
  public Configuration getConfiguration(String name)
      throws IOException, YarnException {
    return new Configuration();
  }

  @Override
  public void initInternal(Configuration conf) throws Exception {
    // Do nothing
  }

  @Override
  public void closeInternal() throws Exception {
    // Do nothing
  }
}
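LocalConfigurationProvider is the pass-through counterpart: it ignores the requested name and hands back a fresh Configuration backed by whatever default resources sit on the local classpath. A tiny, illustrative sketch of that contrast (the sketch class and file name are hypothetical):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.yarn.LocalConfigurationProvider;

public class LocalProviderSketch {
  public static void main(String[] args) throws Exception {
    // The requested name is ignored; the returned Configuration simply picks up
    // the default resources available on the local classpath.
    Configuration conf = new LocalConfigurationProvider()
        .getConfiguration("capacity-scheduler.xml");
    System.out.println(conf.size() + " properties from the local classpath");
  }
}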