commit 86414507f4

Merge branch 'trunk' into HDFS-7240

Conflicts:
    hadoop-common-project/hadoop-common/src/main/conf/log4j.properties
    hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/RemoteWasbAuthorizerImpl.java
@ -58,6 +58,7 @@ RUN apt-get -q update && apt-get -q install --no-install-recommends -y \
protobuf-c-compiler \
python \
python2.7 \
python2.7-dev \
python-pip \
rsync \
snappy \
@ -92,6 +93,7 @@ RUN mkdir -p /opt/maven && \
-o /opt/maven.tar.gz && \
tar xzf /opt/maven.tar.gz --strip-components 1 -C /opt/maven
ENV MAVEN_HOME /opt/maven
ENV PATH "$PATH:/opt/maven/bin"

######
# Install findbugs
@ -111,6 +111,16 @@
</execution>
</executions>
</plugin>
<plugin>
<groupId>org.apache.rat</groupId>
<artifactId>apache-rat-plugin</artifactId>
<configuration>
<excludes>
<exclude>maven-eclipse.xml</exclude>
<exclude>.externalToolBuilders/Maven_Ant_Builder.launch</exclude>
</excludes>
</configuration>
</plugin>
</plugins>
</build>
</project>
@ -460,10 +460,6 @@
<groupId>com.google.guava</groupId>
<artifactId>guava</artifactId>
</exclusion>
<exclusion>
<groupId>commons-httpclient</groupId>
<artifactId>commons-httpclient</artifactId>
</exclusion>
<exclusion>
<groupId>commons-logging</groupId>
<artifactId>commons-logging</artifactId>
@ -318,6 +318,11 @@
<artifactId>aalto-xml</artifactId>
<scope>compile</scope>
</dependency>
<dependency>
<groupId>com.squareup.okhttp3</groupId>
<artifactId>mockwebserver</artifactId>
<scope>test</scope>
</dependency>
</dependencies>

<build>
@ -322,6 +322,7 @@ log4j.appender.EWMA.cleanupInterval=${yarn.ewma.cleanupInterval}
log4j.appender.EWMA.messageAgeLimitSeconds=${yarn.ewma.messageAgeLimitSeconds}
log4j.appender.EWMA.maxUniqueMessages=${yarn.ewma.maxUniqueMessages}

<<<<<<< HEAD
# Fair scheduler requests log on state dump
log4j.logger.org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.FairScheduler.statedump=DEBUG,FSLOGGER
log4j.additivity.org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.FairScheduler.statedump=false
@ -355,3 +356,16 @@ log4j.appender.FILE.layout=org.apache.log4j.PatternLayout
log4j.appender.FILE.layout.ConversionPattern=%d{ISO8601} [%t] %-5p \
(%F:%L) %X{function} %X{resource} %X{user} %X{request} - \
%m%n
#
# Fair scheduler state dump
#
# Use following logger to dump the state to a separate file

#log4j.logger.org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.FairScheduler.statedump=DEBUG,FSSTATEDUMP
#log4j.additivity.org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.FairScheduler.statedump=false
#log4j.appender.FSSTATEDUMP=org.apache.log4j.RollingFileAppender
#log4j.appender.FSSTATEDUMP.File=${hadoop.log.dir}/fairscheduler-statedump.log
#log4j.appender.FSSTATEDUMP.layout=org.apache.log4j.PatternLayout
#log4j.appender.FSSTATEDUMP.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
#log4j.appender.FSSTATEDUMP.MaxFileSize=${hadoop.log.maxfilesize}
#log4j.appender.FSSTATEDUMP.MaxBackupIndex=${hadoop.log.maxbackupindex}
@ -25,6 +25,8 @@

import static org.apache.hadoop.fs.CommonConfigurationKeys.*;

import org.apache.hadoop.util.StringUtils;

/**
* Tool for redacting sensitive information when displaying config parameters.
*
@ -42,7 +44,8 @@ public ConfigRedactor(Configuration conf) {
String sensitiveRegexList = conf.get(
HADOOP_SECURITY_SENSITIVE_CONFIG_KEYS,
HADOOP_SECURITY_SENSITIVE_CONFIG_KEYS_DEFAULT);
List<String> sensitiveRegexes = Arrays.asList(sensitiveRegexList.split(","));
List<String> sensitiveRegexes =
Arrays.asList(StringUtils.getTrimmedStrings(sensitiveRegexList));
compiledPatterns = new ArrayList<Pattern>();
for (String regex : sensitiveRegexes) {
Pattern p = Pattern.compile(regex);
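The switch from split(",") to StringUtils.getTrimmedStrings above matters when the comma-separated pattern list is spread across lines or padded with spaces in a configuration file, since untrimmed fragments would otherwise be compiled into the regex patterns verbatim. A minimal sketch of the difference, not part of the patch and using a made-up property value:

import java.util.Arrays;

import org.apache.hadoop.util.StringUtils;

public class TrimmedSplitDemo {
  public static void main(String[] args) {
    // A value as it might appear in a multi-line XML configuration entry.
    String raw = "secret$,\n      password$ , ssl.keystore.pass$";

    // Plain split(",") keeps leading/trailing whitespace in each element,
    // so "password$" would become a pattern prefixed with a newline.
    System.out.println(Arrays.toString(raw.split(",")));

    // getTrimmedStrings returns each element with surrounding whitespace
    // removed, which is what ConfigRedactor now compiles.
    System.out.println(Arrays.toString(StringUtils.getTrimmedStrings(raw)));
  }
}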
@ -252,7 +252,9 @@ public synchronized void close() throws IOException {
*/
@Override
public synchronized void flush() throws IOException {
checkStream();
if (closed) {
return;
}
encrypt();
super.flush();
}
@ -810,12 +810,15 @@ public class CommonConfigurationKeysPublic {
public static final String HADOOP_SECURITY_SENSITIVE_CONFIG_KEYS =
"hadoop.security.sensitive-config-keys";
public static final String HADOOP_SECURITY_SENSITIVE_CONFIG_KEYS_DEFAULT =
"secret$" + "," +
"password$" + "," +
"ssl.keystore.pass$" + "," +
"fs.s3.*[Ss]ecret.?[Kk]ey" + "," +
"fs.azure\\.account.key.*" + "," +
"dfs.webhdfs.oauth2.[a-z]+.token" + "," +
HADOOP_SECURITY_SENSITIVE_CONFIG_KEYS;
String.join(",",
"secret$",
"password$",
"ssl.keystore.pass$",
"fs.s3.*[Ss]ecret.?[Kk]ey",
"fs.s3a.*.server-side-encryption.key",
"fs.azure\\.account.key.*",
"credential$",
"oauth.*token$",
HADOOP_SECURITY_SENSITIVE_CONFIG_KEYS);
}

@ -110,7 +110,13 @@ public enum CreateFlag {
* 'local' means the same host as the client is being run on.
*/
@InterfaceAudience.LimitedPrivate({"HBase"})
NO_LOCAL_WRITE((short) 0x40);
NO_LOCAL_WRITE((short) 0x40),

/**
* Enforce the file to be a replicated file, no matter what its parent
* directory's replication or erasure coding policy is.
*/
SHOULD_REPLICATE((short) 0x80);

private final short mode;

@ -30,7 +30,7 @@
@InterfaceAudience.Public
@InterfaceStability.Stable
public class FSDataOutputStream extends DataOutputStream
implements Syncable, CanSetDropBehind {
implements Syncable, CanSetDropBehind, StreamCapabilities {
private final OutputStream wrappedStream;

private static class PositionCache extends FilterOutputStream {
@ -111,6 +111,14 @@ public OutputStream getWrappedStream() {
return wrappedStream;
}

@Override
public boolean hasCapability(String capability) {
if (wrappedStream instanceof StreamCapabilities) {
return ((StreamCapabilities) wrappedStream).hasCapability(capability);
}
return false;
}

@Override // Syncable
public void hflush() throws IOException {
if (wrappedStream instanceof Syncable) {
@ -798,7 +798,7 @@ protected void checkPath(Path path) {
*
* The default implementation returns an array containing one element:
* <pre>
* BlockLocation( { "localhost:50010" }, { "localhost" }, 0, file.getLen())
* BlockLocation( { "localhost:9866" }, { "localhost" }, 0, file.getLen())
* </pre>>
*
* @param file FilesStatus to get data from
@ -492,11 +492,12 @@ private static Path checkDest(String srcName, FileSystem dstFS, Path dst,
if (null != sdst) {
if (sdst.isDirectory()) {
if (null == srcName) {
throw new IOException("Target " + dst + " is a directory");
throw new PathIsDirectoryException(dst.toString());
}
return checkDest(null, dstFS, new Path(dst, srcName), overwrite);
} else if (!overwrite) {
throw new IOException("Target " + dst + " already exists");
throw new PathExistsException(dst.toString(),
"Target " + dst + " already exists");
}
}
return dst;
@ -552,7 +552,7 @@ private boolean mkdirsWithOptionalPermission(Path f, FsPermission permission)
}
}
if (p2f.exists() && !p2f.isDirectory()) {
throw new FileNotFoundException("Destination exists" +
throw new FileAlreadyExistsException("Destination exists" +
" and is not a directory: " + p2f.getCanonicalPath());
}
return (parent == null || parent2f.exists() || mkdirs(parent)) &&
@ -0,0 +1,67 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

package org.apache.hadoop.fs;

import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;

/**
* Interface to query streams for supported capabilities.
*/
@InterfaceAudience.Public
@InterfaceStability.Evolving
public interface StreamCapabilities {
/**
* Capabilities that a stream can support and be queried for.
*/
enum StreamCapability {
/**
* Stream hflush capability to flush out the data in client's buffer.
* Streams with this capability implement {@link Syncable} and support
* {@link Syncable#hflush()}.
*/
HFLUSH("hflush"),

/**
* Stream hsync capability to flush out the data in client's buffer and
* the disk device. Streams with this capability implement {@link Syncable}
* and support {@link Syncable#hsync()}.
*/
HSYNC("hsync");

private final String capability;

StreamCapability(String value) {
this.capability = value;
}

public final String getValue() {
return capability;
}
}

/**
* Query the stream for a specific capability.
*
* @param capability string to query the stream support for.
* @return True if the stream supports capability.
*/
boolean hasCapability(String capability);
}
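FSDataOutputStream's new hasCapability override (earlier in this diff) simply forwards to this interface on the wrapped stream. A minimal usage sketch, assuming a default FileSystem and an illustrative path; none of this is part of the patch itself:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.StreamCapabilities.StreamCapability;

public class CapabilityCheck {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(conf);
    try (FSDataOutputStream out = fs.create(new Path("/tmp/capability-demo"))) {
      // Ask the stream (really its wrapped stream) whether hflush is
      // supported before relying on any durability guarantee.
      if (out.hasCapability(StreamCapability.HFLUSH.getValue())) {
        out.writeBytes("durable record\n");
        out.hflush();
      } else {
        out.writeBytes("best-effort record\n");
      }
    }
  }
}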
@ -0,0 +1,153 @@
|
||||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
* regarding copyright ownership. The ASF licenses this file
|
||||
* to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package org.apache.hadoop.fs.http;
|
||||
|
||||
|
||||
import org.apache.hadoop.conf.Configuration;
|
||||
import org.apache.hadoop.fs.FSDataInputStream;
|
||||
import org.apache.hadoop.fs.FSDataOutputStream;
|
||||
import org.apache.hadoop.fs.FileStatus;
|
||||
import org.apache.hadoop.fs.FileSystem;
|
||||
import org.apache.hadoop.fs.Path;
|
||||
import org.apache.hadoop.fs.PositionedReadable;
|
||||
import org.apache.hadoop.fs.Seekable;
|
||||
import org.apache.hadoop.fs.permission.FsPermission;
|
||||
import org.apache.hadoop.util.Progressable;
|
||||
|
||||
import java.io.FilterInputStream;
|
||||
import java.io.IOException;
|
||||
import java.io.InputStream;
|
||||
import java.net.URI;
|
||||
import java.net.URLConnection;
|
||||
|
||||
abstract class AbstractHttpFileSystem extends FileSystem {
|
||||
private static final long DEFAULT_BLOCK_SIZE = 4096;
|
||||
private static final Path WORKING_DIR = new Path("/");
|
||||
|
||||
private URI uri;
|
||||
|
||||
@Override
|
||||
public void initialize(URI name, Configuration conf) throws IOException {
|
||||
super.initialize(name, conf);
|
||||
this.uri = name;
|
||||
}
|
||||
|
||||
public abstract String getScheme();
|
||||
|
||||
@Override
|
||||
public URI getUri() {
|
||||
return uri;
|
||||
}
|
||||
|
||||
@Override
|
||||
public FSDataInputStream open(Path path, int bufferSize) throws IOException {
|
||||
URLConnection conn = path.toUri().toURL().openConnection();
|
||||
InputStream in = conn.getInputStream();
|
||||
return new FSDataInputStream(new HttpDataInputStream(in));
|
||||
}
|
||||
|
||||
@Override
|
||||
public FSDataOutputStream create(Path path, FsPermission fsPermission,
|
||||
boolean b, int i, short i1, long l,
|
||||
Progressable progressable)
|
||||
throws IOException {
|
||||
throw new UnsupportedOperationException();
|
||||
}
|
||||
|
||||
@Override
|
||||
public FSDataOutputStream append(Path path, int i, Progressable progressable)
|
||||
throws IOException {
|
||||
throw new UnsupportedOperationException();
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean rename(Path path, Path path1) throws IOException {
|
||||
throw new UnsupportedOperationException();
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean delete(Path path, boolean b) throws IOException {
|
||||
throw new UnsupportedOperationException();
|
||||
}
|
||||
|
||||
@Override
|
||||
public FileStatus[] listStatus(Path path) throws IOException {
|
||||
throw new UnsupportedOperationException();
|
||||
}
|
||||
|
||||
@Override
|
||||
public void setWorkingDirectory(Path path) {
|
||||
}
|
||||
|
||||
@Override
|
||||
public Path getWorkingDirectory() {
|
||||
return WORKING_DIR;
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean mkdirs(Path path, FsPermission fsPermission)
|
||||
throws IOException {
|
||||
return false;
|
||||
}
|
||||
|
||||
@Override
|
||||
public FileStatus getFileStatus(Path path) throws IOException {
|
||||
return new FileStatus(-1, false, 1, DEFAULT_BLOCK_SIZE, 0, path);
|
||||
}
|
||||
|
||||
private static class HttpDataInputStream extends FilterInputStream
|
||||
implements Seekable, PositionedReadable {
|
||||
|
||||
HttpDataInputStream(InputStream in) {
|
||||
super(in);
|
||||
}
|
||||
|
||||
@Override
|
||||
public int read(long position, byte[] buffer, int offset, int length)
|
||||
throws IOException {
|
||||
throw new UnsupportedOperationException();
|
||||
}
|
||||
|
||||
@Override
|
||||
public void readFully(long position, byte[] buffer, int offset, int length)
|
||||
throws IOException {
|
||||
throw new UnsupportedOperationException();
|
||||
}
|
||||
|
||||
@Override
|
||||
public void readFully(long position, byte[] buffer) throws IOException {
|
||||
throw new UnsupportedOperationException();
|
||||
}
|
||||
|
||||
@Override
|
||||
public void seek(long pos) throws IOException {
|
||||
throw new UnsupportedOperationException();
|
||||
}
|
||||
|
||||
@Override
|
||||
public long getPos() throws IOException {
|
||||
throw new UnsupportedOperationException();
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean seekToNewSource(long targetPos) throws IOException {
|
||||
throw new UnsupportedOperationException();
|
||||
}
|
||||
}
|
||||
}
|
@ -0,0 +1,28 @@
|
||||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
* regarding copyright ownership. The ASF licenses this file
|
||||
* to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
package org.apache.hadoop.fs.http;
|
||||
|
||||
/**
|
||||
* A Filesystem that reads from HTTP endpoint.
|
||||
*/
|
||||
public class HttpFileSystem extends AbstractHttpFileSystem {
|
||||
@Override
|
||||
public String getScheme() {
|
||||
return "http";
|
||||
}
|
||||
}
|
@ -0,0 +1,28 @@
|
||||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
* regarding copyright ownership. The ASF licenses this file
|
||||
* to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
package org.apache.hadoop.fs.http;
|
||||
|
||||
/**
|
||||
* A Filesystem that reads from HTTPS endpoint.
|
||||
*/
|
||||
public class HttpsFileSystem extends AbstractHttpFileSystem {
|
||||
@Override
|
||||
public String getScheme() {
|
||||
return "https";
|
||||
}
|
||||
}
|
@ -0,0 +1,23 @@
|
||||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
* regarding copyright ownership. The ASF licenses this file
|
||||
* to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
/**
|
||||
* Filesystem implementations that allow Hadoop to read directly from
|
||||
* HTTP / HTTPS endpoints.
|
||||
*/
|
||||
package org.apache.hadoop.fs.http;
|
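The two concrete classes above only support opening a stream for reading; the other FileSystem operations throw UnsupportedOperationException, as shown in AbstractHttpFileSystem. A hedged usage sketch follows: the URL is made up, and the explicit fs.http.impl mapping is an assumption shown in case the http scheme is not already registered with FileSystem.

import java.io.BufferedReader;
import java.io.InputStreamReader;
import java.net.URI;
import java.nio.charset.StandardCharsets;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class HttpReadDemo {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Assumption: map the "http" scheme to the new HttpFileSystem if it is
    // not already registered by default configuration.
    conf.setIfUnset("fs.http.impl", "org.apache.hadoop.fs.http.HttpFileSystem");

    Path src = new Path("http://example.org/datasets/sample.txt");
    FileSystem fs = FileSystem.get(URI.create(src.toString()), conf);
    try (FSDataInputStream in = fs.open(src);
         BufferedReader reader = new BufferedReader(
             new InputStreamReader(in, StandardCharsets.UTF_8))) {
      // Read-only access: just print the first line of the remote resource.
      System.out.println(reader.readLine());
    }
  }
}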
@ -69,7 +69,8 @@ public class NodeFencer {
private static final Map<String, Class<? extends FenceMethod>> STANDARD_METHODS =
ImmutableMap.<String, Class<? extends FenceMethod>>of(
"shell", ShellCommandFencer.class,
"sshfence", SshFenceByTcpPort.class);
"sshfence", SshFenceByTcpPort.class,
"powershell", PowerShellFencer.class);

private final List<FenceMethodWithArg> methods;

@ -0,0 +1,154 @@
|
||||
/**
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
* regarding copyright ownership. The ASF licenses this file
|
||||
* to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
package org.apache.hadoop.ha;
|
||||
|
||||
import java.io.BufferedWriter;
|
||||
import java.io.File;
|
||||
import java.io.FileOutputStream;
|
||||
import java.io.IOException;
|
||||
import java.io.OutputStreamWriter;
|
||||
import java.net.InetSocketAddress;
|
||||
import java.nio.charset.StandardCharsets;
|
||||
|
||||
import org.apache.commons.logging.Log;
|
||||
import org.apache.commons.logging.LogFactory;
|
||||
import org.apache.hadoop.conf.Configured;
|
||||
import org.apache.hadoop.util.StringUtils;
|
||||
|
||||
/**
|
||||
* Fencer method that uses PowerShell to remotely connect to a machine and kill
|
||||
* the required process. This only works in Windows.
|
||||
*
|
||||
* The argument passed to this fencer should be a unique string in the
|
||||
* "CommandLine" attribute for the "java.exe" process. For example, the full
|
||||
* path for the Namenode: "org.apache.hadoop.hdfs.server.namenode.NameNode".
|
||||
* The administrator can also shorten the name to "Namenode" if it's unique.
|
||||
*/
|
||||
public class PowerShellFencer extends Configured implements FenceMethod {
|
||||
|
||||
private static final Log LOG = LogFactory.getLog(PowerShellFencer.class);
|
||||
|
||||
|
||||
@Override
|
||||
public void checkArgs(String argStr) throws BadFencingConfigurationException {
|
||||
LOG.info("The parameter for the PowerShell fencer is " + argStr);
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean tryFence(HAServiceTarget target, String argsStr)
|
||||
throws BadFencingConfigurationException {
|
||||
|
||||
String processName = argsStr;
|
||||
InetSocketAddress serviceAddr = target.getAddress();
|
||||
String hostname = serviceAddr.getHostName();
|
||||
|
||||
// Use PowerShell to kill a remote process
|
||||
String ps1script = buildPSScript(processName, hostname);
|
||||
if (ps1script == null) {
|
||||
LOG.error("Cannot build PowerShell script");
|
||||
return false;
|
||||
}
|
||||
|
||||
// Execute PowerShell script
|
||||
LOG.info("Executing " + ps1script);
|
||||
ProcessBuilder builder = new ProcessBuilder("powershell.exe", ps1script);
|
||||
Process p = null;
|
||||
try {
|
||||
p = builder.start();
|
||||
p.getOutputStream().close();
|
||||
} catch (IOException e) {
|
||||
LOG.warn("Unable to execute " + ps1script, e);
|
||||
return false;
|
||||
}
|
||||
|
||||
// Pump logs to stderr
|
||||
StreamPumper errPumper = new StreamPumper(
|
||||
LOG, "fencer", p.getErrorStream(), StreamPumper.StreamType.STDERR);
|
||||
errPumper.start();
|
||||
|
||||
StreamPumper outPumper = new StreamPumper(
|
||||
LOG, "fencer", p.getInputStream(), StreamPumper.StreamType.STDOUT);
|
||||
outPumper.start();
|
||||
|
||||
// Waiting for the process to finish
|
||||
int rc = 0;
|
||||
try {
|
||||
rc = p.waitFor();
|
||||
errPumper.join();
|
||||
outPumper.join();
|
||||
} catch (InterruptedException ie) {
|
||||
LOG.warn("Interrupted while waiting for fencing command: " + ps1script);
|
||||
return false;
|
||||
}
|
||||
|
||||
return rc == 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* Build a PowerShell script to kill a java.exe process in a remote machine.
|
||||
*
|
||||
* @param processName Name of the process to kill. This is an attribute in
|
||||
* CommandLine.
|
||||
* @param host Host where the process is.
|
||||
* @return Path of the PowerShell script.
|
||||
*/
|
||||
private String buildPSScript(final String processName, final String host) {
|
||||
LOG.info(
|
||||
"Building PowerShell script to kill " + processName + " at " + host);
|
||||
String ps1script = null;
|
||||
BufferedWriter writer = null;
|
||||
try {
|
||||
File file = File.createTempFile("temp-fence-command", ".ps1");
|
||||
file.deleteOnExit();
|
||||
FileOutputStream fos = new FileOutputStream(file, false);
|
||||
OutputStreamWriter osw =
|
||||
new OutputStreamWriter(fos, StandardCharsets.UTF_8);
|
||||
writer = new BufferedWriter(osw);
|
||||
|
||||
// Filter to identify the Namenode process
|
||||
String filter = StringUtils.join(" and ", new String[] {
|
||||
"Name LIKE '%java.exe%'",
|
||||
"CommandLine LIKE '%" + processName+ "%'"});
|
||||
|
||||
// Identify the process
|
||||
String cmd = "Get-WmiObject Win32_Process";
|
||||
cmd += " -Filter \"" + filter + "\"";
|
||||
// Remote location
|
||||
cmd += " -Computer " + host;
|
||||
// Kill it
|
||||
cmd += " |% { $_.Terminate() }";
|
||||
|
||||
LOG.info("PowerShell command: " + cmd);
|
||||
writer.write(cmd);
|
||||
writer.flush();
|
||||
|
||||
ps1script = file.getAbsolutePath();
|
||||
} catch (IOException ioe) {
|
||||
LOG.error("Cannot create PowerShell script", ioe);
|
||||
} finally {
|
||||
if (writer != null) {
|
||||
try {
|
||||
writer.close();
|
||||
} catch (IOException ioe) {
|
||||
LOG.error("Cannot close PowerShell script", ioe);
|
||||
}
|
||||
}
|
||||
}
|
||||
return ps1script;
|
||||
}
|
||||
}
|
@ -73,6 +73,7 @@
|
||||
import org.apache.hadoop.security.ssl.SSLFactory;
|
||||
import org.apache.hadoop.util.ReflectionUtils;
|
||||
import org.apache.hadoop.util.Shell;
|
||||
import org.apache.hadoop.util.StringUtils;
|
||||
import org.eclipse.jetty.http.HttpVersion;
|
||||
import org.eclipse.jetty.server.ConnectionFactory;
|
||||
import org.eclipse.jetty.server.Connector;
|
||||
@ -347,18 +348,17 @@ public Builder setXFrameOption(String option) {
|
||||
|
||||
/**
|
||||
* A wrapper of {@link Configuration#getPassword(String)}. It returns
|
||||
* <code>String</code> instead of <code>char[]</code> and throws
|
||||
* {@link IOException} when the password not found.
|
||||
* <code>String</code> instead of <code>char[]</code>.
|
||||
*
|
||||
* @param conf the configuration
|
||||
* @param name the property name
|
||||
* @return the password string
|
||||
* @return the password string or null
|
||||
*/
|
||||
private static String getPassword(Configuration conf, String name)
|
||||
private static String getPasswordString(Configuration conf, String name)
|
||||
throws IOException {
|
||||
char[] passchars = conf.getPassword(name);
|
||||
if (passchars == null) {
|
||||
throw new IOException("Password " + name + " not found");
|
||||
return null;
|
||||
}
|
||||
return new String(passchars);
|
||||
}
|
||||
@ -370,19 +370,30 @@ private void loadSSLConfiguration() throws IOException {
|
||||
if (sslConf == null) {
|
||||
return;
|
||||
}
|
||||
needsClientAuth(sslConf.getBoolean(
|
||||
needsClientAuth = sslConf.getBoolean(
|
||||
SSLFactory.SSL_SERVER_NEED_CLIENT_AUTH,
|
||||
SSLFactory.SSL_SERVER_NEED_CLIENT_AUTH_DEFAULT));
|
||||
keyStore(sslConf.get(SSLFactory.SSL_SERVER_KEYSTORE_LOCATION),
|
||||
getPassword(sslConf, SSLFactory.SSL_SERVER_KEYSTORE_PASSWORD),
|
||||
sslConf.get(SSLFactory.SSL_SERVER_KEYSTORE_TYPE,
|
||||
SSLFactory.SSL_SERVER_KEYSTORE_TYPE_DEFAULT));
|
||||
keyPassword(getPassword(sslConf,
|
||||
SSLFactory.SSL_SERVER_KEYSTORE_KEYPASSWORD));
|
||||
trustStore(sslConf.get(SSLFactory.SSL_SERVER_TRUSTSTORE_LOCATION),
|
||||
getPassword(sslConf, SSLFactory.SSL_SERVER_TRUSTSTORE_PASSWORD),
|
||||
sslConf.get(SSLFactory.SSL_SERVER_TRUSTSTORE_TYPE,
|
||||
SSLFactory.SSL_SERVER_TRUSTSTORE_TYPE_DEFAULT));
|
||||
SSLFactory.SSL_SERVER_NEED_CLIENT_AUTH_DEFAULT);
|
||||
keyStore = sslConf.getTrimmed(SSLFactory.SSL_SERVER_KEYSTORE_LOCATION);
|
||||
if (keyStore == null || keyStore.isEmpty()) {
|
||||
throw new IOException(String.format("Property %s not specified",
|
||||
SSLFactory.SSL_SERVER_KEYSTORE_LOCATION));
|
||||
}
|
||||
keyStorePassword = getPasswordString(sslConf,
|
||||
SSLFactory.SSL_SERVER_KEYSTORE_PASSWORD);
|
||||
if (keyStorePassword == null) {
|
||||
throw new IOException(String.format("Property %s not specified",
|
||||
SSLFactory.SSL_SERVER_KEYSTORE_PASSWORD));
|
||||
}
|
||||
keyStoreType = sslConf.get(SSLFactory.SSL_SERVER_KEYSTORE_TYPE,
|
||||
SSLFactory.SSL_SERVER_KEYSTORE_TYPE_DEFAULT);
|
||||
keyPassword = getPasswordString(sslConf,
|
||||
SSLFactory.SSL_SERVER_KEYSTORE_KEYPASSWORD);
|
||||
trustStore = sslConf.get(SSLFactory.SSL_SERVER_TRUSTSTORE_LOCATION);
|
||||
trustStorePassword = getPasswordString(sslConf,
|
||||
SSLFactory.SSL_SERVER_TRUSTSTORE_PASSWORD);
|
||||
trustStoreType = sslConf.get(SSLFactory.SSL_SERVER_TRUSTSTORE_TYPE,
|
||||
SSLFactory.SSL_SERVER_TRUSTSTORE_TYPE_DEFAULT);
|
||||
excludeCiphers = sslConf.get(SSLFactory.SSL_SERVER_EXCLUDE_CIPHER_LIST);
|
||||
}
|
||||
|
||||
public HttpServer2 build() throws IOException {
|
||||
@ -471,7 +482,8 @@ private ServerConnector createHttpsChannelConnector(
|
||||
sslContextFactory.setTrustStorePassword(trustStorePassword);
|
||||
}
|
||||
if(null != excludeCiphers && !excludeCiphers.isEmpty()) {
|
||||
sslContextFactory.setExcludeCipherSuites(excludeCiphers.split(","));
|
||||
sslContextFactory.setExcludeCipherSuites(
|
||||
StringUtils.getTrimmedStrings(excludeCiphers));
|
||||
LOG.info("Excluded Cipher List:" + excludeCiphers);
|
||||
}
|
||||
|
||||
|
@ -0,0 +1,176 @@
|
||||
/**
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
* regarding copyright ownership. The ASF licenses this file
|
||||
* to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
package org.apache.hadoop.io.erasurecode;
|
||||
|
||||
import com.google.common.annotations.VisibleForTesting;
|
||||
import org.apache.hadoop.classification.InterfaceAudience;
|
||||
import org.apache.hadoop.io.erasurecode.rawcoder.NativeRSRawErasureCoderFactory;
|
||||
import org.apache.hadoop.io.erasurecode.rawcoder.NativeXORRawErasureCoderFactory;
|
||||
import org.apache.hadoop.io.erasurecode.rawcoder.RawErasureCoderFactory;
|
||||
import org.slf4j.Logger;
|
||||
import org.slf4j.LoggerFactory;
|
||||
|
||||
import java.util.ArrayList;
|
||||
import java.util.HashMap;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
import java.util.ServiceLoader;
|
||||
import java.util.Set;
|
||||
import java.util.stream.Collectors;
|
||||
|
||||
/**
|
||||
* This class registers all coder implementations.
|
||||
*
|
||||
* {@link CodecRegistry} maps codec names to coder factories. All coder
|
||||
* factories are dynamically identified and loaded using ServiceLoader.
|
||||
*/
|
||||
@InterfaceAudience.Private
|
||||
public final class CodecRegistry {
|
||||
|
||||
private static final Logger LOG =
|
||||
LoggerFactory.getLogger(CodecRegistry.class);
|
||||
|
||||
private static CodecRegistry instance = new CodecRegistry();
|
||||
|
||||
public static CodecRegistry getInstance() {
|
||||
return instance;
|
||||
}
|
||||
|
||||
private Map<String, List<RawErasureCoderFactory>> coderMap;
|
||||
|
||||
private Map<String, String[]> coderNameMap;
|
||||
|
||||
private CodecRegistry() {
|
||||
coderMap = new HashMap<>();
|
||||
coderNameMap = new HashMap<>();
|
||||
final ServiceLoader<RawErasureCoderFactory> coderFactories =
|
||||
ServiceLoader.load(RawErasureCoderFactory.class);
|
||||
updateCoders(coderFactories);
|
||||
}
|
||||
|
||||
/**
|
||||
* Update coderMap and coderNameMap with iterable type of coder factories.
|
||||
* @param coderFactories
|
||||
*/
|
||||
@VisibleForTesting
|
||||
void updateCoders(Iterable<RawErasureCoderFactory> coderFactories) {
|
||||
for (RawErasureCoderFactory coderFactory : coderFactories) {
|
||||
String codecName = coderFactory.getCodecName();
|
||||
List<RawErasureCoderFactory> coders = coderMap.get(codecName);
|
||||
if (coders == null) {
|
||||
coders = new ArrayList<>();
|
||||
coders.add(coderFactory);
|
||||
coderMap.put(codecName, coders);
|
||||
LOG.debug("Codec registered: codec = {}, coder = {}",
|
||||
coderFactory.getCodecName(), coderFactory.getCoderName());
|
||||
} else {
|
||||
Boolean hasConflit = false;
|
||||
for (RawErasureCoderFactory coder : coders) {
|
||||
if (coder.getCoderName().equals(coderFactory.getCoderName())) {
|
||||
hasConflit = true;
|
||||
LOG.error("Coder {} cannot be registered because its coder name " +
|
||||
"{} has conflict with {}", coderFactory.getClass().getName(),
|
||||
coderFactory.getCoderName(), coder.getClass().getName());
|
||||
break;
|
||||
}
|
||||
}
|
||||
if (!hasConflit) {
|
||||
// set native coders as default if user does not
|
||||
// specify a fallback order
|
||||
if (coderFactory instanceof NativeRSRawErasureCoderFactory ||
|
||||
coderFactory instanceof NativeXORRawErasureCoderFactory) {
|
||||
coders.add(0, coderFactory);
|
||||
} else {
|
||||
coders.add(coderFactory);
|
||||
}
|
||||
LOG.debug("Codec registered: codec = {}, coder = {}",
|
||||
coderFactory.getCodecName(), coderFactory.getCoderName());
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// update coderNameMap accordingly
|
||||
coderNameMap.clear();
|
||||
for (Map.Entry<String, List<RawErasureCoderFactory>> entry :
|
||||
coderMap.entrySet()) {
|
||||
String codecName = entry.getKey();
|
||||
List<RawErasureCoderFactory> coders = entry.getValue();
|
||||
coderNameMap.put(codecName, coders.stream().
|
||||
map(RawErasureCoderFactory::getCoderName).
|
||||
collect(Collectors.toList()).toArray(new String[0]));
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Get all coder names of the given codec.
|
||||
* @param codecName the name of codec
|
||||
* @return an array of all coder names
|
||||
*/
|
||||
public String[] getCoderNames(String codecName) {
|
||||
String[] coderNames = coderNameMap.get(codecName);
|
||||
if (coderNames == null) {
|
||||
throw new IllegalArgumentException("No available raw coder factory for "
|
||||
+ codecName);
|
||||
}
|
||||
return coderNames;
|
||||
}
|
||||
|
||||
/**
|
||||
* Get all coder factories of the given codec.
|
||||
* @param codecName the name of codec
|
||||
* @return a list of all coder factories
|
||||
*/
|
||||
public List<RawErasureCoderFactory> getCoders(String codecName) {
|
||||
List<RawErasureCoderFactory> coders = coderMap.get(codecName);
|
||||
if (coders == null) {
|
||||
throw new IllegalArgumentException("No available raw coder factory for "
|
||||
+ codecName);
|
||||
}
|
||||
return coders;
|
||||
}
|
||||
|
||||
/**
|
||||
* Get all codec names.
|
||||
* @return a set of all codec names
|
||||
*/
|
||||
public Set<String> getCodecNames() {
|
||||
return coderMap.keySet();
|
||||
}
|
||||
|
||||
/**
|
||||
* Get a specific coder factory defined by codec name and coder name.
|
||||
* @param codecName name of the codec
|
||||
* @param coderName name of the coder
|
||||
* @return the specific coder
|
||||
*/
|
||||
public RawErasureCoderFactory getCoderByName(
|
||||
String codecName, String coderName) {
|
||||
List<RawErasureCoderFactory> coders = getCoders(codecName);
|
||||
|
||||
// find the RawErasureCoderFactory with the name of coderName
|
||||
for (RawErasureCoderFactory coder : coders) {
|
||||
if (coder.getCoderName().equals(coderName)) {
|
||||
return coder;
|
||||
}
|
||||
}
|
||||
|
||||
// if not found, throw exception
|
||||
throw new IllegalArgumentException("No implementation for coder "
|
||||
+ coderName + " of codec " + codecName);
|
||||
}
|
||||
}
|
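CodecRegistry above is a process-wide singleton, so callers query it by codec name and coder name rather than by factory class. A short sketch of the query side; it is illustrative, not part of the commit, and the output depends on which RawErasureCoderFactory implementations ServiceLoader finds on the classpath:

import org.apache.hadoop.io.erasurecode.CodecRegistry;
import org.apache.hadoop.io.erasurecode.rawcoder.RawErasureCoderFactory;

public class CodecRegistryDemo {
  public static void main(String[] args) {
    CodecRegistry registry = CodecRegistry.getInstance();

    // All codec names discovered via ServiceLoader, e.g. rs, rs-legacy, xor.
    System.out.println(registry.getCodecNames());

    // Coder names for one codec, in fallback order (native coders first).
    for (String coderName : registry.getCoderNames("rs")) {
      RawErasureCoderFactory factory = registry.getCoderByName("rs", coderName);
      System.out.println(coderName + " -> " + factory.getClass().getName());
    }
  }
}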
@ -18,8 +18,6 @@
|
||||
package org.apache.hadoop.io.erasurecode;
|
||||
|
||||
import com.google.common.base.Preconditions;
|
||||
import com.google.common.base.Splitter;
|
||||
import com.google.common.collect.ImmutableMap;
|
||||
import org.apache.commons.logging.Log;
|
||||
import org.apache.commons.logging.LogFactory;
|
||||
import org.apache.hadoop.classification.InterfaceAudience;
|
||||
@ -30,18 +28,12 @@
|
||||
import org.apache.hadoop.io.erasurecode.codec.XORErasureCodec;
|
||||
import org.apache.hadoop.io.erasurecode.coder.ErasureDecoder;
|
||||
import org.apache.hadoop.io.erasurecode.coder.ErasureEncoder;
|
||||
import org.apache.hadoop.io.erasurecode.rawcoder.NativeRSRawErasureCoderFactory;
|
||||
import org.apache.hadoop.io.erasurecode.rawcoder.NativeXORRawErasureCoderFactory;
|
||||
import org.apache.hadoop.io.erasurecode.rawcoder.RSRawErasureCoderFactory;
|
||||
import org.apache.hadoop.io.erasurecode.rawcoder.RSRawErasureCoderFactoryLegacy;
|
||||
import org.apache.hadoop.io.erasurecode.rawcoder.RawErasureCoderFactory;
|
||||
import org.apache.hadoop.io.erasurecode.rawcoder.RawErasureDecoder;
|
||||
import org.apache.hadoop.io.erasurecode.rawcoder.RawErasureEncoder;
|
||||
import org.apache.hadoop.io.erasurecode.rawcoder.XORRawErasureCoderFactory;
|
||||
|
||||
import java.lang.reflect.Constructor;
|
||||
import java.lang.reflect.InvocationTargetException;
|
||||
import java.util.Map;
|
||||
|
||||
/**
|
||||
* A codec & coder utility to help create coders conveniently.
|
||||
@ -79,27 +71,12 @@ public final class CodecUtil {
|
||||
/** Comma separated raw codec name. The first coder is prior to the latter. */
|
||||
public static final String IO_ERASURECODE_CODEC_RS_LEGACY_RAWCODERS_KEY =
|
||||
IO_ERASURECODE_CODEC + "rs-legacy.rawcoders";
|
||||
public static final String IO_ERASURECODE_CODEC_RS_LEGACY_RAWCODERS_DEFAULT =
|
||||
RSRawErasureCoderFactoryLegacy.class.getCanonicalName();
|
||||
public static final String IO_ERASURECODE_CODEC_RS_RAWCODERS_KEY =
|
||||
IO_ERASURECODE_CODEC + "rs.rawcoders";
|
||||
public static final String IO_ERASURECODE_CODEC_RS_RAWCODERS_DEFAULT =
|
||||
NativeRSRawErasureCoderFactory.class.getCanonicalName() +
|
||||
"," + RSRawErasureCoderFactory.class.getCanonicalName();
|
||||
|
||||
/** Raw coder factory for the XOR codec. */
|
||||
public static final String IO_ERASURECODE_CODEC_XOR_RAWCODERS_KEY =
|
||||
IO_ERASURECODE_CODEC + "xor.rawcoders";
|
||||
public static final String IO_ERASURECODE_CODEC_XOR_RAWCODERS_DEFAULT =
|
||||
NativeXORRawErasureCoderFactory.class.getCanonicalName() +
|
||||
"," + XORRawErasureCoderFactory.class.getCanonicalName();
|
||||
|
||||
// Default coders for each codec names.
|
||||
public static final Map<String, String> DEFAULT_CODERS_MAP = ImmutableMap.of(
|
||||
"rs", IO_ERASURECODE_CODEC_RS_RAWCODERS_DEFAULT,
|
||||
"rs-legacy", IO_ERASURECODE_CODEC_RS_LEGACY_RAWCODERS_DEFAULT,
|
||||
"xor", IO_ERASURECODE_CODEC_XOR_RAWCODERS_DEFAULT
|
||||
);
|
||||
|
||||
private CodecUtil() { }
|
||||
|
||||
@ -168,70 +145,61 @@ public static RawErasureDecoder createRawDecoder(
|
||||
}
|
||||
|
||||
private static RawErasureCoderFactory createRawCoderFactory(
|
||||
Configuration conf, String rawCoderFactoryKey) {
|
||||
String coderName, String codecName) {
|
||||
RawErasureCoderFactory fact;
|
||||
try {
|
||||
Class<? extends RawErasureCoderFactory> factClass = conf.getClassByName(
|
||||
rawCoderFactoryKey).asSubclass(RawErasureCoderFactory.class);
|
||||
fact = factClass.newInstance();
|
||||
} catch (ClassNotFoundException | InstantiationException |
|
||||
IllegalAccessException e) {
|
||||
throw new RuntimeException("Failed to create raw coder factory", e);
|
||||
}
|
||||
|
||||
if (fact == null) {
|
||||
throw new RuntimeException("Failed to create raw coder factory");
|
||||
}
|
||||
fact = CodecRegistry.getInstance().
|
||||
getCoderByName(codecName, coderName);
|
||||
|
||||
return fact;
|
||||
}
|
||||
|
||||
// Return comma separated coder names
|
||||
private static String getRawCoders(Configuration conf, String codec) {
|
||||
return conf.get(
|
||||
IO_ERASURECODE_CODEC + codec + ".rawcoders",
|
||||
DEFAULT_CODERS_MAP.getOrDefault(codec, codec)
|
||||
// Return a list of coder names
|
||||
private static String[] getRawCoderNames(
|
||||
Configuration conf, String codecName) {
|
||||
return conf.getStrings(
|
||||
IO_ERASURECODE_CODEC + codecName + ".rawcoders",
|
||||
CodecRegistry.getInstance().getCoderNames(codecName)
|
||||
);
|
||||
}
|
||||
|
||||
private static RawErasureEncoder createRawEncoderWithFallback(
|
||||
Configuration conf, String codec, ErasureCoderOptions coderOptions) {
|
||||
String coders = getRawCoders(conf, codec);
|
||||
for (String factName : Splitter.on(",").split(coders)) {
|
||||
Configuration conf, String codecName, ErasureCoderOptions coderOptions) {
|
||||
String[] rawCoderNames = getRawCoderNames(conf, codecName);
|
||||
for (String rawCoderName : rawCoderNames) {
|
||||
try {
|
||||
if (factName != null) {
|
||||
RawErasureCoderFactory fact = createRawCoderFactory(conf,
|
||||
factName);
|
||||
if (rawCoderName != null) {
|
||||
RawErasureCoderFactory fact = createRawCoderFactory(
|
||||
rawCoderName, codecName);
|
||||
return fact.createEncoder(coderOptions);
|
||||
}
|
||||
} catch (LinkageError | Exception e) {
|
||||
// Fallback to next coder if possible
|
||||
LOG.warn("Failed to create raw erasure encoder " + factName +
|
||||
LOG.warn("Failed to create raw erasure encoder " + rawCoderName +
|
||||
", fallback to next codec if possible", e);
|
||||
}
|
||||
}
|
||||
throw new IllegalArgumentException("Fail to create raw erasure " +
|
||||
"encoder with given codec: " + codec);
|
||||
"encoder with given codec: " + codecName);
|
||||
}
|
||||
|
||||
private static RawErasureDecoder createRawDecoderWithFallback(
|
||||
Configuration conf, String codec, ErasureCoderOptions coderOptions) {
|
||||
String coders = getRawCoders(conf, codec);
|
||||
for (String factName : Splitter.on(",").split(coders)) {
|
||||
Configuration conf, String codecName, ErasureCoderOptions coderOptions) {
|
||||
String[] coders = getRawCoderNames(conf, codecName);
|
||||
for (String rawCoderName : coders) {
|
||||
try {
|
||||
if (factName != null) {
|
||||
RawErasureCoderFactory fact = createRawCoderFactory(conf,
|
||||
factName);
|
||||
if (rawCoderName != null) {
|
||||
RawErasureCoderFactory fact = createRawCoderFactory(
|
||||
rawCoderName, codecName);
|
||||
return fact.createDecoder(coderOptions);
|
||||
}
|
||||
} catch (LinkageError | Exception e) {
|
||||
// Fallback to next coder if possible
|
||||
LOG.warn("Failed to create raw erasure decoder " + factName +
|
||||
LOG.warn("Failed to create raw erasure decoder " + rawCoderName +
|
||||
", fallback to next codec if possible", e);
|
||||
}
|
||||
}
|
||||
throw new IllegalArgumentException("Fail to create raw erasure " +
|
||||
"encoder with given codec: " + codec);
|
||||
"encoder with given codec: " + codecName);
|
||||
}
|
||||
|
||||
private static ErasureCodec createCodec(Configuration conf,
|
||||
|
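With this change the io.erasurecode.codec.<codec>.rawcoders keys take coder names instead of factory class names, and the fallback order defaults to what CodecRegistry reports. A hedged sketch of overriding the order for the rs codec; the coder names come from the CODER_NAME constants elsewhere in this commit, and the 6+3 layout is only an example:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.erasurecode.CodecUtil;
import org.apache.hadoop.io.erasurecode.ErasureCoderOptions;
import org.apache.hadoop.io.erasurecode.rawcoder.RawErasureEncoder;

public class RawCoderConfigDemo {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // Prefer the pure-Java RS coder and fall back to the native one,
    // using coder names rather than factory class names.
    conf.set(CodecUtil.IO_ERASURECODE_CODEC_RS_RAWCODERS_KEY, "rs_java,rs_native");

    // 6 data units + 3 parity units, as in the common RS-6-3 layout.
    ErasureCoderOptions options = new ErasureCoderOptions(6, 3);
    RawErasureEncoder encoder = CodecUtil.createRawEncoder(conf, "rs", options);
    System.out.println(encoder.getClass().getName());
  }
}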
@ -21,6 +21,8 @@
|
||||
import java.util.HashMap;
|
||||
import java.util.Map;
|
||||
|
||||
import org.apache.commons.lang.builder.EqualsBuilder;
|
||||
import org.apache.commons.lang.builder.HashCodeBuilder;
|
||||
import org.apache.hadoop.classification.InterfaceAudience;
|
||||
import org.apache.hadoop.classification.InterfaceStability;
|
||||
|
||||
@ -197,36 +199,34 @@ public String toString() {
|
||||
return sb.toString();
|
||||
}
|
||||
|
||||
// Todo: Further use `extraOptions` to compare ECSchemas
|
||||
@Override
|
||||
public boolean equals(Object o) {
|
||||
if (this == o) {
|
||||
if (o == null) {
|
||||
return false;
|
||||
}
|
||||
if (o == this) {
|
||||
return true;
|
||||
}
|
||||
if (o == null || getClass() != o.getClass()) {
|
||||
if (o.getClass() != getClass()) {
|
||||
return false;
|
||||
}
|
||||
|
||||
ECSchema ecSchema = (ECSchema) o;
|
||||
|
||||
if (numDataUnits != ecSchema.numDataUnits) {
|
||||
return false;
|
||||
}
|
||||
if (numParityUnits != ecSchema.numParityUnits) {
|
||||
return false;
|
||||
}
|
||||
if (!codecName.equals(ecSchema.codecName)) {
|
||||
return false;
|
||||
}
|
||||
return extraOptions.equals(ecSchema.extraOptions);
|
||||
ECSchema rhs = (ECSchema) o;
|
||||
return new EqualsBuilder()
|
||||
.append(codecName, rhs.codecName)
|
||||
.append(extraOptions, rhs.extraOptions)
|
||||
.append(numDataUnits, rhs.numDataUnits)
|
||||
.append(numParityUnits, rhs.numParityUnits)
|
||||
.isEquals();
|
||||
}
|
||||
|
||||
@Override
|
||||
public int hashCode() {
|
||||
int result = codecName.hashCode();
|
||||
result = 31 * result + extraOptions.hashCode();
|
||||
result = 31 * result + numDataUnits;
|
||||
result = 31 * result + numParityUnits;
|
||||
|
||||
return result;
|
||||
return new HashCodeBuilder(1273158869, 1555022101)
|
||||
.append(codecName)
|
||||
.append(extraOptions)
|
||||
.append(numDataUnits)
|
||||
.append(numParityUnits)
|
||||
.toHashCode();
|
||||
}
|
||||
}
|
||||
|
@ -25,6 +25,7 @@ public final class ErasureCodeConstants {
private ErasureCodeConstants() {
}

public static final String DUMMY_CODEC_NAME = "dummy";
public static final String RS_CODEC_NAME = "rs";
public static final String RS_LEGACY_CODEC_NAME = "rs-legacy";
public static final String XOR_CODEC_NAME = "xor";
@ -18,6 +18,7 @@
|
||||
package org.apache.hadoop.io.erasurecode.rawcoder;
|
||||
|
||||
import org.apache.hadoop.classification.InterfaceAudience;
|
||||
import org.apache.hadoop.io.erasurecode.ErasureCodeConstants;
|
||||
import org.apache.hadoop.io.erasurecode.ErasureCoderOptions;
|
||||
|
||||
/**
|
||||
@ -25,6 +26,7 @@
|
||||
*/
|
||||
@InterfaceAudience.Private
|
||||
public class DummyRawErasureCoderFactory implements RawErasureCoderFactory {
|
||||
public static final String CODER_NAME = "dummy_dummy";
|
||||
|
||||
@Override
|
||||
public RawErasureEncoder createEncoder(ErasureCoderOptions coderOptions) {
|
||||
@ -35,4 +37,14 @@ public RawErasureEncoder createEncoder(ErasureCoderOptions coderOptions) {
|
||||
public RawErasureDecoder createDecoder(ErasureCoderOptions coderOptions) {
|
||||
return new DummyRawDecoder(coderOptions);
|
||||
}
|
||||
|
||||
@Override
|
||||
public String getCoderName() {
|
||||
return CODER_NAME;
|
||||
}
|
||||
|
||||
@Override
|
||||
public String getCodecName() {
|
||||
return ErasureCodeConstants.DUMMY_CODEC_NAME;
|
||||
}
|
||||
}
|
||||
|
@ -18,6 +18,7 @@
|
||||
package org.apache.hadoop.io.erasurecode.rawcoder;
|
||||
|
||||
import org.apache.hadoop.classification.InterfaceAudience;
|
||||
import org.apache.hadoop.io.erasurecode.ErasureCodeConstants;
|
||||
import org.apache.hadoop.io.erasurecode.ErasureCoderOptions;
|
||||
|
||||
/**
|
||||
@ -27,6 +28,8 @@
|
||||
@InterfaceAudience.Private
|
||||
public class NativeRSRawErasureCoderFactory implements RawErasureCoderFactory {
|
||||
|
||||
public static final String CODER_NAME = "rs_native";
|
||||
|
||||
@Override
|
||||
public RawErasureEncoder createEncoder(ErasureCoderOptions coderOptions) {
|
||||
return new NativeRSRawEncoder(coderOptions);
|
||||
@ -36,4 +39,14 @@ public RawErasureEncoder createEncoder(ErasureCoderOptions coderOptions) {
|
||||
public RawErasureDecoder createDecoder(ErasureCoderOptions coderOptions) {
|
||||
return new NativeRSRawDecoder(coderOptions);
|
||||
}
|
||||
|
||||
@Override
|
||||
public String getCoderName() {
|
||||
return CODER_NAME;
|
||||
}
|
||||
|
||||
@Override
|
||||
public String getCodecName() {
|
||||
return ErasureCodeConstants.RS_CODEC_NAME;
|
||||
}
|
||||
}
|
||||
|
@ -18,6 +18,7 @@
|
||||
package org.apache.hadoop.io.erasurecode.rawcoder;
|
||||
|
||||
import org.apache.hadoop.classification.InterfaceAudience;
|
||||
import org.apache.hadoop.io.erasurecode.ErasureCodeConstants;
|
||||
import org.apache.hadoop.io.erasurecode.ErasureCoderOptions;
|
||||
|
||||
/**
|
||||
@ -27,6 +28,8 @@
|
||||
@InterfaceAudience.Private
|
||||
public class NativeXORRawErasureCoderFactory implements RawErasureCoderFactory {
|
||||
|
||||
public static final String CODER_NAME = "xor_native";
|
||||
|
||||
@Override
|
||||
public RawErasureEncoder createEncoder(ErasureCoderOptions coderOptions) {
|
||||
return new NativeXORRawEncoder(coderOptions);
|
||||
@ -36,4 +39,14 @@ public RawErasureEncoder createEncoder(ErasureCoderOptions coderOptions) {
|
||||
public RawErasureDecoder createDecoder(ErasureCoderOptions coderOptions) {
|
||||
return new NativeXORRawDecoder(coderOptions);
|
||||
}
|
||||
|
||||
@Override
|
||||
public String getCoderName() {
|
||||
return CODER_NAME;
|
||||
}
|
||||
|
||||
@Override
|
||||
public String getCodecName() {
|
||||
return ErasureCodeConstants.XOR_CODEC_NAME;
|
||||
}
|
||||
}
|
||||
|
@ -34,12 +34,12 @@
|
||||
* addressed in HADOOP-11871.
|
||||
*/
|
||||
@InterfaceAudience.Private
|
||||
public class RSRawDecoderLegacy extends RawErasureDecoder {
|
||||
public class RSLegacyRawDecoder extends RawErasureDecoder {
|
||||
// To describe and calculate the needed Vandermonde matrix
|
||||
private int[] errSignature;
|
||||
private int[] primitivePower;
|
||||
|
||||
public RSRawDecoderLegacy(ErasureCoderOptions coderOptions) {
|
||||
public RSLegacyRawDecoder(ErasureCoderOptions coderOptions) {
|
||||
super(coderOptions);
|
||||
if (getNumAllUnits() >= RSUtil.GF.getFieldSize()) {
|
||||
throw new HadoopIllegalArgumentException(
|
@ -30,10 +30,10 @@
|
||||
* when possible.
|
||||
*/
|
||||
@InterfaceAudience.Private
|
||||
public class RSRawEncoderLegacy extends RawErasureEncoder {
|
||||
public class RSLegacyRawEncoder extends RawErasureEncoder {
|
||||
private int[] generatingPolynomial;
|
||||
|
||||
public RSRawEncoderLegacy(ErasureCoderOptions coderOptions) {
|
||||
public RSLegacyRawEncoder(ErasureCoderOptions coderOptions) {
|
||||
super(coderOptions);
|
||||
|
||||
assert (getNumDataUnits() + getNumParityUnits() < RSUtil.GF.getFieldSize());
|
@ -18,21 +18,34 @@
|
||||
package org.apache.hadoop.io.erasurecode.rawcoder;
|
||||
|
||||
import org.apache.hadoop.classification.InterfaceAudience;
|
||||
import org.apache.hadoop.io.erasurecode.ErasureCodeConstants;
|
||||
import org.apache.hadoop.io.erasurecode.ErasureCoderOptions;
|
||||
|
||||
/**
|
||||
* A raw coder factory for the legacy raw Reed-Solomon coder in Java.
|
||||
*/
|
||||
@InterfaceAudience.Private
|
||||
public class RSRawErasureCoderFactoryLegacy implements RawErasureCoderFactory {
|
||||
public class RSLegacyRawErasureCoderFactory implements RawErasureCoderFactory {
|
||||
|
||||
public static final String CODER_NAME = "rs-legacy_java";
|
||||
|
||||
@Override
|
||||
public RawErasureEncoder createEncoder(ErasureCoderOptions coderOptions) {
|
||||
return new RSRawEncoderLegacy(coderOptions);
|
||||
return new RSLegacyRawEncoder(coderOptions);
|
||||
}
|
||||
|
||||
@Override
|
||||
public RawErasureDecoder createDecoder(ErasureCoderOptions coderOptions) {
|
||||
return new RSRawDecoderLegacy(coderOptions);
|
||||
return new RSLegacyRawDecoder(coderOptions);
|
||||
}
|
||||
|
||||
@Override
|
||||
public String getCoderName() {
|
||||
return CODER_NAME;
|
||||
}
|
||||
|
||||
@Override
|
||||
public String getCodecName() {
|
||||
return ErasureCodeConstants.RS_LEGACY_CODEC_NAME;
|
||||
}
|
||||
}
|
@ -18,6 +18,7 @@
|
||||
package org.apache.hadoop.io.erasurecode.rawcoder;
|
||||
|
||||
import org.apache.hadoop.classification.InterfaceAudience;
|
||||
import org.apache.hadoop.io.erasurecode.ErasureCodeConstants;
|
||||
import org.apache.hadoop.io.erasurecode.ErasureCoderOptions;
|
||||
|
||||
/**
|
||||
@ -26,6 +27,8 @@
|
||||
@InterfaceAudience.Private
|
||||
public class RSRawErasureCoderFactory implements RawErasureCoderFactory {
|
||||
|
||||
public static final String CODER_NAME = "rs_java";
|
||||
|
||||
@Override
|
||||
public RawErasureEncoder createEncoder(ErasureCoderOptions coderOptions) {
|
||||
return new RSRawEncoder(coderOptions);
|
||||
@ -35,4 +38,14 @@ public RawErasureEncoder createEncoder(ErasureCoderOptions coderOptions) {
|
||||
public RawErasureDecoder createDecoder(ErasureCoderOptions coderOptions) {
|
||||
return new RSRawDecoder(coderOptions);
|
||||
}
|
||||
|
||||
@Override
|
||||
public String getCoderName() {
|
||||
return CODER_NAME;
|
||||
}
|
||||
|
||||
@Override
|
||||
public String getCodecName() {
|
||||
return ErasureCodeConstants.RS_CODEC_NAME;
|
||||
}
|
||||
}
|
||||
|
@ -41,4 +41,16 @@ public interface RawErasureCoderFactory {
* @return raw erasure decoder
*/
RawErasureDecoder createDecoder(ErasureCoderOptions coderOptions);

/**
* Get the name of the coder.
* @return coder name
*/
String getCoderName();

/**
* Get the name of its codec.
* @return codec name
*/
String getCodecName();
}
@ -18,6 +18,7 @@
|
||||
package org.apache.hadoop.io.erasurecode.rawcoder;
|
||||
|
||||
import org.apache.hadoop.classification.InterfaceAudience;
|
||||
import org.apache.hadoop.io.erasurecode.ErasureCodeConstants;
|
||||
import org.apache.hadoop.io.erasurecode.ErasureCoderOptions;
|
||||
|
||||
/**
|
||||
@ -26,6 +27,8 @@
|
||||
@InterfaceAudience.Private
|
||||
public class XORRawErasureCoderFactory implements RawErasureCoderFactory {
|
||||
|
||||
public static final String CODER_NAME = "xor_java";
|
||||
|
||||
@Override
|
||||
public RawErasureEncoder createEncoder(ErasureCoderOptions coderOptions) {
|
||||
return new XORRawEncoder(coderOptions);
|
||||
@ -35,4 +38,14 @@ public RawErasureEncoder createEncoder(ErasureCoderOptions coderOptions) {
|
||||
public RawErasureDecoder createDecoder(ErasureCoderOptions coderOptions) {
|
||||
return new XORRawDecoder(coderOptions);
|
||||
}
|
||||
|
||||
@Override
|
||||
public String getCoderName() {
|
||||
return CODER_NAME;
|
||||
}
|
||||
|
||||
@Override
|
||||
public String getCodecName() {
|
||||
return ErasureCodeConstants.XOR_CODEC_NAME;
|
||||
}
|
||||
}
|
||||
|
@ -261,6 +261,7 @@ public synchronized void swapQueue(
Class<? extends BlockingQueue<E>> queueClassToUse, int maxSize,
String ns, Configuration conf) {
int priorityLevels = parseNumLevels(ns, conf);
this.scheduler.stop();
RpcScheduler newScheduler = createScheduler(schedulerClass, priorityLevels,
ns, conf);
BlockingQueue<E> newQ = createCallQueueInstance(queueClassToUse,
@ -33,6 +33,8 @@
|
||||
import java.util.concurrent.atomic.AtomicLongArray;
|
||||
import java.util.concurrent.atomic.AtomicReference;
|
||||
|
||||
import javax.management.ObjectName;
|
||||
|
||||
import com.fasterxml.jackson.databind.ObjectMapper;
|
||||
import com.google.common.base.Preconditions;
|
||||
import com.google.common.util.concurrent.AtomicDoubleArray;
|
||||
@ -162,6 +164,7 @@ public class DecayRpcScheduler implements RpcScheduler,
|
||||
private final String namespace;
|
||||
private final int topUsersCount; // e.g., report top 10 users' metrics
|
||||
private static final double PRECISION = 0.0001;
|
||||
private MetricsProxy metricsProxy;
|
||||
|
||||
/**
|
||||
* This TimerTask will call decayCurrentCounts until
|
||||
@ -230,9 +233,8 @@ public DecayRpcScheduler(int numLevels, String ns, Configuration conf) {
|
||||
DecayTask task = new DecayTask(this, timer);
|
||||
timer.scheduleAtFixedRate(task, decayPeriodMillis, decayPeriodMillis);
|
||||
|
||||
MetricsProxy prox = MetricsProxy.getInstance(ns, numLevels);
|
||||
prox.setDelegate(this);
|
||||
prox.registerMetrics2Source(ns);
|
||||
metricsProxy = MetricsProxy.getInstance(ns, numLevels);
|
||||
metricsProxy.setDelegate(this);
|
||||
}
|
||||
|
||||
// Load configs
|
||||
@ -671,11 +673,14 @@ public static final class MetricsProxy implements DecayRpcSchedulerMXBean,
|
||||
private WeakReference<DecayRpcScheduler> delegate;
|
||||
private double[] averageResponseTimeDefault;
|
||||
private long[] callCountInLastWindowDefault;
|
||||
private ObjectName decayRpcSchedulerInfoBeanName;
|
||||
|
||||
private MetricsProxy(String namespace, int numLevels) {
|
||||
averageResponseTimeDefault = new double[numLevels];
|
||||
callCountInLastWindowDefault = new long[numLevels];
|
||||
MBeans.register(namespace, "DecayRpcScheduler", this);
|
||||
decayRpcSchedulerInfoBeanName =
|
||||
MBeans.register(namespace, "DecayRpcScheduler", this);
|
||||
this.registerMetrics2Source(namespace);
|
||||
}
|
||||
|
||||
public static synchronized MetricsProxy getInstance(String namespace,
|
||||
@ -689,6 +694,10 @@ public static synchronized MetricsProxy getInstance(String namespace,
|
||||
return mp;
|
||||
}
|
||||
|
||||
public static synchronized void removeInstance(String namespace) {
|
||||
MetricsProxy.INSTANCES.remove(namespace);
|
||||
}
|
||||
|
||||
public void setDelegate(DecayRpcScheduler obj) {
|
||||
this.delegate = new WeakReference<DecayRpcScheduler>(obj);
|
||||
}
|
||||
@ -698,6 +707,14 @@ void registerMetrics2Source(String namespace) {
|
||||
DefaultMetricsSystem.instance().register(name, name, this);
|
||||
}
|
||||
|
||||
void unregisterSource(String namespace) {
|
||||
final String name = "DecayRpcSchedulerMetrics2." + namespace;
|
||||
DefaultMetricsSystem.instance().unregisterSource(name);
|
||||
if (decayRpcSchedulerInfoBeanName != null) {
|
||||
MBeans.unregister(decayRpcSchedulerInfoBeanName);
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public String getSchedulingDecisionSummary() {
|
||||
DecayRpcScheduler scheduler = delegate.get();
|
||||
@ -921,4 +938,10 @@ private Map<Object, Long> getDecayedCallCounts() {
|
||||
}
|
||||
return decayedCallCounts;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void stop() {
|
||||
metricsProxy.unregisterSource(namespace);
|
||||
MetricsProxy.removeInstance(namespace);
|
||||
}
|
||||
}
|
||||
|
@ -42,4 +42,8 @@ public void addResponseTime(String name, int priorityLevel, int queueTime,
|
||||
public DefaultRpcScheduler(int priorityLevels, String namespace,
|
||||
Configuration conf) {
|
||||
}
|
||||
|
||||
@Override
|
||||
public void stop() {
|
||||
}
|
||||
}
|
||||
|
@ -32,4 +32,6 @@ public interface RpcScheduler {
|
||||
|
||||
void addResponseTime(String name, int priorityLevel, int queueTime,
|
||||
int processingTime);
|
||||
|
||||
void stop();
|
||||
}
|
||||
|
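A hedged sketch, not from the patch, of how a caller honours the new RpcScheduler#stop() contract when replacing a scheduler, mirroring the swapQueue() change above; the holder class and field names are illustrative.

import org.apache.hadoop.ipc.RpcScheduler;

class SchedulerHolder {
  private RpcScheduler scheduler;

  synchronized void swap(RpcScheduler replacement) {
    if (scheduler != null) {
      // stop() releases the outgoing scheduler's timers and metrics sources,
      // e.g. DecayRpcScheduler's MetricsProxy registration.
      scheduler.stop();
    }
    scheduler = replacement;
  }
}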
@ -18,13 +18,14 @@
|
||||
|
||||
package org.apache.hadoop.metrics2;
|
||||
|
||||
import com.google.common.base.MoreObjects;
|
||||
import com.google.common.base.Objects;
|
||||
import static com.google.common.base.Preconditions.*;
|
||||
|
||||
import org.apache.hadoop.classification.InterfaceAudience;
|
||||
import org.apache.hadoop.classification.InterfaceStability;
|
||||
|
||||
import java.util.StringJoiner;
|
||||
|
||||
import static com.google.common.base.Preconditions.checkNotNull;
|
||||
|
||||
/**
|
||||
* The immutable metric
|
||||
*/
|
||||
@ -84,10 +85,11 @@ protected MetricsInfo info() {
|
||||
return Objects.hashCode(info, value());
|
||||
}
|
||||
|
||||
@Override public String toString() {
|
||||
return MoreObjects.toStringHelper(this)
|
||||
.add("info", info)
|
||||
.add("value", value())
|
||||
@Override
|
||||
public String toString() {
|
||||
return new StringJoiner(", ", this.getClass().getSimpleName() + "{", "}")
|
||||
.add("info=" + info)
|
||||
.add("value=" + value())
|
||||
.toString();
|
||||
}
|
||||
}
|
||||
|
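The same StringJoiner pattern, shown as a self-contained toy class (not part of the patch) to make the replacement of Guava's MoreObjects.toStringHelper explicit.

import java.util.StringJoiner;

class Sample {
  private final String info = "demo";
  private final long value = 42L;

  @Override
  public String toString() {
    // Renders as "Sample{info=demo, value=42}" using only the JDK.
    return new StringJoiner(", ", getClass().getSimpleName() + "{", "}")
        .add("info=" + info)
        .add("value=" + value)
        .toString();
  }
}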
@ -18,13 +18,14 @@
|
||||
|
||||
package org.apache.hadoop.metrics2;
|
||||
|
||||
import com.google.common.base.MoreObjects;
|
||||
import com.google.common.base.Objects;
|
||||
import static com.google.common.base.Preconditions.*;
|
||||
|
||||
import org.apache.hadoop.classification.InterfaceAudience;
|
||||
import org.apache.hadoop.classification.InterfaceStability;
|
||||
|
||||
import java.util.StringJoiner;
|
||||
|
||||
import static com.google.common.base.Preconditions.checkNotNull;
|
||||
|
||||
/**
|
||||
* Immutable tag for metrics (for grouping on host/queue/username etc.)
|
||||
*/
|
||||
@ -81,9 +82,9 @@ public String value() {
|
||||
}
|
||||
|
||||
@Override public String toString() {
|
||||
return MoreObjects.toStringHelper(this)
|
||||
.add("info", info)
|
||||
.add("value", value())
|
||||
return new StringJoiner(", ", this.getClass().getSimpleName() + "{", "}")
|
||||
.add("info=" + info)
|
||||
.add("value=" + value())
|
||||
.toString();
|
||||
}
|
||||
}
|
||||
|
@ -18,12 +18,12 @@
|
||||
|
||||
package org.apache.hadoop.metrics2.impl;
|
||||
|
||||
import com.google.common.base.MoreObjects;
|
||||
import com.google.common.base.Objects;
|
||||
import com.google.common.collect.Iterables;
|
||||
|
||||
import org.apache.hadoop.metrics2.MetricsRecord;
|
||||
|
||||
import java.util.StringJoiner;
|
||||
|
||||
abstract class AbstractMetricsRecord implements MetricsRecord {
|
||||
|
||||
@Override public boolean equals(Object obj) {
|
||||
@ -44,12 +44,12 @@ abstract class AbstractMetricsRecord implements MetricsRecord {
|
||||
}
|
||||
|
||||
@Override public String toString() {
|
||||
return MoreObjects.toStringHelper(this)
|
||||
.add("timestamp", timestamp())
|
||||
.add("name", name())
|
||||
.add("description", description())
|
||||
.add("tags", tags())
|
||||
.add("metrics", Iterables.toString(metrics()))
|
||||
return new StringJoiner(", ", this.getClass().getSimpleName() + "{", "}")
|
||||
.add("timestamp=" + timestamp())
|
||||
.add("name=" + name())
|
||||
.add("description=" + description())
|
||||
.add("tags=" + tags())
|
||||
.add("metrics=" + Iterables.toString(metrics()))
|
||||
.toString();
|
||||
}
|
||||
}
|
||||
|
@ -18,11 +18,11 @@
|
||||
|
||||
package org.apache.hadoop.metrics2.impl;
|
||||
|
||||
import com.google.common.base.MoreObjects;
|
||||
|
||||
import org.apache.hadoop.classification.InterfaceAudience;
|
||||
import org.apache.hadoop.metrics2.MetricsInfo;
|
||||
|
||||
import java.util.StringJoiner;
|
||||
|
||||
/**
|
||||
* Metrics system related metrics info instances
|
||||
*/
|
||||
@ -48,8 +48,9 @@ public enum MsInfo implements MetricsInfo {
|
||||
}
|
||||
|
||||
@Override public String toString() {
|
||||
return MoreObjects.toStringHelper(this)
|
||||
.add("name", name()).add("description", desc)
|
||||
return new StringJoiner(", ", this.getClass().getSimpleName() + "{", "}")
|
||||
.add("name=" + name())
|
||||
.add("description=" + desc)
|
||||
.toString();
|
||||
}
|
||||
}
|
||||
|
@ -18,11 +18,13 @@
|
||||
|
||||
package org.apache.hadoop.metrics2.lib;
|
||||
|
||||
import com.google.common.base.MoreObjects;
|
||||
import com.google.common.base.Objects;
|
||||
import static com.google.common.base.Preconditions.*;
|
||||
import org.apache.hadoop.metrics2.MetricsInfo;
|
||||
|
||||
import java.util.StringJoiner;
|
||||
|
||||
import static com.google.common.base.Preconditions.checkNotNull;
|
||||
|
||||
/**
|
||||
* Making implementing metric info a little easier
|
||||
*/
|
||||
@ -56,8 +58,9 @@ class MetricsInfoImpl implements MetricsInfo {
|
||||
}
|
||||
|
||||
@Override public String toString() {
|
||||
return MoreObjects.toStringHelper(this)
|
||||
.add("name", name).add("description", description)
|
||||
return new StringJoiner(", ", this.getClass().getSimpleName() + "{", "}")
|
||||
.add("name=" + name)
|
||||
.add("description=" + description)
|
||||
.toString();
|
||||
}
|
||||
}
|
||||
|
@ -18,20 +18,19 @@
|
||||
|
||||
package org.apache.hadoop.metrics2.lib;
|
||||
|
||||
import java.util.Collection;
|
||||
import java.util.Map;
|
||||
|
||||
import com.google.common.collect.Maps;
|
||||
import com.google.common.base.MoreObjects;
|
||||
|
||||
import org.apache.hadoop.classification.InterfaceAudience;
|
||||
import org.apache.hadoop.classification.InterfaceStability;
|
||||
import org.apache.hadoop.metrics2.MetricsInfo;
|
||||
import org.apache.hadoop.metrics2.MetricsException;
|
||||
import org.apache.hadoop.metrics2.MetricsInfo;
|
||||
import org.apache.hadoop.metrics2.MetricsRecordBuilder;
|
||||
import org.apache.hadoop.metrics2.MetricsTag;
|
||||
import org.apache.hadoop.metrics2.impl.MsInfo;
|
||||
|
||||
import java.util.Collection;
|
||||
import java.util.Map;
|
||||
import java.util.StringJoiner;
|
||||
|
||||
/**
|
||||
* An optional metrics registry class for creating and maintaining a
|
||||
* collection of MetricsMutables, making writing metrics source easier.
|
||||
@ -440,9 +439,12 @@ public synchronized void snapshot(MetricsRecordBuilder builder, boolean all) {
|
||||
}
|
||||
}
|
||||
|
||||
@Override public String toString() {
|
||||
return MoreObjects.toStringHelper(this)
|
||||
.add("info", metricsInfo).add("tags", tags()).add("metrics", metrics())
|
||||
@Override
|
||||
public String toString() {
|
||||
return new StringJoiner(", ", this.getClass().getSimpleName() + "{", "}")
|
||||
.add("info=" + metricsInfo.toString())
|
||||
.add("tags=" + tags())
|
||||
.add("metrics=" + metrics())
|
||||
.toString();
|
||||
}
|
||||
|
||||
|
@ -327,7 +327,7 @@ backend that can handle multiple contexts (file, gangalia etc.):
|
||||
<td><pre>
|
||||
myprefix.sink.*.class=org.apache.hadoop.metrics2.sink.FileSink
|
||||
myprefix.sink.file0.context=context0
|
||||
myprefix.sink.file0.filename=context1.out
|
||||
myprefix.sink.file0.filename=context0.out
|
||||
myprefix.sink.file1.context=context1
|
||||
myprefix.sink.file1.filename=context1.out
|
||||
...
|
||||
|
@ -21,7 +21,7 @@
|
||||
import org.apache.hadoop.classification.InterfaceAudience;
|
||||
import org.apache.hadoop.metrics2.MetricsInfo;
|
||||
|
||||
import com.google.common.base.MoreObjects;
|
||||
import java.util.StringJoiner;
|
||||
|
||||
/**
|
||||
* JVM and logging related metrics info instances
|
||||
@ -60,8 +60,9 @@ public enum JvmMetricsInfo implements MetricsInfo {
|
||||
@Override public String description() { return desc; }
|
||||
|
||||
@Override public String toString() {
|
||||
return MoreObjects.toStringHelper(this)
|
||||
.add("name", name()).add("description", desc)
|
||||
.toString();
|
||||
return new StringJoiner(", ", this.getClass().getSimpleName() + "{", "}")
|
||||
.add("name=" + name())
|
||||
.add("description=" + desc)
|
||||
.toString();
|
||||
}
|
||||
}
|
||||
|
@ -18,11 +18,7 @@
|
||||
|
||||
package org.apache.hadoop.metrics2.util;
|
||||
|
||||
import java.util.Collection;
|
||||
import java.util.LinkedHashMap;
|
||||
import java.util.Map;
|
||||
import java.util.Set;
|
||||
|
||||
import com.google.common.collect.Maps;
|
||||
import org.apache.commons.logging.Log;
|
||||
import org.apache.commons.logging.LogFactory;
|
||||
import org.apache.hadoop.classification.InterfaceAudience;
|
||||
@ -31,8 +27,11 @@
|
||||
import org.apache.hadoop.metrics2.MetricsRecord;
|
||||
import org.apache.hadoop.metrics2.MetricsTag;
|
||||
|
||||
import com.google.common.base.MoreObjects;
|
||||
import com.google.common.collect.Maps;
|
||||
import java.util.Collection;
|
||||
import java.util.LinkedHashMap;
|
||||
import java.util.Map;
|
||||
import java.util.Set;
|
||||
import java.util.StringJoiner;
|
||||
|
||||
/**
|
||||
* A metrics cache for sinks that don't support sparse updates.
|
||||
@ -127,8 +126,9 @@ public Set<Map.Entry<String, AbstractMetric>> metricsEntrySet() {
|
||||
}
|
||||
|
||||
@Override public String toString() {
|
||||
return MoreObjects.toStringHelper(this)
|
||||
.add("tags", tags).add("metrics", metrics)
|
||||
return new StringJoiner(", ", this.getClass().getSimpleName() + "{", "}")
|
||||
.add("tags=" + tags)
|
||||
.add("metrics=" + metrics)
|
||||
.toString();
|
||||
}
|
||||
}
|
||||
|
@ -130,8 +130,8 @@ public void add(Node node) {
|
||||
"Not allow to add an inner node: "+NodeBase.getPath(node));
|
||||
}
|
||||
if ((depthOfAllLeaves != -1) && (depthOfAllLeaves != newDepth)) {
|
||||
LOG.error("Error: can't add leaf node " + NodeBase.getPath(node) +
|
||||
" at depth " + newDepth + " to topology:\n" + this.toString());
|
||||
LOG.error("Error: can't add leaf node {} at depth {} to topology:{}\n",
|
||||
NodeBase.getPath(node), newDepth, this);
|
||||
throw new InvalidTopologyException("Failed to add " + NodeBase.getPath(node) +
|
||||
": You cannot have a rack and a non-rack node at the same " +
|
||||
"level of the network topology.");
|
||||
@ -153,7 +153,7 @@ public void add(Node node) {
|
||||
}
|
||||
}
|
||||
}
|
||||
LOG.debug("NetworkTopology became:\n{}", this.toString());
|
||||
LOG.debug("NetworkTopology became:\n{}", this);
|
||||
} finally {
|
||||
netlock.writeLock().unlock();
|
||||
}
|
||||
@ -226,7 +226,7 @@ public void remove(Node node) {
|
||||
numOfRacks--;
|
||||
}
|
||||
}
|
||||
LOG.debug("NetworkTopology became:\n{}", this.toString());
|
||||
LOG.debug("NetworkTopology became:\n{}", this);
|
||||
} finally {
|
||||
netlock.writeLock().unlock();
|
||||
}
|
||||
@ -525,7 +525,7 @@ protected Node chooseRandom(final String scope, String excludedScope,
|
||||
}
|
||||
if (numOfDatanodes == 0) {
|
||||
LOG.debug("Failed to find datanode (scope=\"{}\" excludedScope=\"{}\").",
|
||||
String.valueOf(scope), String.valueOf(excludedScope));
|
||||
scope, excludedScope);
|
||||
return null;
|
||||
}
|
||||
Node ret = null;
|
||||
@ -538,7 +538,7 @@ protected Node chooseRandom(final String scope, String excludedScope,
|
||||
}
|
||||
LOG.debug("Choosing random from {} available nodes on node {},"
|
||||
+ " scope={}, excludedScope={}, excludeNodes={}", availableNodes,
|
||||
innerNode.toString(), scope, excludedScope, excludedNodes);
|
||||
innerNode, scope, excludedScope, excludedNodes);
|
||||
if (availableNodes > 0) {
|
||||
do {
|
||||
int leaveIndex = r.nextInt(numOfDatanodes);
|
||||
|
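An illustrative class, not from the patch, showing why the logging calls above pass the object itself rather than this.toString(): SLF4J only invokes toString() when the debug level is actually enabled.

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

class TopologyLogDemo {
  private static final Logger LOG =
      LoggerFactory.getLogger(TopologyLogDemo.class);

  void logState() {
    // No string is built unless debug logging is turned on.
    LOG.debug("NetworkTopology became:\n{}", this);
  }
}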
@ -39,7 +39,6 @@
|
||||
import java.util.ArrayList;
|
||||
import java.util.Arrays;
|
||||
import java.util.Iterator;
|
||||
import java.util.LinkedList;
|
||||
import java.util.List;
|
||||
|
||||
/**
|
||||
@ -140,13 +139,11 @@ public SSLFactory(Mode mode, Configuration conf) {
|
||||
|
||||
enabledProtocols = conf.getStrings(SSL_ENABLED_PROTOCOLS_KEY,
|
||||
SSL_ENABLED_PROTOCOLS_DEFAULT);
|
||||
String excludeCiphersConf =
|
||||
sslConf.get(SSL_SERVER_EXCLUDE_CIPHER_LIST, "");
|
||||
if (excludeCiphersConf.isEmpty()) {
|
||||
excludeCiphers = new LinkedList<String>();
|
||||
} else {
|
||||
LOG.debug("will exclude cipher suites: {}", excludeCiphersConf);
|
||||
excludeCiphers = Arrays.asList(excludeCiphersConf.split(","));
|
||||
excludeCiphers = Arrays.asList(
|
||||
sslConf.getTrimmedStrings(SSL_SERVER_EXCLUDE_CIPHER_LIST));
|
||||
if (LOG.isDebugEnabled()) {
|
||||
LOG.debug("will exclude cipher suites: {}",
|
||||
StringUtils.join(",", excludeCiphers));
|
||||
}
|
||||
}
|
||||
|
||||
|
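A small sketch, assuming the standard ssl-server key name, of the getTrimmedStrings() behaviour relied on above: it splits on commas, trims surrounding whitespace, and returns an empty array rather than null when the key is unset.

import java.util.Arrays;
import java.util.List;
import org.apache.hadoop.conf.Configuration;

class ExcludeCipherDemo {
  static List<String> excludedCiphers(Configuration sslConf) {
    // An empty or missing value therefore yields an empty list,
    // not a single-element list containing "".
    return Arrays.asList(
        sslConf.getTrimmedStrings("ssl.server.exclude.cipher.list"));
  }
}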
@ -20,37 +20,83 @@
|
||||
|
||||
import org.apache.hadoop.classification.InterfaceAudience.Public;
|
||||
import org.apache.hadoop.classification.InterfaceStability.Evolving;
|
||||
import org.apache.hadoop.service.launcher.LauncherExitCodes;
|
||||
import org.apache.hadoop.util.ExitCodeProvider;
|
||||
|
||||
/**
|
||||
* Exception that is raised on state change operations.
|
||||
* Exception that can be raised on state change operations, whose
|
||||
* exit code can be explicitly set, determined from that of any nested
|
||||
* cause, or a default value of
|
||||
* {@link LauncherExitCodes#EXIT_SERVICE_LIFECYCLE_EXCEPTION}.
|
||||
*/
|
||||
@Public
|
||||
@Evolving
|
||||
public class ServiceStateException extends RuntimeException {
|
||||
public class ServiceStateException extends RuntimeException implements
|
||||
ExitCodeProvider {
|
||||
|
||||
private static final long serialVersionUID = 1110000352259232646L;
|
||||
|
||||
/**
|
||||
* Exit code.
|
||||
*/
|
||||
private int exitCode;
|
||||
|
||||
/**
|
||||
* Instantiate
|
||||
* @param message error message
|
||||
*/
|
||||
public ServiceStateException(String message) {
|
||||
super(message);
|
||||
this(message, null);
|
||||
}
|
||||
|
||||
/**
|
||||
* Instantiate with a message and cause; if the cause has an exit code
|
||||
* then it is used, otherwise the generic
|
||||
* {@link LauncherExitCodes#EXIT_SERVICE_LIFECYCLE_EXCEPTION} exit code
|
||||
* is used.
|
||||
* @param message exception message
|
||||
* @param cause optional inner cause
|
||||
*/
|
||||
public ServiceStateException(String message, Throwable cause) {
|
||||
super(message, cause);
|
||||
if(cause instanceof ExitCodeProvider) {
|
||||
this.exitCode = ((ExitCodeProvider) cause).getExitCode();
|
||||
} else {
|
||||
this.exitCode = LauncherExitCodes.EXIT_SERVICE_LIFECYCLE_EXCEPTION;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Instantiate, using the specified exit code as the exit code
|
||||
* of the exception, irrespective of any exit code supplied by any inner
|
||||
* cause.
|
||||
*
|
||||
* @param exitCode exit code to declare
|
||||
* @param message exception message
|
||||
* @param cause inner cause
|
||||
*/
|
||||
public ServiceStateException(int exitCode,
|
||||
String message,
|
||||
Throwable cause) {
|
||||
this(message, cause);
|
||||
this.exitCode = exitCode;
|
||||
}
|
||||
|
||||
public ServiceStateException(Throwable cause) {
|
||||
super(cause);
|
||||
}
|
||||
|
||||
@Override
|
||||
public int getExitCode() {
|
||||
return exitCode;
|
||||
}
|
||||
|
||||
/**
|
||||
* Convert any exception into a {@link RuntimeException}.
|
||||
* If the caught exception is already of that type, it is typecast to a
|
||||
* {@link RuntimeException} and returned.
|
||||
*
|
||||
* All other exception types are wrapped in a new instance of
|
||||
* ServiceStateException
|
||||
* {@code ServiceStateException}.
|
||||
* @param fault exception or throwable
|
||||
* @return a ServiceStateException to rethrow
|
||||
* @return a {@link RuntimeException} to rethrow
|
||||
*/
|
||||
public static RuntimeException convert(Throwable fault) {
|
||||
if (fault instanceof RuntimeException) {
|
||||
@ -66,10 +112,10 @@ public static RuntimeException convert(Throwable fault) {
|
||||
* {@link RuntimeException} and returned.
|
||||
*
|
||||
* All other exception types are wrapped in a new instance of
|
||||
* ServiceStateException
|
||||
* {@code ServiceStateException}.
|
||||
* @param text text to use if a new exception is created
|
||||
* @param fault exception or throwable
|
||||
* @return a ServiceStateException to rethrow
|
||||
* @return a {@link RuntimeException} to rethrow
|
||||
*/
|
||||
public static RuntimeException convert(String text, Throwable fault) {
|
||||
if (fault instanceof RuntimeException) {
|
||||
|
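A minimal sketch (the class name is illustrative) of the exit-code behaviour added above: the code comes from a cause implementing ExitCodeProvider, otherwise EXIT_SERVICE_LIFECYCLE_EXCEPTION is used.

import org.apache.hadoop.service.ServiceStateException;

class StateExceptionDemo {
  static int exitCodeOf(String message, Throwable cause) {
    // Returns cause.getExitCode() when the cause is an ExitCodeProvider,
    // otherwise LauncherExitCodes.EXIT_SERVICE_LIFECYCLE_EXCEPTION (57).
    return new ServiceStateException(message, cause).getExitCode();
  }
}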
@ -0,0 +1,78 @@
|
||||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
* regarding copyright ownership. The ASF licenses this file
|
||||
* to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package org.apache.hadoop.service.launcher;
|
||||
|
||||
import java.util.List;
|
||||
|
||||
import org.slf4j.Logger;
|
||||
import org.slf4j.LoggerFactory;
|
||||
|
||||
import org.apache.hadoop.classification.InterfaceAudience;
|
||||
import org.apache.hadoop.classification.InterfaceStability;
|
||||
import org.apache.hadoop.conf.Configuration;
|
||||
import org.apache.hadoop.service.AbstractService;
|
||||
|
||||
/**
|
||||
* Subclass of {@link AbstractService} that provides basic implementations
|
||||
* of the {@link LaunchableService} methods.
|
||||
*/
|
||||
@InterfaceAudience.Public
|
||||
@InterfaceStability.Evolving
|
||||
public abstract class AbstractLaunchableService extends AbstractService
|
||||
implements LaunchableService {
|
||||
|
||||
private static final Logger LOG =
|
||||
LoggerFactory.getLogger(AbstractLaunchableService.class);
|
||||
|
||||
/**
|
||||
* Construct an instance with the given name.
|
||||
*/
|
||||
protected AbstractLaunchableService(String name) {
|
||||
super(name);
|
||||
}
|
||||
|
||||
/**
|
||||
* {@inheritDoc}
|
||||
* <p>
|
||||
* The base implementation logs all arguments at the debug level,
|
||||
* then returns the passed in config unchanged.
|
||||
*/
|
||||
|
||||
@Override
|
||||
public Configuration bindArgs(Configuration config, List<String> args) throws
|
||||
Exception {
|
||||
if (LOG.isDebugEnabled()) {
|
||||
LOG.debug("Service {} passed in {} arguments:", getName(), args.size());
|
||||
for (String arg : args) {
|
||||
LOG.debug(arg);
|
||||
}
|
||||
}
|
||||
return config;
|
||||
}
|
||||
|
||||
/**
|
||||
* {@inheritDoc}
|
||||
* <p>
|
||||
* The action is to signal success by returning the exit code 0.
|
||||
*/
|
||||
@Override
|
||||
public int execute() throws Exception {
|
||||
return LauncherExitCodes.EXIT_SUCCESS;
|
||||
}
|
||||
}
|
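A hypothetical service built on the new base class; everything except the calls into AbstractLaunchableService and LauncherExitCodes is invented for illustration.

import java.util.List;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.service.launcher.AbstractLaunchableService;
import org.apache.hadoop.service.launcher.LauncherExitCodes;

class EchoService extends AbstractLaunchableService {
  private List<String> args;

  EchoService() {
    super("EchoService");
  }

  @Override
  public Configuration bindArgs(Configuration config, List<String> args)
      throws Exception {
    this.args = args;
    return super.bindArgs(config, args);  // base class logs args at debug level
  }

  @Override
  public int execute() throws Exception {
    args.forEach(System.out::println);
    return LauncherExitCodes.EXIT_SUCCESS;  // becomes the process exit code
  }
}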
@ -0,0 +1,129 @@
|
||||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
* regarding copyright ownership. The ASF licenses this file
|
||||
* to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package org.apache.hadoop.service.launcher;
|
||||
|
||||
import java.lang.Thread.UncaughtExceptionHandler;
|
||||
|
||||
import org.slf4j.Logger;
|
||||
import org.slf4j.LoggerFactory;
|
||||
|
||||
import org.apache.hadoop.classification.InterfaceAudience.Public;
|
||||
import org.apache.hadoop.classification.InterfaceStability.Evolving;
|
||||
import org.apache.hadoop.util.ExitUtil;
|
||||
import org.apache.hadoop.util.ShutdownHookManager;
|
||||
|
||||
/**
|
||||
* This class is intended to be installed by calling
|
||||
* {@link Thread#setDefaultUncaughtExceptionHandler(UncaughtExceptionHandler)}
|
||||
* in the main entry point.
|
||||
*
|
||||
* The base class will always attempt to shut down the process if an Error
|
||||
* was raised; the behavior on a standard Exception, raised outside
|
||||
* process shutdown, is simply to log it.
|
||||
*
|
||||
* (Based on the class {@code YarnUncaughtExceptionHandler})
|
||||
*/
|
||||
@SuppressWarnings("UseOfSystemOutOrSystemErr")
|
||||
@Public
|
||||
@Evolving
|
||||
public class HadoopUncaughtExceptionHandler
|
||||
implements UncaughtExceptionHandler {
|
||||
|
||||
/**
|
||||
* Logger.
|
||||
*/
|
||||
private static final Logger LOG = LoggerFactory.getLogger(
|
||||
HadoopUncaughtExceptionHandler.class);
|
||||
|
||||
/**
|
||||
* Delegate for simple exceptions.
|
||||
*/
|
||||
private final UncaughtExceptionHandler delegate;
|
||||
|
||||
/**
|
||||
* Create an instance delegating to the supplied handler if
|
||||
* the exception is considered "simple".
|
||||
* @param delegate a delegate exception handler.
|
||||
*/
|
||||
public HadoopUncaughtExceptionHandler(UncaughtExceptionHandler delegate) {
|
||||
this.delegate = delegate;
|
||||
}
|
||||
|
||||
/**
|
||||
* Basic exception handler: logs simple exceptions, then continues.
|
||||
*/
|
||||
public HadoopUncaughtExceptionHandler() {
|
||||
this(null);
|
||||
}
|
||||
|
||||
/**
|
||||
* Uncaught exception handler.
|
||||
* If an error is raised: shutdown
|
||||
* The state of the system is unknown at this point, and attempting
|
||||
* a clean shutdown is dangerous. Instead: exit
|
||||
* @param thread thread that failed
|
||||
* @param exception the raised exception
|
||||
*/
|
||||
@Override
|
||||
public void uncaughtException(Thread thread, Throwable exception) {
|
||||
if (ShutdownHookManager.get().isShutdownInProgress()) {
|
||||
LOG.error("Thread {} threw an error during shutdown: {}.",
|
||||
thread.toString(),
|
||||
exception,
|
||||
exception);
|
||||
} else if (exception instanceof Error) {
|
||||
try {
|
||||
LOG.error("Thread {} threw an error: {}. Shutting down",
|
||||
thread.toString(),
|
||||
exception,
|
||||
exception);
|
||||
} catch (Throwable err) {
|
||||
// We don't want to not exit because of an issue with logging
|
||||
}
|
||||
if (exception instanceof OutOfMemoryError) {
|
||||
// After catching an OOM java says it is undefined behavior, so don't
|
||||
// even try to clean up or we can get stuck on shutdown.
|
||||
try {
|
||||
System.err.println("Halting due to Out Of Memory Error...");
|
||||
} catch (Throwable err) {
|
||||
// Again we don't want to exit because of logging issues.
|
||||
}
|
||||
ExitUtil.haltOnOutOfMemory((OutOfMemoryError) exception);
|
||||
} else {
|
||||
// error other than OutOfMemory
|
||||
ExitUtil.ExitException ee =
|
||||
ServiceLauncher.convertToExitException(exception);
|
||||
ExitUtil.terminate(ee.status, ee);
|
||||
}
|
||||
} else {
|
||||
// simple exception in a thread. There's a policy decision here:
|
||||
// terminate the process vs. keep going after a thread has failed
|
||||
// base implementation: do nothing but log
|
||||
LOG.error("Thread {} threw an exception: {}",
|
||||
thread.toString(),
|
||||
exception,
|
||||
exception);
|
||||
if (delegate != null) {
|
||||
delegate.uncaughtException(thread, exception);
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
}
|
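A sketch, not part of the patch, of installing the handler from a main() entry point as the class javadoc describes; the entry-point class is hypothetical.

import org.apache.hadoop.service.launcher.HadoopUncaughtExceptionHandler;

class EntryPoint {
  public static void main(String[] args) {
    // Install early: Errors raised on any thread then halt the process,
    // while plain Exceptions are at least logged.
    Thread.setDefaultUncaughtExceptionHandler(
        new HadoopUncaughtExceptionHandler());
    // ... create and start services here ...
  }
}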
@ -0,0 +1,216 @@
|
||||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
* regarding copyright ownership. The ASF licenses this file
|
||||
* to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package org.apache.hadoop.service.launcher;
|
||||
|
||||
import java.lang.ref.WeakReference;
|
||||
import java.util.ArrayList;
|
||||
import java.util.List;
|
||||
import java.util.concurrent.atomic.AtomicBoolean;
|
||||
|
||||
import com.google.common.base.Preconditions;
|
||||
import org.slf4j.Logger;
|
||||
import org.slf4j.LoggerFactory;
|
||||
|
||||
import org.apache.hadoop.classification.InterfaceAudience;
|
||||
import org.apache.hadoop.classification.InterfaceStability;
|
||||
import org.apache.hadoop.service.Service;
|
||||
import org.apache.hadoop.util.ExitUtil;
|
||||
|
||||
import static org.apache.hadoop.service.launcher.LauncherExitCodes.EXIT_INTERRUPTED;
|
||||
|
||||
/**
|
||||
* Handles interrupts by shutting down a service, escalating if the service
|
||||
* does not shut down in time, or when other interrupts are received.
|
||||
* <ol>
|
||||
* <li>The service is given a time in milliseconds to stop:
|
||||
* if it exceeds this, the process exits anyway.</li>
|
||||
* <li>the exit operation used is {@link ServiceLauncher#exit(int, String)}
|
||||
* with the exit code {@link LauncherExitCodes#EXIT_INTERRUPTED}</li>
|
||||
* <li>If a second shutdown signal is received during the shutdown
|
||||
* process, {@link ExitUtil#halt(int)} is invoked. This handles the
|
||||
* problem of blocking shutdown hooks.</li>
|
||||
* </ol>
|
||||
*
|
||||
*/
|
||||
|
||||
@InterfaceAudience.Private
|
||||
@InterfaceStability.Unstable
|
||||
public class InterruptEscalator implements IrqHandler.Interrupted {
|
||||
private static final Logger LOG = LoggerFactory.getLogger(
|
||||
InterruptEscalator.class);
|
||||
|
||||
/**
|
||||
* Flag to indicate when a shutdown signal has already been received.
|
||||
* This allows the operation to be escalated.
|
||||
*/
|
||||
private final AtomicBoolean signalAlreadyReceived = new AtomicBoolean(false);
|
||||
|
||||
private final WeakReference<ServiceLauncher> ownerRef;
|
||||
|
||||
private final int shutdownTimeMillis;
|
||||
|
||||
/**
|
||||
* Previous interrupt handlers. These are not queried.
|
||||
*/
|
||||
private final List<IrqHandler> interruptHandlers = new ArrayList<>(2);
|
||||
private boolean forcedShutdownTimedOut;
|
||||
|
||||
public InterruptEscalator(ServiceLauncher owner, int shutdownTimeMillis) {
|
||||
Preconditions.checkArgument(owner != null, "null owner");
|
||||
this.ownerRef = new WeakReference<>(owner);
|
||||
this.shutdownTimeMillis = shutdownTimeMillis;
|
||||
}
|
||||
|
||||
private ServiceLauncher getOwner() {
|
||||
return ownerRef.get();
|
||||
}
|
||||
|
||||
private Service getService() {
|
||||
ServiceLauncher owner = getOwner();
|
||||
return owner != null ? owner.getService() : null;
|
||||
}
|
||||
|
||||
@Override
|
||||
public String toString() {
|
||||
final StringBuilder sb = new StringBuilder("InterruptEscalator{");
|
||||
sb.append(" signalAlreadyReceived=").append(signalAlreadyReceived.get());
|
||||
ServiceLauncher owner = ownerRef.get();
|
||||
if (owner != null) {
|
||||
sb.append(", owner= ").append(owner.toString());
|
||||
}
|
||||
sb.append(", shutdownTimeMillis=").append(shutdownTimeMillis);
|
||||
sb.append(", forcedShutdownTimedOut=").append(forcedShutdownTimedOut);
|
||||
sb.append('}');
|
||||
return sb.toString();
|
||||
}
|
||||
|
||||
@Override
|
||||
public void interrupted(IrqHandler.InterruptData interruptData) {
|
||||
String message = "Service interrupted by " + interruptData.toString();
|
||||
LOG.warn(message);
|
||||
if (!signalAlreadyReceived.compareAndSet(false, true)) {
|
||||
message = "Repeated interrupt: escalating to a JVM halt";
|
||||
LOG.warn(message);
|
||||
// signal already received. On a second request to a hard JVM
|
||||
// halt and so bypass any blocking shutdown hooks.
|
||||
ExitUtil.halt(LauncherExitCodes.EXIT_INTERRUPTED, message);
|
||||
}
|
||||
Service service = getService();
|
||||
if (service != null) {
|
||||
//start an async shutdown thread with a timeout
|
||||
ServiceForcedShutdown shutdown =
|
||||
new ServiceForcedShutdown(service, shutdownTimeMillis);
|
||||
Thread thread = new Thread(shutdown);
|
||||
thread.setDaemon(true);
|
||||
thread.setName("Service Forced Shutdown");
|
||||
thread.start();
|
||||
//wait for that thread to finish
|
||||
try {
|
||||
thread.join(shutdownTimeMillis);
|
||||
} catch (InterruptedException ignored) {
|
||||
//ignored
|
||||
}
|
||||
forcedShutdownTimedOut = !shutdown.getServiceWasShutdown();
|
||||
if (forcedShutdownTimedOut) {
|
||||
LOG.warn("Service did not shut down in time");
|
||||
}
|
||||
}
|
||||
ExitUtil.terminate(EXIT_INTERRUPTED, message);
|
||||
}
|
||||
|
||||
/**
|
||||
* Register an interrupt handler.
|
||||
* @param signalName signal name
|
||||
* @throws IllegalArgumentException if the registration failed
|
||||
*/
|
||||
public synchronized void register(String signalName) {
|
||||
IrqHandler handler = new IrqHandler(signalName, this);
|
||||
handler.bind();
|
||||
interruptHandlers.add(handler);
|
||||
}
|
||||
|
||||
/**
|
||||
* Look up the handler for a signal.
|
||||
* @param signalName signal name
|
||||
* @return a handler if found
|
||||
*/
|
||||
public synchronized IrqHandler lookup(String signalName) {
|
||||
for (IrqHandler irqHandler : interruptHandlers) {
|
||||
if (irqHandler.getName().equals(signalName)) {
|
||||
return irqHandler;
|
||||
}
|
||||
}
|
||||
return null;
|
||||
}
|
||||
|
||||
/**
|
||||
* Flag set if forced shut down timed out.
|
||||
* @return true if a shutdown was attempted and it timed out
|
||||
*/
|
||||
public boolean isForcedShutdownTimedOut() {
|
||||
return forcedShutdownTimedOut;
|
||||
}
|
||||
|
||||
/**
|
||||
* Flag set if a signal has been received.
|
||||
* @return true if there has been one interrupt already.
|
||||
*/
|
||||
public boolean isSignalAlreadyReceived() {
|
||||
return signalAlreadyReceived.get();
|
||||
}
|
||||
|
||||
/**
|
||||
* Forced shutdown runnable.
|
||||
*/
|
||||
protected static class ServiceForcedShutdown implements Runnable {
|
||||
|
||||
private final int shutdownTimeMillis;
|
||||
private final AtomicBoolean serviceWasShutdown =
|
||||
new AtomicBoolean(false);
|
||||
private Service service;
|
||||
|
||||
public ServiceForcedShutdown(Service service, int shutdownTimeMillis) {
|
||||
this.shutdownTimeMillis = shutdownTimeMillis;
|
||||
this.service = service;
|
||||
}
|
||||
|
||||
/**
|
||||
* Shutdown callback: stop the service and set an atomic boolean
|
||||
* if it stopped within the shutdown time.
|
||||
*/
|
||||
@Override
|
||||
public void run() {
|
||||
if (service != null) {
|
||||
service.stop();
|
||||
serviceWasShutdown.set(
|
||||
service.waitForServiceToStop(shutdownTimeMillis));
|
||||
} else {
|
||||
serviceWasShutdown.set(true);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Probe for the service being shutdown.
|
||||
* @return true if the service has been shutdown in the runnable
|
||||
*/
|
||||
private boolean getServiceWasShutdown() {
|
||||
return serviceWasShutdown.get();
|
||||
}
|
||||
}
|
||||
}
|
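Illustrative wiring only; the 30-second shutdown window is an assumption, not a value taken from the patch.

import org.apache.hadoop.service.launcher.InterruptEscalator;
import org.apache.hadoop.service.launcher.IrqHandler;
import org.apache.hadoop.service.launcher.ServiceLauncher;

class SignalSetup {
  static InterruptEscalator installHandlers(ServiceLauncher launcher) {
    // Escalate to a JVM halt if a second signal arrives or the service
    // fails to stop within the window.
    InterruptEscalator escalator = new InterruptEscalator(launcher, 30_000);
    escalator.register(IrqHandler.CONTROL_C);  // "INT"
    escalator.register(IrqHandler.SIGTERM);    // "TERM"
    return escalator;
  }
}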
@ -0,0 +1,178 @@
|
||||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
* regarding copyright ownership. The ASF licenses this file
|
||||
* to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package org.apache.hadoop.service.launcher;
|
||||
|
||||
import java.util.concurrent.atomic.AtomicInteger;
|
||||
|
||||
import com.google.common.base.Preconditions;
|
||||
import org.slf4j.Logger;
|
||||
import org.slf4j.LoggerFactory;
|
||||
import sun.misc.Signal;
|
||||
import sun.misc.SignalHandler;
|
||||
|
||||
import org.apache.hadoop.classification.InterfaceAudience;
|
||||
import org.apache.hadoop.classification.InterfaceStability;
|
||||
|
||||
/**
|
||||
* Handler of interrupts that relays them to a registered
|
||||
* implementation of {@link IrqHandler.Interrupted}.
|
||||
*
|
||||
* This class bundles up all the compiler warnings about abuse of sun.misc
|
||||
* interrupt handling code into one place.
|
||||
*/
|
||||
@InterfaceAudience.Private
|
||||
@InterfaceStability.Unstable
|
||||
@SuppressWarnings("UseOfSunClasses")
|
||||
public final class IrqHandler implements SignalHandler {
|
||||
private static final Logger LOG = LoggerFactory.getLogger(IrqHandler.class);
|
||||
|
||||
/**
|
||||
* Definition of the Control-C handler name: {@value}.
|
||||
*/
|
||||
public static final String CONTROL_C = "INT";
|
||||
|
||||
/**
|
||||
* Definition of default <code>kill</code> signal: {@value}.
|
||||
*/
|
||||
public static final String SIGTERM = "TERM";
|
||||
|
||||
/**
|
||||
* Signal name.
|
||||
*/
|
||||
private final String name;
|
||||
|
||||
/**
|
||||
* Handler to relay to.
|
||||
*/
|
||||
private final Interrupted handler;
|
||||
|
||||
/** Count of how many times a signal has been raised. */
|
||||
private final AtomicInteger signalCount = new AtomicInteger(0);
|
||||
|
||||
/**
|
||||
* Stored signal.
|
||||
*/
|
||||
private Signal signal;
|
||||
|
||||
/**
|
||||
* Create an IRQ handler bound to the specific interrupt.
|
||||
* @param name signal name
|
||||
* @param handler handler
|
||||
*/
|
||||
public IrqHandler(String name, Interrupted handler) {
|
||||
Preconditions.checkArgument(name != null, "Null \"name\"");
|
||||
Preconditions.checkArgument(handler != null, "Null \"handler\"");
|
||||
this.handler = handler;
|
||||
this.name = name;
|
||||
}
|
||||
|
||||
/**
|
||||
* Bind to the interrupt handler.
|
||||
* @throws IllegalArgumentException if the handler could not be set
|
||||
*/
|
||||
void bind() {
|
||||
Preconditions.checkState(signal == null, "Handler already bound");
|
||||
try {
|
||||
signal = new Signal(name);
|
||||
Signal.handle(signal, this);
|
||||
} catch (IllegalArgumentException e) {
|
||||
throw new IllegalArgumentException(
|
||||
"Could not set handler for signal \"" + name + "\"."
|
||||
+ "This can happen if the JVM has the -Xrs set.",
|
||||
e);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* @return the signal name.
|
||||
*/
|
||||
public String getName() {
|
||||
return name;
|
||||
}
|
||||
|
||||
/**
|
||||
* Raise the signal.
|
||||
*/
|
||||
public void raise() {
|
||||
Signal.raise(signal);
|
||||
}
|
||||
|
||||
@Override
|
||||
public String toString() {
|
||||
return "IrqHandler for signal " + name;
|
||||
}
|
||||
|
||||
/**
|
||||
* Handler for the JVM API for signal handling.
|
||||
* @param s signal raised
|
||||
*/
|
||||
@Override
|
||||
public void handle(Signal s) {
|
||||
signalCount.incrementAndGet();
|
||||
InterruptData data = new InterruptData(s.getName(), s.getNumber());
|
||||
LOG.info("Interrupted: {}", data);
|
||||
handler.interrupted(data);
|
||||
}
|
||||
|
||||
/**
|
||||
* Get the count of how many times a signal has been raised.
|
||||
* @return the count of signals
|
||||
*/
|
||||
public int getSignalCount() {
|
||||
return signalCount.get();
|
||||
}
|
||||
|
||||
/**
|
||||
* Callback issued on an interrupt.
|
||||
*/
|
||||
public interface Interrupted {
|
||||
|
||||
/**
|
||||
* Handle an interrupt.
|
||||
* @param interruptData data
|
||||
*/
|
||||
void interrupted(InterruptData interruptData);
|
||||
}
|
||||
|
||||
/**
|
||||
* Interrupt data to pass on.
|
||||
*/
|
||||
public static class InterruptData {
|
||||
private final String name;
|
||||
private final int number;
|
||||
|
||||
public InterruptData(String name, int number) {
|
||||
this.name = name;
|
||||
this.number = number;
|
||||
}
|
||||
|
||||
public String getName() {
|
||||
return name;
|
||||
}
|
||||
|
||||
public int getNumber() {
|
||||
return number;
|
||||
}
|
||||
|
||||
@Override
|
||||
public String toString() {
|
||||
return "signal " + name + '(' + number + ')';
|
||||
}
|
||||
}
|
||||
}
|
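A minimal sketch of the Interrupted callback contract; the class is hypothetical. Binding is done inside the launcher package (for example by InterruptEscalator.register()), since bind() is package-private.

import org.apache.hadoop.service.launcher.IrqHandler;

class LoggingInterruptCallback implements IrqHandler.Interrupted {
  @Override
  public void interrupted(IrqHandler.InterruptData interruptData) {
    // InterruptData.toString() renders as e.g. "signal INT(2)".
    System.out.println("Received " + interruptData);
  }
}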
@ -0,0 +1,95 @@
|
||||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
* regarding copyright ownership. The ASF licenses this file
|
||||
* to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package org.apache.hadoop.service.launcher;
|
||||
|
||||
import java.util.List;
|
||||
|
||||
import org.apache.hadoop.classification.InterfaceAudience;
|
||||
import org.apache.hadoop.classification.InterfaceStability;
|
||||
import org.apache.hadoop.conf.Configuration;
|
||||
import org.apache.hadoop.service.Service;
|
||||
|
||||
/**
|
||||
* An interface which services can implement to have their
|
||||
* execution managed by the ServiceLauncher.
|
||||
* <p>
|
||||
* The command line options will be passed down before the
|
||||
* {@link Service#init(Configuration)} operation is invoked via an
|
||||
* invocation of {@link LaunchableService#bindArgs(Configuration, List)}
|
||||
* After the service has been successfully started via {@link Service#start()}
|
||||
* the {@link LaunchableService#execute()} method is called to execute the
|
||||
* service. When this method returns, the service launcher will exit, using
|
||||
* the return code from the method as its exit code.
|
||||
*/
|
||||
@InterfaceAudience.Public
|
||||
@InterfaceStability.Evolving
|
||||
public interface LaunchableService extends Service {
|
||||
|
||||
/**
|
||||
* Propagate the command line arguments.
|
||||
* <p>
|
||||
* This method is called before {@link #init(Configuration)};
|
||||
* Any non-null configuration that is returned from this operation
|
||||
* becomes the one that is passed on to that {@link #init(Configuration)}
|
||||
* operation.
|
||||
* <p>
|
||||
* This permits implementations to change the configuration before
|
||||
* the init operation. As the ServiceLauncher only creates
|
||||
* an instance of the base {@link Configuration} class, it is
|
||||
* recommended to instantiate any subclass (such as YarnConfiguration)
|
||||
* that injects new resources.
|
||||
* <p>
|
||||
* @param config the initial configuration build up by the
|
||||
* service launcher.
|
||||
* @param args list of arguments passed to the command line
|
||||
* after any launcher-specific commands have been stripped.
|
||||
* @return the configuration to init the service with.
|
||||
* Recommended: pass down the config parameter with any changes
|
||||
* @throws Exception any problem
|
||||
*/
|
||||
Configuration bindArgs(Configuration config, List<String> args)
|
||||
throws Exception;
|
||||
|
||||
/**
|
||||
* Run a service. This method is called after {@link Service#start()}.
|
||||
* <p>
|
||||
* The return value becomes the exit code of the launched process.
|
||||
* <p>
|
||||
* If an exception is raised, the policy is:
|
||||
* <ol>
|
||||
* <li>Any subset of {@link org.apache.hadoop.util.ExitUtil.ExitException}:
|
||||
* the exception is passed up unmodified.
|
||||
* </li>
|
||||
* <li>Any exception which implements
|
||||
* {@link org.apache.hadoop.util.ExitCodeProvider}:
|
||||
* A new {@link ServiceLaunchException} is created with the exit code
|
||||
* and message of the thrown exception; the thrown exception becomes the
|
||||
* cause.</li>
|
||||
* <li>Any other exception: a new {@link ServiceLaunchException} is created
|
||||
* with the exit code {@link LauncherExitCodes#EXIT_EXCEPTION_THROWN} and
|
||||
* the message of the original exception (which becomes the cause).</li>
|
||||
* </ol>
|
||||
* @return the exit code
|
||||
* @throws org.apache.hadoop.util.ExitUtil.ExitException an exception passed
|
||||
* up as the exit code and error text.
|
||||
* @throws Exception any exception to report. If it provides an exit code
|
||||
* this is used in a wrapping exception.
|
||||
*/
|
||||
int execute() throws Exception;
|
||||
}
|
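A sketch of the exit-code policy described in the execute() javadoc; the method and message are illustrative.

import org.apache.hadoop.service.launcher.LauncherExitCodes;
import org.apache.hadoop.service.launcher.ServiceLaunchException;

class ExecutePolicyDemo {
  static int run(boolean configured) throws Exception {
    if (!configured) {
      // ServiceLaunchException extends ExitUtil.ExitException, so it passes
      // through the launcher unmodified and 49 becomes the process exit code.
      throw new ServiceLaunchException(
          LauncherExitCodes.EXIT_BAD_CONFIGURATION, "no configuration given");
    }
    return LauncherExitCodes.EXIT_SUCCESS;  // exit code 0
  }
}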
@ -0,0 +1,59 @@
|
||||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
* regarding copyright ownership. The ASF licenses this file
|
||||
* to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package org.apache.hadoop.service.launcher;
|
||||
|
||||
/**
|
||||
* Standard launcher arguments. These are all from
|
||||
* the {@code GenericOptionsParser}, simply extracted to constants.
|
||||
*/
|
||||
public interface LauncherArguments {
|
||||
/**
|
||||
* Name of the configuration argument on the CLI.
|
||||
* Value: {@value}
|
||||
*/
|
||||
String ARG_CONF = "conf";
|
||||
String ARG_CONF_SHORT = "conf";
|
||||
|
||||
/**
|
||||
* Prefixed version of {@link #ARG_CONF}.
|
||||
* Value: {@value}
|
||||
*/
|
||||
String ARG_CONF_PREFIXED = "--" + ARG_CONF;
|
||||
|
||||
/**
|
||||
* Name of a configuration class which is loaded before any
|
||||
* attempt is made to load the class.
|
||||
* <p>
|
||||
* Value: {@value}
|
||||
*/
|
||||
String ARG_CONFCLASS = "hadoopconf";
|
||||
String ARG_CONFCLASS_SHORT = "hadoopconf";
|
||||
|
||||
/**
|
||||
* Prefixed version of {@link #ARG_CONFCLASS}.
|
||||
* Value: {@value}
|
||||
*/
|
||||
String ARG_CONFCLASS_PREFIXED = "--" + ARG_CONFCLASS;
|
||||
|
||||
/**
|
||||
* Error string on a parse failure.
|
||||
* Value: {@value}
|
||||
*/
|
||||
String E_PARSE_FAILED = "Failed to parse: ";
|
||||
}
|
@ -0,0 +1,183 @@
|
||||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
* regarding copyright ownership. The ASF licenses this file
|
||||
* to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package org.apache.hadoop.service.launcher;
|
||||
|
||||
|
||||
import org.apache.hadoop.classification.InterfaceAudience;
|
||||
import org.apache.hadoop.classification.InterfaceStability;
|
||||
|
||||
/**
|
||||
* Common Exit codes.
|
||||
* <p>
|
||||
* Codes with a YARN prefix are YARN-related.
|
||||
* <p>
|
||||
* Many of the exit codes are designed to resemble HTTP error codes,
|
||||
* squashed into a single byte, e.g. 44, "not found", is the equivalent
|
||||
* of 404. The various 2XX HTTP error codes aren't followed;
|
||||
* the Unix standard of "0" for success is used.
|
||||
* <pre>
|
||||
* 0-10: general command issues
|
||||
* 30-39: equivalent to the 3XX responses, where those responses are
|
||||
* considered errors by the application.
|
||||
* 40-49: client-side/CLI/config problems
|
||||
* 50-59: service-side problems.
|
||||
* 60+ : application specific error codes
|
||||
* </pre>
|
||||
*/
|
||||
@InterfaceAudience.Public
|
||||
@InterfaceStability.Evolving
|
||||
public interface LauncherExitCodes {
|
||||
|
||||
/**
|
||||
* Success: {@value}.
|
||||
*/
|
||||
int EXIT_SUCCESS = 0;
|
||||
|
||||
/**
|
||||
* Generic "false/fail" response: {@value}.
|
||||
* The operation worked but the result was not "true" from the viewpoint
|
||||
* of the executed code.
|
||||
*/
|
||||
int EXIT_FAIL = -1;
|
||||
|
||||
/**
|
||||
* Exit code when a client requested service termination: {@value}.
|
||||
*/
|
||||
int EXIT_CLIENT_INITIATED_SHUTDOWN = 1;
|
||||
|
||||
/**
|
||||
* Exit code when targets could not be launched: {@value}.
|
||||
*/
|
||||
int EXIT_TASK_LAUNCH_FAILURE = 2;
|
||||
|
||||
/**
|
||||
* Exit code when a control-C, kill -3, signal was picked up: {@value}.
|
||||
*/
|
||||
int EXIT_INTERRUPTED = 3;
|
||||
|
||||
/**
|
||||
* Exit code when something happened but we can't be specific: {@value}.
|
||||
*/
|
||||
int EXIT_OTHER_FAILURE = 5;
|
||||
|
||||
/**
|
||||
* Exit code when the command line doesn't parse: {@value}, or
|
||||
* when it is otherwise invalid.
|
||||
* <p>
|
||||
* Approximate HTTP equivalent: {@code 400 Bad Request}
|
||||
*/
|
||||
int EXIT_COMMAND_ARGUMENT_ERROR = 40;
|
||||
|
||||
/**
|
||||
* The request requires user authentication: {@value}.
|
||||
* <p>
|
||||
* Approximate HTTP equivalent: {@code 401 Unauthorized}
|
||||
*/
|
||||
int EXIT_UNAUTHORIZED = 41;
|
||||
|
||||
/**
|
||||
* Exit code when a usage message was printed: {@value}.
|
||||
*/
|
||||
int EXIT_USAGE = 42;
|
||||
|
||||
/**
|
||||
* Forbidden action: {@value}.
|
||||
* <p>
|
||||
* Approximate HTTP equivalent: {@code 403: Forbidden}
|
||||
*/
|
||||
int EXIT_FORBIDDEN = 43;
|
||||
|
||||
/**
|
||||
* Something was not found: {@value}.
|
||||
* <p>
|
||||
* Approximate HTTP equivalent: {@code 404: Not Found}
|
||||
*/
|
||||
int EXIT_NOT_FOUND = 44;
|
||||
|
||||
/**
|
||||
* The operation is not allowed: {@value}.
|
||||
* <p>
|
||||
* Approximate HTTP equivalent: {@code 405: Not allowed}
|
||||
*/
|
||||
int EXIT_OPERATION_NOT_ALLOWED = 45;
|
||||
|
||||
/**
|
||||
* The command is somehow not acceptable: {@value}.
|
||||
* <p>
|
||||
* Approximate HTTP equivalent: {@code 406: Not Acceptable}
|
||||
*/
|
||||
int EXIT_NOT_ACCEPTABLE = 46;
|
||||
|
||||
/**
|
||||
* Exit code on connectivity problems: {@value}.
|
||||
* <p>
|
||||
* Approximate HTTP equivalent: {@code 408: Request Timeout}
|
||||
*/
|
||||
int EXIT_CONNECTIVITY_PROBLEM = 48;
|
||||
|
||||
/**
|
||||
* Exit code when the configuration is invalid or incomplete: {@value}.
|
||||
* <p>
|
||||
* Approximate HTTP equivalent: {@code 409: Conflict}
|
||||
*/
|
||||
int EXIT_BAD_CONFIGURATION = 49;
|
||||
|
||||
/**
|
||||
* Exit code when an exception was thrown from the service: {@value}.
|
||||
* <p>
|
||||
* Approximate HTTP equivalent: {@code 500 Internal Server Error}
|
||||
*/
|
||||
int EXIT_EXCEPTION_THROWN = 50;
|
||||
|
||||
/**
|
||||
* Unimplemented feature: {@value}.
|
||||
* <p>
|
||||
* Approximate HTTP equivalent: {@code 501: Not Implemented}
|
||||
*/
|
||||
int EXIT_UNIMPLEMENTED = 51;
|
||||
|
||||
/**
|
||||
* Service Unavailable; it may be available later: {@value}.
|
||||
* <p>
|
||||
* Approximate HTTP equivalent: {@code 503 Service Unavailable}
|
||||
*/
|
||||
int EXIT_SERVICE_UNAVAILABLE = 53;
|
||||
|
||||
/**
|
||||
* The application does not support, or refuses to support this
|
||||
* version: {@value}.
|
||||
* <p>
|
||||
* If raised, this is expected to be raised server-side and likely due
|
||||
* to client/server version incompatibilities.
|
||||
* <p>
|
||||
* Approximate HTTP equivalent: {@code 505: Version Not Supported}
|
||||
*/
|
||||
int EXIT_UNSUPPORTED_VERSION = 55;
|
||||
|
||||
/**
|
||||
* The service instance could not be created: {@value}.
|
||||
*/
|
||||
int EXIT_SERVICE_CREATION_FAILURE = 56;
|
||||
|
||||
/**
|
||||
* A failure occurred during the service lifecycle: {@value}.
|
||||
*/
|
||||
int EXIT_SERVICE_LIFECYCLE_EXCEPTION = 57;
|
||||
|
||||
}
|
@ -0,0 +1,81 @@
|
||||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
* regarding copyright ownership. The ASF licenses this file
|
||||
* to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package org.apache.hadoop.service.launcher;
|
||||
|
||||
|
||||
import java.util.Locale;
|
||||
|
||||
import org.apache.hadoop.classification.InterfaceAudience;
|
||||
import org.apache.hadoop.classification.InterfaceStability;
|
||||
import org.apache.hadoop.util.ExitCodeProvider;
|
||||
import org.apache.hadoop.util.ExitUtil;
|
||||
|
||||
/**
|
||||
* A service launch exception that includes an exit code.
|
||||
* <p>
|
||||
* When caught by the ServiceLauncher, it will convert that
|
||||
* into a process exit code.
|
||||
*
|
||||
* The {@link #ServiceLaunchException(int, String, Object...)} constructor
|
||||
* generates formatted exceptions.
|
||||
*/
|
||||
@InterfaceAudience.Public
|
||||
@InterfaceStability.Evolving
|
||||
|
||||
public class ServiceLaunchException extends ExitUtil.ExitException
|
||||
implements ExitCodeProvider, LauncherExitCodes {
|
||||
|
||||
/**
|
||||
* Create an exception with the specific exit code.
|
||||
* @param exitCode exit code
|
||||
* @param cause cause of the exception
|
||||
*/
|
||||
public ServiceLaunchException(int exitCode, Throwable cause) {
|
||||
super(exitCode, cause);
|
||||
}
|
||||
|
||||
/**
|
||||
* Create an exception with the specific exit code and text.
|
||||
* @param exitCode exit code
|
||||
* @param message message to use in exception
|
||||
*/
|
||||
public ServiceLaunchException(int exitCode, String message) {
|
||||
super(exitCode, message);
|
||||
}
|
||||
|
||||
/**
|
||||
* Create a formatted exception.
|
||||
* <p>
|
||||
* This uses {@link String#format(String, Object...)}
|
||||
* to build the formatted exception in the ENGLISH locale.
|
||||
* <p>
|
||||
* If the last argument is a throwable, it becomes the cause of the exception.
|
||||
* It will also be used as a parameter for the format.
|
||||
* @param exitCode exit code
|
||||
* @param format format for message to use in exception
|
||||
* @param args list of arguments
|
||||
*/
|
||||
public ServiceLaunchException(int exitCode, String format, Object... args) {
|
||||
super(exitCode, String.format(Locale.ENGLISH, format, args));
|
||||
if (args.length > 0 && (args[args.length - 1] instanceof Throwable)) {
|
||||
initCause((Throwable) args[args.length - 1]);
|
||||
}
|
||||
}
|
||||
|
||||
}
|
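A sketch of the varargs constructor documented above; the wrapping method and message are illustrative.

import java.io.IOException;
import org.apache.hadoop.service.launcher.LauncherExitCodes;
import org.apache.hadoop.service.launcher.ServiceLaunchException;

class LaunchExceptionDemo {
  static ServiceLaunchException wrap(String path, IOException cause) {
    // The message is String.format()-ed in the ENGLISH locale; because the
    // last argument is a Throwable it also becomes the exception's cause.
    return new ServiceLaunchException(
        LauncherExitCodes.EXIT_EXCEPTION_THROWN,
        "Failed to read %s: %s", path, cause);
  }
}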
File diff suppressed because it is too large
@ -0,0 +1,112 @@
|
||||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
* regarding copyright ownership. The ASF licenses this file
|
||||
* to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package org.apache.hadoop.service.launcher;
|
||||
|
||||
import java.lang.ref.WeakReference;
|
||||
|
||||
import org.slf4j.Logger;
|
||||
import org.slf4j.LoggerFactory;
|
||||
|
||||
import org.apache.hadoop.classification.InterfaceAudience;
|
||||
import org.apache.hadoop.classification.InterfaceStability;
|
||||
import org.apache.hadoop.service.Service;
|
||||
import org.apache.hadoop.util.ShutdownHookManager;
|
||||
|
||||
/**
|
||||
* JVM Shutdown hook for Service which will stop the
|
||||
* Service gracefully in case of JVM shutdown.
|
||||
* This hook uses a weak reference to the service,
|
||||
* and when shut down, calls {@link Service#stop()} if the reference is valid.
|
||||
*/
|
||||
@InterfaceAudience.Private
|
||||
@InterfaceStability.Unstable
|
||||
public class ServiceShutdownHook implements Runnable {
|
||||
private static final Logger LOG = LoggerFactory.getLogger(
|
||||
ServiceShutdownHook.class);
|
||||
|
||||
/**
|
||||
* A weak reference to the service.
|
||||
*/
|
||||
private final WeakReference<Service> serviceRef;
|
||||
|
||||
/**
|
||||
* Create an instance.
|
||||
* @param service the service
|
||||
*/
|
||||
public ServiceShutdownHook(Service service) {
|
||||
serviceRef = new WeakReference<>(service);
|
||||
}
|
||||
|
||||
/**
|
||||
* Register the service for shutdown with Hadoop's
|
||||
* {@link ShutdownHookManager}.
|
||||
* @param priority shutdown hook priority
|
||||
*/
|
||||
public synchronized void register(int priority) {
|
||||
unregister();
|
||||
ShutdownHookManager.get().addShutdownHook(this, priority);
|
||||
}
|
||||
|
||||
/**
|
||||
* Unregister the hook.
|
||||
*/
|
||||
public synchronized void unregister() {
|
||||
try {
|
||||
ShutdownHookManager.get().removeShutdownHook(this);
|
||||
} catch (IllegalStateException e) {
|
||||
LOG.info("Failed to unregister shutdown hook: {}", e, e);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Shutdown handler.
|
||||
* Query the service hook reference -if it is still valid the
|
||||
* {@link Service#stop()} operation is invoked.
|
||||
*/
|
||||
@Override
|
||||
public void run() {
|
||||
shutdown();
|
||||
}
|
||||
|
||||
/**
|
||||
* Shutdown operation.
|
||||
* <p>
|
||||
* Subclasses may extend it, but it is primarily
|
||||
* made available for testing.
|
||||
* @return true if the service was stopped and no exception was raised.
|
||||
*/
|
||||
protected boolean shutdown() {
|
||||
Service service;
|
||||
boolean result = false;
|
||||
synchronized (this) {
|
||||
service = serviceRef.get();
|
||||
serviceRef.clear();
|
||||
}
|
||||
if (service != null) {
|
||||
try {
|
||||
// Stop the Service
|
||||
service.stop();
|
||||
result = true;
|
||||
} catch (Throwable t) {
|
||||
LOG.info("Error stopping {}", service.getName(), t);
|
||||
}
|
||||
}
|
||||
return result;
|
||||
}
|
||||
}
|
@ -0,0 +1,462 @@
|
||||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
* regarding copyright ownership. The ASF licenses this file
|
||||
* to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
/**
|
||||
|
||||
This package contains classes, interfaces and exceptions to launch
|
||||
YARN services from the command line.
|
||||
|
||||
<h2>Key Features</h2>
|
||||
|
||||
<p>
|
||||
<b>General purpose YARN service launcher</b>:<p>
|
||||
The {@link org.apache.hadoop.service.launcher.ServiceLauncher} class parses
|
||||
a command line, then instantiates and launches the specified YARN service. It
|
||||
then waits for the service to finish, converting any exceptions raised or
|
||||
exit codes returned into an exit code for the (then exited) process.
|
||||
<p>
|
||||
This class is designed to be invokable from the static
|
||||
{@link org.apache.hadoop.service.launcher.ServiceLauncher#main(String[])}
|
||||
method, or from {@code main(String[])} methods implemented by
|
||||
other classes which provide their own entry points.
|
||||
|
||||
|
||||
<p>
|
||||
<b>Extended YARN Service Interface</b>:<p>
|
||||
The {@link org.apache.hadoop.service.launcher.LaunchableService} interface
|
||||
extends {@link org.apache.hadoop.service.Service} with methods to pass
|
||||
down the CLI arguments and to execute an operation without having to
|
||||
spawn a thread in the {@link org.apache.hadoop.service.Service#start()} phase.
|
||||
|
||||
|
||||
<p>
|
||||
<b>Standard Exit codes</b>:<p>
|
||||
{@link org.apache.hadoop.service.launcher.LauncherExitCodes}
|
||||
defines a set of exit codes that can be used by services to standardize
|
||||
exit causes.
|
||||
|
||||
<p>
|
||||
<b>Escalated shutdown</b>:<p>
|
||||
The {@link org.apache.hadoop.service.launcher.ServiceShutdownHook}
|
||||
shuts down any service via the hadoop shutdown mechanism.
|
||||
The {@link org.apache.hadoop.service.launcher.InterruptEscalator} can be
|
||||
registered to catch interrupts, triggering the shutdown -and forcing a JVM
|
||||
exit if it times out or a second interrupt is received.
|
||||
|
||||
<p><b>Tests:</b><p> test cases include interrupt handling and
|
||||
lifecycle failures.
|
||||
|
||||
<h2>Launching a YARN Service</h2>
|
||||
|
||||
The Service Launcher can launch <i>any YARN service</i>.
|
||||
It will instantiate the service classname provided, using the no-args
|
||||
constructor, or if no such constructor is available, it will fall back
|
||||
to a constructor with a single {@code String} parameter,
|
||||
passing the service name as the parameter value.
|
||||
<p>
|
||||
|
||||
The launcher will initialize the service via
|
||||
{@link org.apache.hadoop.service.Service#init(Configuration)},
|
||||
then start it via its {@link org.apache.hadoop.service.Service#start()} method.
|
||||
It then waits indefinitely for the service to stop.
|
||||
<p>
|
||||
After the service has stopped, a non-null value of
|
||||
{@link org.apache.hadoop.service.Service#getFailureCause()} is interpreted
|
||||
as a failure, and, if it didn't happen during the stop phase (i.e. when
|
||||
{@link org.apache.hadoop.service.Service#getFailureState()} is not
|
||||
{@code STATE.STOPPED}), escalated into a non-zero return code.
|
||||
<p>
|
||||
|
||||
To view the workflow in sequence, it is:
|
||||
<ol>
|
||||
<li>(prepare configuration files —covered later)</li>
|
||||
<li>instantiate the service via its empty or string constructor</li>
|
||||
<li>call {@link org.apache.hadoop.service.Service#init(Configuration)}</li>
|
||||
<li>call {@link org.apache.hadoop.service.Service#start()}</li>
|
||||
<li>call
|
||||
{@link org.apache.hadoop.service.Service#waitForServiceToStop(long)}</li>
|
||||
<li>If an exception was raised: propagate it</li>
|
||||
<li>If an exception was recorded in
|
||||
{@link org.apache.hadoop.service.Service#getFailureCause()}
|
||||
while the service was running: propagate it.</li>
|
||||
</ol>
|
||||
|
||||
For a service to be fully compatible with this launch model, it must
|
||||
<ul>
|
||||
<li>Start worker threads, processes and executors in its
|
||||
{@link org.apache.hadoop.service.Service#start()} method</li>
|
||||
<li>Terminate itself via a call to
|
||||
{@link org.apache.hadoop.service.Service#stop()}
|
||||
in one of these asynchronous methods.</li>
|
||||
</ul>
|
||||
|
||||
If a service does not stop itself, <i>ever</i>, then it can be launched
|
||||
as a long-lived daemon.
|
||||
The service launcher will never terminate, but neither will the service.
|
||||
The service launcher does register signal handlers to catch {@code kill}
|
||||
and control-C signals —calling {@code stop()} on the service when
|
||||
signaled.
|
||||
This means that a daemon service <i>may</i> get a warning and time to shut
|
||||
down.
|
||||
|
||||
<p>
|
||||
To summarize: provided a service launches its long-lived threads in its Service
|
||||
{@code start()} method, the service launcher can create it, configure it
|
||||
and start it, triggering shutdown when signaled.
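
<p>
As a rough sketch of such a service (the class and thread names here are
invented for illustration and are not part of the launcher API), a minimal
daemon-style service compatible with this model might look like:

<pre>
public class PingService extends AbstractService {

  private Thread worker;

  public PingService() {
    super("PingService");
  }

  protected void serviceStart() throws Exception {
    // the long-lived work is started here, in its own thread
    worker = new Thread(() -> {
      while (!Thread.currentThread().isInterrupted()) {
        // ... periodic work ...
      }
    }, getName() + "-worker");
    worker.start();
  }

  protected void serviceStop() throws Exception {
    // invoked by the launcher's signal handlers and shutdown hook
    if (worker != null) {
      worker.interrupt();
    }
  }
}
</pre>

Because the long-lived work begins in the start phase and ends in the stop
phase, the launcher can run such a class unmodified as a daemon.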
|
||||
|
||||
What these services cannot do is get at the command line parameters or easily
|
||||
propagate exit codes (there is a way covered later). These features require
|
||||
some extensions to the base {@code Service} interface: <i>the Launchable
|
||||
Service</i>.
|
||||
|
||||
<h2>Launching a Launchable YARN Service</h2>
|
||||
|
||||
A Launchable YARN Service is a YARN service which implements the interface
|
||||
{@link org.apache.hadoop.service.launcher.LaunchableService}.
|
||||
<p>
|
||||
It adds two methods to the service interface —and hence two new features:
|
||||
|
||||
<ol>
|
||||
<li>Access to the command line passed to the service launcher </li>
|
||||
<li>A blocking {@code int execute()} method which can return the exit
|
||||
code for the application.</li>
|
||||
</ol>
|
||||
|
||||
This design is ideal for implementing services which parse the command line,
|
||||
and which execute short-lived applications. For example, end user
|
||||
commands can be implemented as such services, thus integrating with YARN's
|
||||
workflow and {@code YarnClient} client-side code.
|
||||
|
||||
<p>
|
||||
It can just as easily be used for implementing long-lived services that
|
||||
parse the command line -it just becomes the responsibility of the
|
||||
service to decide when to return from the {@code execute()} method.
|
||||
It doesn't even need to {@code stop()} itself; the launcher will handle
|
||||
that if necessary.
|
||||
<p>
|
||||
The {@link org.apache.hadoop.service.launcher.LaunchableService} interface
|
||||
extends {@link org.apache.hadoop.service.Service} with two new methods.
|
||||
|
||||
<p>
|
||||
{@link org.apache.hadoop.service.launcher.LaunchableService#bindArgs(Configuration, List)}
|
||||
provides the {@code main(String args[])} arguments as a list, after any
|
||||
processing by the Service Launcher to extract configuration file references.
|
||||
This method <i>is called before
|
||||
{@link org.apache.hadoop.service.Service#init(Configuration)}.</i>
|
||||
This is by design: it allows the arguments to be parsed before the service is
|
||||
initialized, thus allowing services to tune their configuration data before
|
||||
passing it to any superclass in that {@code init()} method.
|
||||
To make this operation even simpler, the
|
||||
{@link org.apache.hadoop.conf.Configuration} that is to be passed in
|
||||
is provided as an argument.
|
||||
This reference passed in is the initial configuration for this service;
|
||||
the one that will be passed to the init operation.
|
||||
|
||||
In
|
||||
{@link org.apache.hadoop.service.launcher.LaunchableService#bindArgs(Configuration, List)},
|
||||
a Launchable Service may manipulate this configuration by setting or removing
|
||||
properties. It may also create a new {@code Configuration} instance
|
||||
which may be needed to trigger the injection of HDFS or YARN resources
|
||||
into the default resources of all Configurations.
|
||||
If the return value of the method call is a configuration
|
||||
reference (as opposed to a null value), the returned value becomes that
|
||||
passed in to the {@code init()} method.
|
||||
<p>
|
||||
After the {@code bindArgs()} processing, the service's {@code init()}
|
||||
and {@code start()} methods are called, as usual.
|
||||
<p>
|
||||
At this point, rather than block waiting for the service to terminate (as
|
||||
is done for a basic service), the method
|
||||
{@link org.apache.hadoop.service.launcher.LaunchableService#execute()}
|
||||
is called.
|
||||
This is a method expected to block until completed, returning the intended
|
||||
application exit code of the process when it does so.
|
||||
<p>
|
||||
After this {@code execute()} operation completes, the
|
||||
service is stopped and exit codes generated. Any exception raised
|
||||
during the {@code execute()} method takes priority over any exit codes
|
||||
returned by the method. This allows services to signal failures simply
|
||||
by raising exceptions with exit codes.
|
||||
<p>
|
||||
|
||||
<p>
|
||||
To view the workflow in sequence, it is:
|
||||
<ol>
|
||||
<li>(prepare configuration files —covered later)</li>
|
||||
<li>instantiate the service via its empty or string constructor</li>
|
||||
<li>call {@link org.apache.hadoop.service.launcher.LaunchableService#bindArgs(Configuration, List)}</li>
|
||||
<li>call {@link org.apache.hadoop.service.Service#init(Configuration)} with the existing config,
|
||||
or any new one returned by
|
||||
{@link org.apache.hadoop.service.launcher.LaunchableService#bindArgs(Configuration, List)}</li>
|
||||
<li>call {@link org.apache.hadoop.service.Service#start()}</li>
|
||||
<li>call {@link org.apache.hadoop.service.launcher.LaunchableService#execute()}</li>
|
||||
<li>call {@link org.apache.hadoop.service.Service#stop()}</li>
|
||||
<li>The return code from
|
||||
{@link org.apache.hadoop.service.launcher.LaunchableService#execute()}
|
||||
becomes the exit code of the process, unless overridden by an exception.</li>
|
||||
<li>If an exception was raised in this workflow: propagate it</li>
|
||||
<li>If an exception was recorded in
|
||||
{@link org.apache.hadoop.service.Service#getFailureCause()}
|
||||
while the service was running: propagate it.</li>
|
||||
</ol>
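
<p>
As a sketch of a short-lived command implemented this way (the class name and
the behaviour of {@code execute()} are invented for illustration; only the
{@code bindArgs()} and {@code execute()} signatures come from the interface
as described above):

<pre>
public class EchoArgsTool extends AbstractService implements LaunchableService {

  private List<String> argList;

  public EchoArgsTool() {
    super("EchoArgsTool");
  }

  public Configuration bindArgs(Configuration config, List<String> args)
      throws Exception {
    // keep the CLI arguments for execute(); --conf pairs are already stripped
    this.argList = args;
    return config;
  }

  public int execute() throws Exception {
    System.out.println("Arguments: " + argList);
    return 0;    // becomes the exit code of the process
  }
}
</pre>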
|
||||
|
||||
|
||||
<h2>Exit Codes and Exceptions</h2>
|
||||
|
||||
<p>
|
||||
For a basic service, the return code is 0 unless an exception
|
||||
was raised.
|
||||
<p>
|
||||
For a {@link org.apache.hadoop.service.launcher.LaunchableService}, the return
|
||||
code is the number returned from the
|
||||
{@link org.apache.hadoop.service.launcher.LaunchableService#execute()}
|
||||
operation, again, unless overridden by a raised exception.
|
||||
|
||||
<p>
|
||||
Exceptions are converted into exit codes -but rather than simply
|
||||
have a "something went wrong" exit code, exceptions <i>may</i>
|
||||
provide exit codes which will be extracted and used as the return code.
|
||||
This enables Launchable Services to use exceptions as a way
|
||||
of returning error codes to signal failures and for
|
||||
normal Services to return any error code at all.
|
||||
|
||||
<p>
|
||||
Any exception which implements the
|
||||
{@link org.apache.hadoop.util.ExitCodeProvider}
|
||||
interface is considered to be a provider of the exit code: the method
|
||||
{@link org.apache.hadoop.util.ExitCodeProvider#getExitCode()}
|
||||
will be called on the caught exception to generate the return code.
|
||||
This return code and the message in the exception will be used to
|
||||
generate an instance of
|
||||
{@link org.apache.hadoop.util.ExitUtil.ExitException}
|
||||
which can be passed down to
|
||||
{@link org.apache.hadoop.util.ExitUtil#terminate(ExitUtil.ExitException)}
|
||||
to trigger a JVM exit. The initial exception will be used as the cause
|
||||
of the {@link org.apache.hadoop.util.ExitUtil.ExitException}.
|
||||
|
||||
<p>
|
||||
If the exception is already an instance or subclass of
|
||||
{@link org.apache.hadoop.util.ExitUtil.ExitException}, it is passed
|
||||
directly to
|
||||
{@link org.apache.hadoop.util.ExitUtil#terminate(ExitUtil.ExitException)}
|
||||
without any conversion.
|
||||
One such subclass,
|
||||
{@link org.apache.hadoop.service.launcher.ServiceLaunchException}
|
||||
may be useful: it includes formatted exception message generation.
|
||||
It also implements the
|
||||
{@link org.apache.hadoop.service.launcher.LauncherExitCodes}
|
||||
interface listing common exception codes. These are exception codes
|
||||
that can be raised by the {@link org.apache.hadoop.service.launcher.ServiceLauncher}
|
||||
itself to indicate problems during parsing the command line, creating
|
||||
the service instance and the like. There are also some common exit codes
|
||||
for Hadoop/YARN service failures, such as
|
||||
{@link org.apache.hadoop.service.launcher.LauncherExitCodes#EXIT_UNAUTHORIZED}.
|
||||
Note that {@link org.apache.hadoop.util.ExitUtil.ExitException} itself
|
||||
implements {@link org.apache.hadoop.util.ExitCodeProvider#getExitCode()}.
|
||||
|
||||
<p>
|
||||
If an exception does not implement
|
||||
{@link org.apache.hadoop.util.ExitCodeProvider#getExitCode()},
|
||||
it will be wrapped in an {@link org.apache.hadoop.util.ExitUtil.ExitException}
|
||||
with the exit code
|
||||
{@link org.apache.hadoop.service.launcher.LauncherExitCodes#EXIT_EXCEPTION_THROWN}.
|
||||
|
||||
<p>
|
||||
To view the exit code extraction in sequence, it is:
|
||||
<ol>
|
||||
<li>If no exception was triggered by a basic service, a
|
||||
{@link org.apache.hadoop.service.launcher.ServiceLaunchException} with an
|
||||
exit code of 0 is created.</li>
|
||||
|
||||
<li>For a LaunchableService, the exit code is the result of {@code execute()}.
|
||||
Again, a {@link org.apache.hadoop.service.launcher.ServiceLaunchException}
|
||||
with a return code of 0 is created.
|
||||
</li>
|
||||
|
||||
<li>Otherwise, if the exception is an instance of {@code ExitException},
|
||||
it is returned as the service terminating exception.</li>
|
||||
|
||||
<li>If the exception implements {@link org.apache.hadoop.util.ExitCodeProvider},
|
||||
its exit code and {@code getMessage()} value become the exit exception.</li>
|
||||
|
||||
<li>Otherwise, it is wrapped as a
|
||||
{@link org.apache.hadoop.service.launcher.ServiceLaunchException}
|
||||
with the exit code
|
||||
{@link org.apache.hadoop.service.launcher.LauncherExitCodes#EXIT_EXCEPTION_THROWN}
|
||||
to indicate that an exception was thrown.</li>
|
||||
|
||||
<li>This is finally passed to
|
||||
{@link org.apache.hadoop.util.ExitUtil#terminate(ExitUtil.ExitException)},
|
||||
by way of
|
||||
{@link org.apache.hadoop.service.launcher.ServiceLauncher#exit(ExitUtil.ExitException)};
|
||||
a method designed to allow subclasses to override for testing.</li>
|
||||
|
||||
<li>The {@link org.apache.hadoop.util.ExitUtil} class then terminates the JVM
|
||||
with the specified exit code, printing the {@code toString()} value
|
||||
of the exception if the return code is non-zero.</li>
|
||||
</ol>
|
||||
|
||||
This process may seem convoluted, but it is designed to allow any exception
|
||||
in the Hadoop exception hierarchy to generate exit codes,
|
||||
and to minimize the amount of exception wrapping which takes place.
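
<p>
A sketch of how the {@code exit()} override hook mentioned above can be used in
tests (the class name is invented, and the constructor and the exact modifier
of {@code exit()} are assumptions to be checked against the
{@link org.apache.hadoop.service.launcher.ServiceLauncher} source):

<pre>
public class RecordingLauncher extends ServiceLauncher<Service> {

  private ExitUtil.ExitException lastExit;

  public RecordingLauncher(String serviceClassName) {
    super(serviceClassName);
  }

  protected void exit(ExitUtil.ExitException e) {
    // record the exit rather than terminating the test JVM
    lastExit = e;
  }

  public ExitUtil.ExitException getLastExit() {
    return lastExit;
  }
}
</pre>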
|
||||
|
||||
<h2>Interrupt Handling</h2>
|
||||
|
||||
The Service Launcher has a helper class,
|
||||
{@link org.apache.hadoop.service.launcher.InterruptEscalator}
|
||||
to handle the standard kill (SIGTERM) and control-C (SIGINT) signals.
|
||||
This class registers for signal callbacks from these signals, and,
|
||||
when received, attempts to stop the service in a limited period of time.
|
||||
It then triggers a JVM shutdown by way of
|
||||
{@link org.apache.hadoop.util.ExitUtil#terminate(int, String)}.
|
||||
<p>
|
||||
If a second signal is received, the
|
||||
{@link org.apache.hadoop.service.launcher.InterruptEscalator}
|
||||
reacts by triggering an immediate JVM halt, invoking
|
||||
{@link org.apache.hadoop.util.ExitUtil#halt(int, String)}.
|
||||
This escalation process is designed to address the situation in which
|
||||
a shutdown-hook can block, yet the caller (such as an init.d daemon)
|
||||
wishes to kill the process.
|
||||
The shutdown script should repeat the kill signal after a chosen time period,
|
||||
to trigger the more aggressive process halt. The exit code will always be
|
||||
{@link org.apache.hadoop.service.launcher.LauncherExitCodes#EXIT_INTERRUPTED}.
|
||||
<p>
|
||||
The {@link org.apache.hadoop.service.launcher.ServiceLauncher} also registers
|
||||
a {@link org.apache.hadoop.service.launcher.ServiceShutdownHook} with the
|
||||
Hadoop shutdown hook manager, unregistering it afterwards. This hook will
|
||||
stop the service if a shutdown request is received, so ensuring that
|
||||
if the JVM is exited by any thread, an attempt to shut down the service
|
||||
will be made.
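
<p>
The hook itself is a small utility; a sketch of direct use, with
{@code service} and {@code priority} as placeholders supplied by the caller:

<pre>
// attach a hook so that any JVM shutdown also stops the service
ServiceShutdownHook hook = new ServiceShutdownHook(service);
hook.register(priority);
// ... later, once the service has stopped normally ...
hook.unregister();
</pre>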
|
||||
|
||||
|
||||
<h2>Configuration class creation</h2>
|
||||
|
||||
The Configuration class used to initialize a service is a basic
|
||||
{@link org.apache.hadoop.conf.Configuration} instance. As the launcher is
|
||||
the entry point for an application, this implies that the HDFS, YARN or other
|
||||
default configurations will not have been forced in through the constructors
|
||||
of {@code HdfsConfiguration} or {@code YarnConfiguration}.
|
||||
<p>
|
||||
What the launcher does do is use reflection to try to create instances of
|
||||
these classes simply to force in the common resources. If the classes are
|
||||
not on the classpath this fact will be logged.
|
||||
<p>
|
||||
Applications may consider it essential to either force-load the relevant
|
||||
configuration, or pass it down to the service being created, in which
|
||||
case further measures may be needed.
|
||||
|
||||
<p><b>1: Creation in an extended {@code ServiceLauncher}</b>
|
||||
|
||||
<p>
|
||||
Subclass the Service launcher and override its
|
||||
{@link org.apache.hadoop.service.launcher.ServiceLauncher#createConfiguration()}
|
||||
method with one that creates the right configuration.
|
||||
This is good if a single
|
||||
launcher can be created for all services launched by a module, such as
|
||||
HDFS or YARN. It does imply a dedicated script to invoke the custom
|
||||
{@code main()} method.
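
<p>
A sketch of such a subclass (the generic parameter, the constructor used and
the exact modifier of {@code createConfiguration()} are assumptions here and
should be checked against the ServiceLauncher source):

<pre>
public class YarnToolLauncher extends ServiceLauncher<Service> {

  public YarnToolLauncher(String serviceClassName) {
    super(serviceClassName);
  }

  protected Configuration createConfiguration() {
    // force in the YARN default resources for every launched service
    return new YarnConfiguration();
  }
}
</pre>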
|
||||
|
||||
<p><b>2: Creation in {@code bindArgs()}</b>
|
||||
|
||||
<p>
|
||||
In
|
||||
{@link org.apache.hadoop.service.launcher.LaunchableService#bindArgs(Configuration, List)},
|
||||
a new configuration is created:
|
||||
|
||||
<pre>
|
||||
public Configuration bindArgs(Configuration config, List<String> args)
|
||||
throws Exception {
|
||||
Configuration newConf = new YarnConfiguration(config);
|
||||
return newConf;
|
||||
}
|
||||
</pre>
|
||||
|
||||
This guarantees a configuration of the right type is generated for all
|
||||
instances created via the service launcher. It does imply that this is
|
||||
expected to be the only way that services will be launched.
|
||||
|
||||
<p><b>3: Creation in {@code serviceInit()}</b>
|
||||
|
||||
<pre>
|
||||
protected void serviceInit(Configuration conf) throws Exception {
|
||||
super.serviceInit(new YarnConfiguration(conf));
|
||||
}
|
||||
</pre>
|
||||
|
||||
<p>
|
||||
This is a strategy used by many existing YARN services, and is ideal for
|
||||
services which do not implement the LaunchableService interface. Its one
|
||||
weakness is that the configuration is now private to that instance. Some
|
||||
YARN services use a single shared configuration instance as a way of
|
||||
propagating information between peer services in a
|
||||
{@link org.apache.hadoop.service.CompositeService}.
|
||||
While a dangerous practice, it does happen.
|
||||
|
||||
|
||||
<b>Summary</b>: the ServiceLauncher makes a best-effort attempt to load the
|
||||
standard Configuration subclasses, but does not fail if they are not present.
|
||||
Services which require a specific subclass should follow one of the
|
||||
strategies listed;
|
||||
creation in {@code serviceInit()} is the recommended policy.
|
||||
|
||||
<h2>Configuration file loading</h2>
|
||||
|
||||
Before the service is bound to the CLI, the ServiceLauncher scans through
|
||||
all the arguments after the first one, looking for instances of the
|
||||
{@link org.apache.hadoop.service.launcher.ServiceLauncher#ARG_CONF}
|
||||
argument pair: {@code --conf <file>}. This must refer to a file
|
||||
in the local filesystem which exists.
|
||||
<p>
|
||||
It will be loaded into the Hadoop configuration
|
||||
class (the one created by the
|
||||
{@link org.apache.hadoop.service.launcher.ServiceLauncher#createConfiguration()}
|
||||
method).
|
||||
If this argument is repeated multiple times, all configuration
|
||||
files are merged with the latest file on the command line being the
|
||||
last one to be applied.
|
||||
<p>
|
||||
All the {@code --conf <file>} argument pairs are stripped off
|
||||
the argument list provided to the instantiated service; they get the
|
||||
merged configuration, but not the commands used to create it.
|
||||
|
||||
<h2>Utility Classes</h2>
|
||||
|
||||
<ul>
|
||||
|
||||
<li>
|
||||
{@link org.apache.hadoop.service.launcher.IrqHandler}: registers interrupt
|
||||
handlers using {@code sun.misc} APIs.
|
||||
</li>
|
||||
|
||||
<li>
|
||||
{@link org.apache.hadoop.service.launcher.ServiceLaunchException}: a
|
||||
subclass of {@link org.apache.hadoop.util.ExitUtil.ExitException} which
|
||||
takes a {@code String.format()} format string and a list of arguments to create
|
||||
the exception text.
|
||||
</li>
|
||||
|
||||
</ul>
|
||||
*/
|
||||
|
||||
|
||||
package org.apache.hadoop.service.launcher;
|
||||
|
||||
import java.util.List;
|
||||
|
||||
import org.apache.hadoop.conf.Configuration;
|
||||
import org.apache.hadoop.util.ExitUtil;
|
@ -0,0 +1,35 @@
|
||||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
* regarding copyright ownership. The ASF licenses this file
|
||||
* to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package org.apache.hadoop.util;
|
||||
|
||||
/**
|
||||
* Get the exit code of an exception.
|
||||
* Making it an interface makes
|
||||
* it possible to retrofit exit codes onto existing classes,
|
||||
* and add exit code providers under all parts of the Exception tree.
|
||||
*/
|
||||
|
||||
public interface ExitCodeProvider {
|
||||
|
||||
/**
|
||||
* Method to get the exit code.
|
||||
* @return the exit code
|
||||
*/
|
||||
int getExitCode();
|
||||
}
|
@ -17,41 +17,127 @@
|
||||
*/
|
||||
package org.apache.hadoop.util;
|
||||
|
||||
import org.apache.commons.logging.Log;
|
||||
import org.apache.commons.logging.LogFactory;
|
||||
import org.apache.hadoop.classification.InterfaceAudience;
|
||||
import org.apache.hadoop.classification.InterfaceStability;
|
||||
import org.slf4j.Logger;
|
||||
import org.slf4j.LoggerFactory;
|
||||
|
||||
/**
|
||||
* Facilitates hooking process termination for tests and debugging.
|
||||
* Facilitates hooking process termination for tests, debugging
|
||||
* and embedding.
|
||||
*
|
||||
* Hadoop code that attempts to call {@link System#exit(int)}
|
||||
* or {@link Runtime#halt(int)} MUST invoke it via these methods.
|
||||
*/
|
||||
@InterfaceAudience.LimitedPrivate({"HDFS", "MapReduce"})
|
||||
@InterfaceAudience.LimitedPrivate({"HDFS", "MapReduce", "YARN"})
|
||||
@InterfaceStability.Unstable
|
||||
public final class ExitUtil {
|
||||
private final static Log LOG = LogFactory.getLog(ExitUtil.class.getName());
|
||||
private static final Logger
|
||||
LOG = LoggerFactory.getLogger(ExitUtil.class.getName());
|
||||
private static volatile boolean systemExitDisabled = false;
|
||||
private static volatile boolean systemHaltDisabled = false;
|
||||
private static volatile ExitException firstExitException;
|
||||
private static volatile HaltException firstHaltException;
|
||||
/** Message raised from an exit exception if none were provided: {@value}. */
|
||||
public static final String EXIT_EXCEPTION_MESSAGE = "ExitException";
|
||||
/** Message raised from a halt exception if none were provided: {@value}. */
|
||||
public static final String HALT_EXCEPTION_MESSAGE = "HaltException";
|
||||
|
||||
public static class ExitException extends RuntimeException {
|
||||
private ExitUtil() {
|
||||
}
|
||||
|
||||
/**
|
||||
* An exception raised when {@link #terminate(int)} was
|
||||
* called and system exits were blocked.
|
||||
*/
|
||||
public static class ExitException extends RuntimeException
|
||||
implements ExitCodeProvider {
|
||||
private static final long serialVersionUID = 1L;
|
||||
/**
|
||||
* The status code.
|
||||
*/
|
||||
public final int status;
|
||||
|
||||
public ExitException(int status, String msg) {
|
||||
super(msg);
|
||||
this.status = status;
|
||||
}
|
||||
|
||||
public ExitException(int status,
|
||||
String message,
|
||||
Throwable cause) {
|
||||
super(message, cause);
|
||||
this.status = status;
|
||||
}
|
||||
|
||||
public ExitException(int status, Throwable cause) {
|
||||
super(cause);
|
||||
this.status = status;
|
||||
}
|
||||
|
||||
@Override
|
||||
public int getExitCode() {
|
||||
return status;
|
||||
}
|
||||
|
||||
/**
|
||||
* String value does not include exception type, just exit code and message.
|
||||
* @return the exit code and any message
|
||||
*/
|
||||
@Override
|
||||
public String toString() {
|
||||
String message = getMessage();
|
||||
if (message == null) {
|
||||
message = super.toString();
|
||||
}
|
||||
return Integer.toString(status) + ": " + message;
|
||||
}
|
||||
}
|
||||
|
||||
public static class HaltException extends RuntimeException {
|
||||
/**
|
||||
* An exception raised when {@link #halt(int)} was
|
||||
* called and system halts were blocked.
|
||||
*/
|
||||
public static class HaltException extends RuntimeException
|
||||
implements ExitCodeProvider {
|
||||
private static final long serialVersionUID = 1L;
|
||||
public final int status;
|
||||
|
||||
public HaltException(int status, Throwable cause) {
|
||||
super(cause);
|
||||
this.status = status;
|
||||
}
|
||||
|
||||
public HaltException(int status, String msg) {
|
||||
super(msg);
|
||||
this.status = status;
|
||||
}
|
||||
|
||||
public HaltException(int status,
|
||||
String message,
|
||||
Throwable cause) {
|
||||
super(message, cause);
|
||||
this.status = status;
|
||||
}
|
||||
|
||||
@Override
|
||||
public int getExitCode() {
|
||||
return status;
|
||||
}
|
||||
|
||||
/**
|
||||
* String value does not include exception type, just exit code and message.
|
||||
* @return the exit code and any message
|
||||
*/
|
||||
@Override
|
||||
public String toString() {
|
||||
String message = getMessage();
|
||||
if (message == null) {
|
||||
message = super.toString();
|
||||
}
|
||||
return Integer.toString(status) + ": " + message;
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
/**
|
||||
@ -69,7 +155,7 @@ public static void disableSystemHalt() {
|
||||
}
|
||||
|
||||
/**
|
||||
* @return true if terminate has been called
|
||||
* @return true if terminate has been called.
|
||||
*/
|
||||
public static boolean terminateCalled() {
|
||||
// Either we set this member or we actually called System#exit
|
||||
@ -77,21 +163,21 @@ public static boolean terminateCalled() {
|
||||
}
|
||||
|
||||
/**
|
||||
* @return true if halt has been called
|
||||
* @return true if halt has been called.
|
||||
*/
|
||||
public static boolean haltCalled() {
|
||||
return firstHaltException != null;
|
||||
}
|
||||
|
||||
/**
|
||||
* @return the first ExitException thrown, null if none thrown yet
|
||||
* @return the first ExitException thrown, null if none thrown yet.
|
||||
*/
|
||||
public static ExitException getFirstExitException() {
|
||||
return firstExitException;
|
||||
}
|
||||
|
||||
/**
|
||||
* @return the first {@code HaltException} thrown, null if none thrown yet
|
||||
* @return the first {@code HaltException} thrown, null if none thrown yet.
|
||||
*/
|
||||
public static HaltException getFirstHaltException() {
|
||||
return firstHaltException;
|
||||
@ -110,22 +196,22 @@ public static void resetFirstHaltException() {
|
||||
}
|
||||
|
||||
/**
|
||||
* Terminate the current process. Note that terminate is the *only* method
|
||||
* that should be used to terminate the daemon processes.
|
||||
*
|
||||
* @param status
|
||||
* exit code
|
||||
* @param msg
|
||||
* message used to create the {@code ExitException}
|
||||
* @throws ExitException
|
||||
* if System.exit is disabled for test purposes
|
||||
* Inner termination: either exit with the exception's exit code,
|
||||
* or, if system exits are disabled, rethrow the exception.
|
||||
* @param ee exit exception
|
||||
*/
|
||||
public static void terminate(int status, String msg) throws ExitException {
|
||||
LOG.info("Exiting with status " + status);
|
||||
public static synchronized void terminate(ExitException ee)
|
||||
throws ExitException {
|
||||
int status = ee.getExitCode();
|
||||
String msg = ee.getMessage();
|
||||
if (status != 0) {
|
||||
//exit indicates a problem, log it
|
||||
LOG.debug("Exiting with status {}: {}", status, msg, ee);
|
||||
LOG.info("Exiting with status {}: {}", status, msg);
|
||||
}
|
||||
if (systemExitDisabled) {
|
||||
ExitException ee = new ExitException(status, msg);
|
||||
LOG.fatal("Terminate called", ee);
|
||||
if (null == firstExitException) {
|
||||
LOG.error("Terminate called", ee);
|
||||
if (!terminateCalled()) {
|
||||
firstExitException = ee;
|
||||
}
|
||||
throw ee;
|
||||
@ -135,20 +221,26 @@ public static void terminate(int status, String msg) throws ExitException {
|
||||
|
||||
/**
|
||||
* Forcibly terminates the currently running Java virtual machine.
|
||||
*
|
||||
* @param status
|
||||
* exit code
|
||||
* @param msg
|
||||
* message used to create the {@code HaltException}
|
||||
* @throws HaltException
|
||||
* if Runtime.getRuntime().halt() is disabled for test purposes
|
||||
* The exception argument is rethrown if JVM halting is disabled.
|
||||
* @param ee the exception containing the status code, message and any stack
|
||||
* trace.
|
||||
* @throws HaltException if {@link Runtime#halt(int)} is disabled.
|
||||
*/
|
||||
public static void halt(int status, String msg) throws HaltException {
|
||||
LOG.info("Halt with status " + status + " Message: " + msg);
|
||||
public static synchronized void halt(HaltException ee) throws HaltException {
|
||||
int status = ee.getExitCode();
|
||||
String msg = ee.getMessage();
|
||||
try {
|
||||
if (status != 0) {
|
||||
//exit indicates a problem, log it
|
||||
LOG.debug("Halt with status {}: {}", status, msg, ee);
|
||||
LOG.info("Halt with status {}: {}", status, msg);
|
||||
}
|
||||
} catch (Exception ignored) {
|
||||
// ignore exceptions here, as it may be due to an out of memory situation
|
||||
}
|
||||
if (systemHaltDisabled) {
|
||||
HaltException ee = new HaltException(status, msg);
|
||||
LOG.fatal("Halt called", ee);
|
||||
if (null == firstHaltException) {
|
||||
LOG.error("Halt called", ee);
|
||||
if (!haltCalled()) {
|
||||
firstHaltException = ee;
|
||||
}
|
||||
throw ee;
|
||||
@ -157,47 +249,94 @@ public static void halt(int status, String msg) throws HaltException {
|
||||
}
|
||||
|
||||
/**
|
||||
* Like {@link terminate(int, String)} but uses the given throwable to
|
||||
* initialize the ExitException.
|
||||
*
|
||||
* @param status
|
||||
* @param t
|
||||
* throwable used to create the ExitException
|
||||
* @throws ExitException
|
||||
* if System.exit is disabled for test purposes
|
||||
* Like {@link #terminate(int, String)} but uses the given throwable to
|
||||
* build the message to display or throw as an
|
||||
* {@link ExitException}.
|
||||
* <p>
|
||||
* @param status exit code to use if the exception is not an ExitException.
|
||||
* @param t throwable which triggered the termination. If this exception
|
||||
* is an {@link ExitException} its status overrides that passed in.
|
||||
* @throws ExitException if {@link System#exit(int)} is disabled.
|
||||
*/
|
||||
public static void terminate(int status, Throwable t) throws ExitException {
|
||||
terminate(status, StringUtils.stringifyException(t));
|
||||
if (t instanceof ExitException) {
|
||||
terminate((ExitException) t);
|
||||
} else {
|
||||
terminate(new ExitException(status, t));
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Forcibly terminates the currently running Java virtual machine.
|
||||
*
|
||||
* @param status
|
||||
* @param t
|
||||
* @throws ExitException
|
||||
* @param status exit code to use if the exception is not a HaltException.
|
||||
* @param t throwable which triggered the termination. If this exception
|
||||
* is a {@link HaltException} its status overrides that passed in.
|
||||
* @throws HaltException if {@link Runtime#halt(int)} is disabled.
|
||||
*/
|
||||
public static void halt(int status, Throwable t) throws HaltException {
|
||||
halt(status, StringUtils.stringifyException(t));
|
||||
if (t instanceof HaltException) {
|
||||
halt((HaltException) t);
|
||||
} else {
|
||||
halt(new HaltException(status, t));
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Like {@link terminate(int, String)} without a message.
|
||||
* Like {@link #terminate(int, Throwable)} without a message.
|
||||
*
|
||||
* @param status
|
||||
* @throws ExitException
|
||||
* if System.exit is disabled for test purposes
|
||||
* @param status exit code
|
||||
* @throws ExitException if {@link System#exit(int)} is disabled.
|
||||
*/
|
||||
public static void terminate(int status) throws ExitException {
|
||||
terminate(status, "ExitException");
|
||||
terminate(status, EXIT_EXCEPTION_MESSAGE);
|
||||
}
|
||||
|
||||
/**
|
||||
* Terminate the current process. Note that terminate is the *only* method
|
||||
* that should be used to terminate the daemon processes.
|
||||
*
|
||||
* @param status exit code
|
||||
* @param msg message used to create the {@code ExitException}
|
||||
* @throws ExitException if {@link System#exit(int)} is disabled.
|
||||
*/
|
||||
public static void terminate(int status, String msg) throws ExitException {
|
||||
terminate(new ExitException(status, msg));
|
||||
}
|
||||
|
||||
/**
|
||||
* Forcibly terminates the currently running Java virtual machine.
|
||||
* @param status
|
||||
* @throws ExitException
|
||||
* @param status status code
|
||||
* @throws HaltException if {@link Runtime#halt(int)} is disabled.
|
||||
*/
|
||||
public static void halt(int status) throws HaltException {
|
||||
halt(status, "HaltException");
|
||||
halt(status, HALT_EXCEPTION_MESSAGE);
|
||||
}
|
||||
|
||||
/**
|
||||
* Forcibly terminates the currently running Java virtual machine.
|
||||
* @param status status code
|
||||
* @param message message
|
||||
* @throws HaltException if {@link Runtime#halt(int)} is disabled.
|
||||
*/
|
||||
public static void halt(int status, String message) throws HaltException {
|
||||
halt(new HaltException(status, message));
|
||||
}
|
||||
|
||||
/**
|
||||
* Handler for out of memory events -no attempt is made here
|
||||
* to cleanly shutdown or support halt blocking; a robust
|
||||
* printing of the event to stderr is all that can be done.
|
||||
* @param oome out of memory event
|
||||
*/
|
||||
public static void haltOnOutOfMemory(OutOfMemoryError oome) {
|
||||
//After catching an OOM java says it is undefined behavior, so don't
|
||||
//even try to clean up or we can get stuck on shutdown.
|
||||
try {
|
||||
System.err.println("Halting due to Out Of Memory Error...");
|
||||
} catch (Throwable err) {
|
||||
//Again we don't want to exit because of logging issues.
|
||||
}
|
||||
Runtime.getRuntime().halt(-1);
|
||||
}
|
||||
}
|
||||
|
@ -15,9 +15,7 @@
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
package org.apache.hadoop.util;
|
||||
|
||||
import java.io.File;
|
||||
package org.apache.hadoop.util;
import java.io.File;
|
||||
import java.io.FileNotFoundException;
|
||||
import java.io.IOException;
|
||||
import java.io.PrintStream;
|
||||
@ -118,6 +116,7 @@ public class GenericOptionsParser {
|
||||
private static final Log LOG = LogFactory.getLog(GenericOptionsParser.class);
|
||||
private Configuration conf;
|
||||
private CommandLine commandLine;
|
||||
private final boolean parseSuccessful;
|
||||
|
||||
/**
|
||||
* Create an options parser with the given options to parse the args.
|
||||
@ -171,7 +170,7 @@ public GenericOptionsParser(Configuration conf, String[] args)
|
||||
public GenericOptionsParser(Configuration conf,
|
||||
Options options, String[] args) throws IOException {
|
||||
this.conf = conf;
|
||||
parseGeneralOptions(options, args);
|
||||
parseSuccessful = parseGeneralOptions(options, args);
|
||||
}
|
||||
|
||||
/**
|
||||
@ -208,58 +207,72 @@ public CommandLine getCommandLine() {
|
||||
}
|
||||
|
||||
/**
|
||||
* Specify properties of each generic option
|
||||
* Query for the parse operation succeeding.
|
||||
* @return true if parsing the CLI was successful
|
||||
*/
|
||||
public boolean isParseSuccessful() {
|
||||
return parseSuccessful;
|
||||
}
|
||||
|
||||
/**
|
||||
* Specify properties of each generic option.
|
||||
* <i>Important</i>: as {@link OptionBuilder} is not thread safe, subclasses
|
||||
* must synchronize use on {@code OptionBuilder.class}
|
||||
*/
|
||||
@SuppressWarnings("static-access")
|
||||
private static synchronized Options buildGeneralOptions(Options opts) {
|
||||
Option fs = OptionBuilder.withArgName("file:///|hdfs://namenode:port")
|
||||
.hasArg()
|
||||
.withDescription("specify default filesystem URL to use, "
|
||||
+ "overrides 'fs.defaultFS' property from configurations.")
|
||||
.create("fs");
|
||||
Option jt = OptionBuilder.withArgName("local|resourcemanager:port")
|
||||
.hasArg()
|
||||
.withDescription("specify a ResourceManager")
|
||||
.create("jt");
|
||||
Option oconf = OptionBuilder.withArgName("configuration file")
|
||||
.hasArg()
|
||||
.withDescription("specify an application configuration file")
|
||||
.create("conf");
|
||||
Option property = OptionBuilder.withArgName("property=value")
|
||||
.hasArg()
|
||||
.withDescription("use value for given property")
|
||||
.create('D');
|
||||
Option libjars = OptionBuilder.withArgName("paths")
|
||||
.hasArg()
|
||||
.withDescription("comma separated jar files to include in the classpath.")
|
||||
.create("libjars");
|
||||
Option files = OptionBuilder.withArgName("paths")
|
||||
.hasArg()
|
||||
.withDescription("comma separated files to be copied to the " +
|
||||
"map reduce cluster")
|
||||
.create("files");
|
||||
Option archives = OptionBuilder.withArgName("paths")
|
||||
.hasArg()
|
||||
.withDescription("comma separated archives to be unarchived" +
|
||||
" on the compute machines.")
|
||||
.create("archives");
|
||||
|
||||
// file with security tokens
|
||||
Option tokensFile = OptionBuilder.withArgName("tokensFile")
|
||||
.hasArg()
|
||||
.withDescription("name of the file with the tokens")
|
||||
.create("tokenCacheFile");
|
||||
protected Options buildGeneralOptions(Options opts) {
|
||||
synchronized (OptionBuilder.class) {
|
||||
Option fs = OptionBuilder.withArgName("file:///|hdfs://namenode:port")
|
||||
.hasArg()
|
||||
.withDescription("specify default filesystem URL to use, "
|
||||
+ "overrides 'fs.defaultFS' property from configurations.")
|
||||
.create("fs");
|
||||
Option jt = OptionBuilder.withArgName("local|resourcemanager:port")
|
||||
.hasArg()
|
||||
.withDescription("specify a ResourceManager")
|
||||
.create("jt");
|
||||
Option oconf = OptionBuilder.withArgName("configuration file")
|
||||
.hasArg()
|
||||
.withDescription("specify an application configuration file")
|
||||
.create("conf");
|
||||
Option property = OptionBuilder.withArgName("property=value")
|
||||
.hasArg()
|
||||
.withDescription("use value for given property")
|
||||
.create('D');
|
||||
Option libjars = OptionBuilder.withArgName("paths")
|
||||
.hasArg()
|
||||
.withDescription(
|
||||
"comma separated jar files to include in the classpath.")
|
||||
.create("libjars");
|
||||
Option files = OptionBuilder.withArgName("paths")
|
||||
.hasArg()
|
||||
.withDescription("comma separated files to be copied to the " +
|
||||
"map reduce cluster")
|
||||
.create("files");
|
||||
Option archives = OptionBuilder.withArgName("paths")
|
||||
.hasArg()
|
||||
.withDescription("comma separated archives to be unarchived" +
|
||||
" on the compute machines.")
|
||||
.create("archives");
|
||||
|
||||
opts.addOption(fs);
|
||||
opts.addOption(jt);
|
||||
opts.addOption(oconf);
|
||||
opts.addOption(property);
|
||||
opts.addOption(libjars);
|
||||
opts.addOption(files);
|
||||
opts.addOption(archives);
|
||||
opts.addOption(tokensFile);
|
||||
// file with security tokens
|
||||
Option tokensFile = OptionBuilder.withArgName("tokensFile")
|
||||
.hasArg()
|
||||
.withDescription("name of the file with the tokens")
|
||||
.create("tokenCacheFile");
|
||||
|
||||
return opts;
|
||||
|
||||
opts.addOption(fs);
|
||||
opts.addOption(jt);
|
||||
opts.addOption(oconf);
|
||||
opts.addOption(property);
|
||||
opts.addOption(libjars);
|
||||
opts.addOption(files);
|
||||
opts.addOption(archives);
|
||||
opts.addOption(tokensFile);
|
||||
|
||||
return opts;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
@ -368,7 +381,7 @@ public static URL[] getLibJars(Configuration conf) throws IOException {
|
||||
}
|
||||
|
||||
/**
|
||||
* takes input as a comma separated list of files
|
||||
* Takes input as a comma separated list of files
|
||||
* and verifies if they exist. It defaults for file:///
|
||||
* if the files specified do not have a scheme.
|
||||
* it returns the paths uri converted defaulting to file:///.
|
||||
@ -543,20 +556,24 @@ private String[] preProcessForWindows(String[] args) {
|
||||
*
|
||||
* @param opts Options to use for parsing args.
|
||||
* @param args User-specified arguments
|
||||
* @return true if the parse was successful
|
||||
*/
|
||||
private void parseGeneralOptions(Options opts, String[] args)
|
||||
private boolean parseGeneralOptions(Options opts, String[] args)
|
||||
throws IOException {
|
||||
opts = buildGeneralOptions(opts);
|
||||
CommandLineParser parser = new GnuParser();
|
||||
boolean parsed = false;
|
||||
try {
|
||||
commandLine = parser.parse(opts, preProcessForWindows(args), true);
|
||||
processGeneralOptions(commandLine);
|
||||
parsed = true;
|
||||
} catch(ParseException e) {
|
||||
LOG.warn("options parsing failed: "+e.getMessage());
|
||||
|
||||
HelpFormatter formatter = new HelpFormatter();
|
||||
formatter.printHelp("general options are: ", opts);
|
||||
}
|
||||
return parsed;
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -440,10 +440,11 @@ public static Collection<String> getTrimmedStringCollection(String str){
|
||||
}
|
||||
|
||||
/**
|
||||
* Splits a comma separated value <code>String</code>, trimming leading and
|
||||
* trailing whitespace on each value.
|
||||
* Splits a comma or newline separated value <code>String</code>, trimming
|
||||
* leading and trailing whitespace on each value.
|
||||
*
|
||||
* @param str a comma separated <code>String</code> with values, may be null
|
||||
* @param str a comma or newline separated <code>String</code> with values,
|
||||
* may be null
|
||||
* @return an array of <code>String</code> values, empty array if null String
|
||||
* input
|
||||
*/
|
||||
@ -452,7 +453,7 @@ public static String[] getTrimmedStrings(String str){
|
||||
return emptyStringArray;
|
||||
}
|
||||
|
||||
return str.trim().split("\\s*,\\s*");
|
||||
return str.trim().split("\\s*[,\n]\\s*");
|
||||
}
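// For example (illustrative): getTrimmedStrings("a, b\n c") returns
// {"a", "b", "c"}, since the pattern above splits on newlines as well as commas.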
|
||||
|
||||
final public static String[] emptyStringArray = {};
|
||||
@ -675,11 +676,11 @@ public static String unEscapeString(String str, char escapeChar,
|
||||
* @param msg content of the message
|
||||
* @return a message for logging
|
||||
*/
|
||||
private static String toStartupShutdownString(String prefix, String [] msg) {
|
||||
public static String toStartupShutdownString(String prefix, String[] msg) {
|
||||
StringBuilder b = new StringBuilder(prefix);
|
||||
b.append("\n/************************************************************");
|
||||
for(String s : msg)
|
||||
b.append("\n" + prefix + s);
|
||||
b.append("\n").append(prefix).append(s);
|
||||
b.append("\n************************************************************/");
|
||||
return b.toString();
|
||||
}
|
||||
@ -710,21 +711,7 @@ static void startupShutdownMessage(Class<?> clazz, String[] args,
|
||||
final LogAdapter LOG) {
|
||||
final String hostname = NetUtils.getHostname();
|
||||
final String classname = clazz.getSimpleName();
|
||||
LOG.info(
|
||||
toStartupShutdownString("STARTUP_MSG: ", new String[] {
|
||||
"Starting " + classname,
|
||||
" user = " + System.getProperty("user.name"),
|
||||
" host = " + hostname,
|
||||
" args = " + Arrays.asList(args),
|
||||
" version = " + VersionInfo.getVersion(),
|
||||
" classpath = " + System.getProperty("java.class.path"),
|
||||
" build = " + VersionInfo.getUrl() + " -r "
|
||||
+ VersionInfo.getRevision()
|
||||
+ "; compiled by '" + VersionInfo.getUser()
|
||||
+ "' on " + VersionInfo.getDate(),
|
||||
" java = " + System.getProperty("java.version") }
|
||||
)
|
||||
);
|
||||
LOG.info(createStartupShutdownMessage(classname, hostname, args));
|
||||
|
||||
if (SystemUtils.IS_OS_UNIX) {
|
||||
try {
|
||||
@ -744,6 +731,29 @@ public void run() {
|
||||
|
||||
}
|
||||
|
||||
/**
|
||||
* Generate the text for the startup/shutdown message of processes.
|
||||
* @param classname short classname of the class
|
||||
* @param hostname hostname
|
||||
* @param args Command arguments
|
||||
* @return a string to log.
|
||||
*/
|
||||
public static String createStartupShutdownMessage(String classname,
|
||||
String hostname, String[] args) {
|
||||
return toStartupShutdownString("STARTUP_MSG: ", new String[] {
|
||||
"Starting " + classname,
|
||||
" host = " + hostname,
|
||||
" args = " + Arrays.asList(args),
|
||||
" version = " + VersionInfo.getVersion(),
|
||||
" classpath = " + System.getProperty("java.class.path"),
|
||||
" build = " + VersionInfo.getUrl() + " -r "
|
||||
+ VersionInfo.getRevision()
|
||||
+ "; compiled by '" + VersionInfo.getUser()
|
||||
+ "' on " + VersionInfo.getDate(),
|
||||
" java = " + System.getProperty("java.version") }
|
||||
);
|
||||
}
|
||||
|
||||
/**
|
||||
* The traditional binary prefixes, kilo, mega, ..., exa,
|
||||
* which can be represented by a 64-bit integer.
|
||||
|
@ -90,7 +90,7 @@ String getSystemInfoInfoFromShell() {
|
||||
return null;
|
||||
}
|
||||
|
||||
void refreshIfNeeded() {
|
||||
synchronized void refreshIfNeeded() {
|
||||
long now = now();
|
||||
if (now - lastRefreshTime > REFRESH_INTERVAL_MS) {
|
||||
long refreshInterval = now - lastRefreshTime;
|
||||
|
@ -17,3 +17,5 @@ org.apache.hadoop.fs.LocalFileSystem
|
||||
org.apache.hadoop.fs.viewfs.ViewFileSystem
|
||||
org.apache.hadoop.fs.ftp.FTPFileSystem
|
||||
org.apache.hadoop.fs.HarFileSystem
|
||||
org.apache.hadoop.fs.http.HttpFileSystem
|
||||
org.apache.hadoop.fs.http.HttpsFileSystem
|
||||
|
@ -0,0 +1,18 @@
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
org.apache.hadoop.io.erasurecode.rawcoder.NativeRSRawErasureCoderFactory
|
||||
org.apache.hadoop.io.erasurecode.rawcoder.NativeXORRawErasureCoderFactory
|
||||
org.apache.hadoop.io.erasurecode.rawcoder.RSRawErasureCoderFactory
|
||||
org.apache.hadoop.io.erasurecode.rawcoder.RSLegacyRawErasureCoderFactory
|
||||
org.apache.hadoop.io.erasurecode.rawcoder.XORRawErasureCoderFactory
|
@ -517,9 +517,19 @@
|
||||
|
||||
<property>
|
||||
<name>hadoop.security.sensitive-config-keys</name>
|
||||
<value>secret$,password$,ssl.keystore.pass$,fs.s3.*[Ss]ecret.?[Kk]ey,fs.azure.account.key.*,dfs.webhdfs.oauth2.[a-z]+.token,hadoop.security.sensitive-config-keys</value>
|
||||
<description>A comma-separated list of regular expressions to match against
|
||||
configuration keys that should be redacted where appropriate, for
|
||||
<value>
|
||||
secret$
|
||||
password$
|
||||
ssl.keystore.pass$
|
||||
fs.s3.*[Ss]ecret.?[Kk]ey
|
||||
fs.s3a.*.server-side-encryption.key
|
||||
fs.azure.account.key.*
|
||||
credential$
|
||||
oauth.*token$
|
||||
hadoop.security.sensitive-config-keys
|
||||
</value>
|
||||
<description>A comma-separated or multi-line list of regular expressions to
|
||||
match configuration keys that should be redacted where appropriate, for
|
||||
example, when logging modified properties during a reconfiguration,
|
||||
private credentials should not be logged.
|
||||
</description>
|
||||
@ -658,7 +668,7 @@
|
||||
|
||||
<property>
|
||||
<name>io.erasurecode.codec.rs.rawcoders</name>
|
||||
<value>org.apache.hadoop.io.erasurecode.rawcoder.NativeRSRawErasureCoderFactory,org.apache.hadoop.io.erasurecode.rawcoder.RSRawErasureCoderFactory</value>
|
||||
<value>rs_native,rs_java</value>
|
||||
<description>
|
||||
Comma separated raw coder implementations for the rs codec. The earlier
|
||||
factory takes precedence over the following ones if creating a raw coder fails.
|
||||
@ -667,7 +677,7 @@
|
||||
|
||||
<property>
|
||||
<name>io.erasurecode.codec.rs-legacy.rawcoders</name>
|
||||
<value>org.apache.hadoop.io.erasurecode.rawcoder.RSRawErasureCoderFactoryLegacy</value>
|
||||
<value>rs-legacy_java</value>
|
||||
<description>
|
||||
Comma separated raw coder implementations for the rs-legacy codec. The earlier
|
||||
factory takes precedence over the following ones if creating a raw coder fails.
|
||||
@ -676,7 +686,7 @@
|
||||
|
||||
<property>
|
||||
<name>io.erasurecode.codec.xor.rawcoders</name>
|
||||
<value>org.apache.hadoop.io.erasurecode.rawcoder.NativeXORRawErasureCoderFactory,org.apache.hadoop.io.erasurecode.rawcoder.XORRawErasureCoderFactory</value>
|
||||
<value>xor_native,xor_java</value>
|
||||
<description>
|
||||
Comma separated raw coder implementations for the xor codec. The earlier
|
||||
factory takes precedence over the following ones if creating a raw coder fails.
|
||||
@ -1151,7 +1161,18 @@
|
||||
<property>
|
||||
<name>fs.s3a.server-side-encryption-algorithm</name>
|
||||
<description>Specify a server-side encryption algorithm for s3a: file system.
|
||||
Unset by default, and the only other currently allowable value is AES256.
|
||||
Unset by default. It supports the following values: 'AES256' (for SSE-S3),
|
||||
'SSE-KMS' and 'SSE-C'.
|
||||
</description>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>fs.s3a.server-side-encryption.key</name>
|
||||
<description>Specific encryption key to use if fs.s3a.server-side-encryption-algorithm
|
||||
has been set to 'SSE-KMS' or 'SSE-C'. In the case of SSE-C, the value of this property
|
||||
should be the Base64 encoded key. If you are using SSE-KMS and leave this property empty,
|
||||
you'll be using your default S3 KMS key; otherwise you should set this property to
|
||||
the specific KMS key id.
|
||||
</description>
|
||||
</property>
|
||||
|
||||
|
@ -62,6 +62,7 @@ The following table lists the configuration property names that are deprecated i
|
||||
| fs.checkpoint.edits.dir | dfs.namenode.checkpoint.edits.dir |
|
||||
| fs.checkpoint.period | dfs.namenode.checkpoint.period |
|
||||
| fs.default.name | fs.defaultFS |
|
||||
| fs.s3a.server-side-encryption-key | fs.s3a.server-side-encryption.key |
|
||||
| hadoop.configured.node.mapping | net.topology.configured.node.mapping |
|
||||
| hadoop.native.lib | io.native.lib.available |
|
||||
| hadoop.net.static.resolutions | mapreduce.tasktracker.net.static.resolutions |
|
||||
|
@ -626,7 +626,6 @@ Sets an extended attribute name and value for a file or directory.
|
||||
|
||||
Options:
|
||||
|
||||
* -b: Remove all but the base ACL entries. The entries for user, group and others are retained for compatibility with permission bits.
|
||||
* -n name: The extended attribute name.
|
||||
* -v value: The extended attribute value. There are three different encoding methods for the value. If the argument is enclosed in double quotes, then the value is the string inside the quotes. If the argument is prefixed with 0x or 0X, then it is taken as a hexadecimal number. If the argument begins with 0s or 0S, then it is taken as a base64 encoding.
|
||||
* -x name: Remove the extended attribute.
|
||||
|
@ -334,8 +334,8 @@ FsVolume
|
||||
|
||||
Per-volume metrics contain Datanode Volume IO related statistics. Per-volume
|
||||
metrics are off by default. They can be enabled by setting `dfs.datanode
|
||||
.fileio.profiling.sampling.fraction` to a fraction between 0.0 and 1.0.
|
||||
Setting this value to 0.0 would mean profiling is not enabled. But enabling
|
||||
.fileio.profiling.percentage.fraction` to an integer value between 1 and 100.
|
||||
Setting this value to 0 would mean profiling is not enabled. But enabling
|
||||
per-volume metrics may have a performance impact. Each metrics record
|
||||
contains tags such as Hostname as additional information along with metrics.
|
||||
|
||||
|
@ -91,12 +91,12 @@ sys.argv.pop(0) # discard name
|
||||
netmask = '255.255.255.0' # set netmask to what's being used in your environment. The example uses a /24
|
||||
|
||||
for ip in sys.argv: # loop over list of datanode IP's
|
||||
address = '{0}/{1}'.format(ip, netmask) # format address string so it looks like 'ip/netmask' to make netaddr work
|
||||
try:
|
||||
network_address = netaddr.IPNetwork(address).network # calculate and print network address
|
||||
print "/{0}".format(network_address)
|
||||
except:
|
||||
print "/rack-unknown" # print catch-all value if unable to calculate network address
|
||||
address = '{0}/{1}'.format(ip, netmask) # format address string so it looks like 'ip/netmask' to make netaddr work
|
||||
try:
|
||||
network_address = netaddr.IPNetwork(address).network # calculate and print network address
|
||||
print "/{0}".format(network_address)
|
||||
except:
|
||||
print "/rack-unknown" # print catch-all value if unable to calculate network address
|
||||
```
|
||||
|
||||
bash Example
|
||||
|
@ -34,15 +34,32 @@ public class TestConfigRedactor {
|
||||
private static final String ORIGINAL_VALUE = "Hello, World!";
|
||||
|
||||
@Test
|
||||
public void redact() throws Exception {
|
||||
public void testRedactWithCoreDefault() throws Exception {
|
||||
Configuration conf = new Configuration();
|
||||
testRedact(conf);
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testRedactNoCoreDefault() throws Exception {
|
||||
Configuration conf = new Configuration(false);
|
||||
testRedact(conf);
|
||||
}
|
||||
|
||||
private void testRedact(Configuration conf) throws Exception {
|
||||
ConfigRedactor redactor = new ConfigRedactor(conf);
|
||||
String processedText;
|
||||
|
||||
List<String> sensitiveKeys = Arrays.asList(
|
||||
"fs.s3a.secret.key",
|
||||
"fs.s3a.bucket.BUCKET.secret.key",
|
||||
"fs.s3a.server-side-encryption.key",
|
||||
"fs.s3a.bucket.engineering.server-side-encryption.key",
|
||||
"fs.s3n.awsSecretKey",
|
||||
"fs.azure.account.key.abcdefg.blob.core.windows.net",
|
||||
"fs.adl.oauth2.refresh.token",
|
||||
"fs.adl.oauth2.credential",
|
||||
"dfs.adls.oauth2.refresh.token",
|
||||
"dfs.adls.oauth2.credential",
|
||||
"dfs.webhdfs.oauth2.access.token",
|
||||
"dfs.webhdfs.oauth2.refresh.token",
|
||||
"ssl.server.keystore.keypassword",
|
||||
@ -62,7 +79,8 @@ public void redact() throws Exception {
|
||||
"dfs.replication",
|
||||
"ssl.server.keystore.location",
|
||||
"httpfs.config.dir",
|
||||
"hadoop.security.credstore.java-keystore-provider.password-file"
|
||||
"hadoop.security.credstore.java-keystore-provider.password-file",
|
||||
"fs.s3a.bucket.engineering.server-side-encryption-algorithm"
|
||||
);
|
||||
for (String key : normalKeys) {
|
||||
processedText = redactor.redact(key, ORIGINAL_VALUE);
|
||||
|
@ -1,4 +1,4 @@
|
||||
/** when(p1.getKMSUrl()).thenReturn("p1");
|
||||
/**
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
|
@ -41,7 +41,9 @@
|
||||
import org.junit.After;
|
||||
import org.junit.Assert;
|
||||
import org.junit.Before;
|
||||
import org.junit.Rule;
|
||||
import org.junit.Test;
|
||||
import org.junit.rules.Timeout;
|
||||
import org.mockito.internal.util.reflection.Whitebox;
|
||||
|
||||
|
||||
@ -57,6 +59,17 @@ public class TestLocalFileSystem {
|
||||
private Configuration conf;
|
||||
private LocalFileSystem fileSys;
|
||||
|
||||
/**
|
||||
* standard test timeout: {@value}.
|
||||
*/
|
||||
public static final int DEFAULT_TEST_TIMEOUT = 60 * 1000;
|
||||
|
||||
/**
|
||||
* Set the timeout for every test.
|
||||
*/
|
||||
@Rule
|
||||
public Timeout testTimeout = new Timeout(DEFAULT_TEST_TIMEOUT);
|
||||
|
||||
private void cleanupFile(FileSystem fs, Path name) throws IOException {
|
||||
assertTrue(fs.exists(name));
|
||||
fs.delete(name, true);
|
||||
@ -82,7 +95,7 @@ public void after() throws IOException {
|
||||
/**
|
||||
* Test the capability of setting the working directory.
|
||||
*/
|
||||
@Test(timeout = 10000)
|
||||
@Test
|
||||
public void testWorkingDirectory() throws IOException {
|
||||
Path origDir = fileSys.getWorkingDirectory();
|
||||
Path subdir = new Path(TEST_ROOT_DIR, "new");
|
||||
@ -136,7 +149,7 @@ public void testWorkingDirectory() throws IOException {
|
||||
* test Syncable interface on raw local file system
|
||||
* @throws IOException
|
||||
*/
|
||||
@Test(timeout = 1000)
|
||||
@Test
|
||||
public void testSyncable() throws IOException {
|
||||
FileSystem fs = fileSys.getRawFileSystem();
|
||||
Path file = new Path(TEST_ROOT_DIR, "syncable");
|
||||
@ -169,7 +182,7 @@ private void verifyFile(FileSystem fs, Path file, int bytesToVerify,
|
||||
}
|
||||
}
|
||||
|
||||
@Test(timeout = 10000)
|
||||
@Test
|
||||
public void testCopy() throws IOException {
|
||||
Path src = new Path(TEST_ROOT_DIR, "dingo");
|
||||
Path dst = new Path(TEST_ROOT_DIR, "yak");
|
||||
@ -195,7 +208,7 @@ public void testCopy() throws IOException {
|
||||
}
|
||||
}
|
||||
|
||||
@Test(timeout = 1000)
|
||||
@Test
|
||||
public void testHomeDirectory() throws IOException {
|
||||
Path home = new Path(System.getProperty("user.home"))
|
||||
.makeQualified(fileSys);
|
||||
@ -203,7 +216,7 @@ public void testHomeDirectory() throws IOException {
|
||||
assertEquals(home, fsHome);
|
||||
}
|
||||
|
||||
@Test(timeout = 1000)
|
||||
@Test
|
||||
public void testPathEscapes() throws IOException {
|
||||
Path path = new Path(TEST_ROOT_DIR, "foo%bar");
|
||||
writeFile(fileSys, path, 1);
|
||||
@ -212,7 +225,7 @@ public void testPathEscapes() throws IOException {
|
||||
cleanupFile(fileSys, path);
|
||||
}
|
||||
|
||||
@Test(timeout = 1000)
|
||||
@Test
|
||||
public void testCreateFileAndMkdirs() throws IOException {
|
||||
Path test_dir = new Path(TEST_ROOT_DIR, "test_dir");
|
||||
Path test_file = new Path(test_dir, "file1");
|
||||
@ -248,7 +261,7 @@ public void testCreateFileAndMkdirs() throws IOException {
|
||||
}
|
||||
|
||||
/** Test deleting a file, directory, and non-existent path */
|
||||
@Test(timeout = 1000)
|
||||
@Test
|
||||
public void testBasicDelete() throws IOException {
|
||||
Path dir1 = new Path(TEST_ROOT_DIR, "dir1");
|
||||
Path file1 = new Path(TEST_ROOT_DIR, "file1");
|
||||
@ -263,7 +276,7 @@ public void testBasicDelete() throws IOException {
|
||||
assertTrue("Did not delete non-empty dir", fileSys.delete(dir1));
|
||||
}
|
||||
|
||||
@Test(timeout = 1000)
|
||||
@Test
|
||||
public void testStatistics() throws Exception {
|
||||
int fileSchemeCount = 0;
|
||||
for (Statistics stats : FileSystem.getAllStatistics()) {
|
||||
@ -274,7 +287,7 @@ public void testStatistics() throws Exception {
|
||||
assertEquals(1, fileSchemeCount);
|
||||
}
|
||||
|
||||
@Test(timeout = 1000)
|
||||
@Test
|
||||
public void testHasFileDescriptor() throws IOException {
|
||||
Path path = new Path(TEST_ROOT_DIR, "test-file");
|
||||
writeFile(fileSys, path, 1);
|
||||
@ -288,7 +301,7 @@ public void testHasFileDescriptor() throws IOException {
|
||||
}
|
||||
}
|
||||
|
||||
@Test(timeout = 1000)
|
||||
@Test
|
||||
public void testListStatusWithColons() throws IOException {
|
||||
assumeNotWindows();
|
||||
File colonFile = new File(TEST_ROOT_DIR, "foo:bar");
|
||||
@ -314,7 +327,7 @@ public void testListStatusReturnConsistentPathOnWindows() throws IOException {
|
||||
stats[0].getPath().toUri().getPath());
|
||||
}
|
||||
|
||||
@Test(timeout = 10000)
|
||||
@Test
|
||||
public void testReportChecksumFailure() throws IOException {
|
||||
base.mkdirs();
|
||||
assertTrue(base.exists() && base.isDirectory());
|
||||
@ -394,7 +407,7 @@ private void checkTimesStatus(Path path,
|
||||
assertEquals(expectedAccTime, status.getAccessTime());
|
||||
}
|
||||
|
||||
@Test(timeout = 1000)
|
||||
@Test
|
||||
public void testSetTimes() throws Exception {
|
||||
Path path = new Path(TEST_ROOT_DIR, "set-times");
|
||||
writeFile(fileSys, path, 1);
|
||||
|
@ -105,7 +105,7 @@ public void testStatDanglingLink() throws IOException {
|
||||
super.testStatDanglingLink();
|
||||
}
|
||||
|
||||
@Test(timeout=1000)
|
||||
@Test(timeout=10000)
|
||||
/** lstat a non-existant file using a partially qualified path */
|
||||
public void testDanglingLinkFilePartQual() throws IOException {
|
||||
Path filePartQual = new Path(getScheme()+":///doesNotExist");
|
||||
@ -123,7 +123,7 @@ public void testDanglingLinkFilePartQual() throws IOException {
|
||||
}
|
||||
}
|
||||
|
||||
@Test(timeout=1000)
|
||||
@Test(timeout=10000)
|
||||
/** Stat and lstat a dangling link */
|
||||
public void testDanglingLink() throws IOException {
|
||||
assumeNotWindows();
|
||||
@ -169,7 +169,7 @@ public void testDanglingLink() throws IOException {
|
||||
wrapper.getFileStatus(link);
|
||||
}
|
||||
|
||||
@Test(timeout=1000)
|
||||
@Test(timeout=10000)
|
||||
/**
|
||||
* Test getLinkTarget with a partially qualified target.
|
||||
* NB: Hadoop does not support fully qualified URIs for the
|
||||
|
@ -34,7 +34,7 @@ public RawlocalFSContract(Configuration conf) {
|
||||
super(conf);
|
||||
}
|
||||
|
||||
public static final String RAW_CONTRACT_XML = "contract/localfs.xml";
|
||||
public static final String RAW_CONTRACT_XML = "contract/rawlocal.xml";
|
||||
|
||||
@Override
|
||||
protected String getContractXml() {
|
||||
|
@ -0,0 +1,67 @@
|
||||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
* regarding copyright ownership. The ASF licenses this file
|
||||
* to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package org.apache.hadoop.fs.http;
|
||||
|
||||
import okhttp3.mockwebserver.MockResponse;
|
||||
import okhttp3.mockwebserver.MockWebServer;
|
||||
import okhttp3.mockwebserver.RecordedRequest;
|
||||
import org.apache.hadoop.conf.Configuration;
|
||||
import org.apache.hadoop.fs.FileSystem;
|
||||
import org.apache.hadoop.fs.Path;
|
||||
import org.apache.hadoop.io.IOUtils;
|
||||
import org.junit.Test;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.io.InputStream;
|
||||
import java.net.URI;
|
||||
import java.net.URISyntaxException;
|
||||
import java.net.URL;
|
||||
import java.nio.charset.StandardCharsets;
|
||||
|
||||
import static org.junit.Assert.assertEquals;
|
||||
|
||||
/**
|
||||
* Testing HttpFileSystem.
|
||||
*/
|
||||
public class TestHttpFileSystem {
|
||||
@Test
|
||||
public void testHttpFileSystem() throws IOException, URISyntaxException,
|
||||
InterruptedException {
|
||||
Configuration conf = new Configuration(false);
|
||||
conf.set("fs.http.impl", HttpFileSystem.class.getCanonicalName());
|
||||
final String data = "foo";
|
||||
|
||||
try (MockWebServer server = new MockWebServer()) {
|
||||
server.enqueue(new MockResponse().setBody(data));
|
||||
server.start();
|
||||
URI uri = URI.create(String.format("http://%s:%d", server.getHostName(),
|
||||
server.getPort()));
|
||||
FileSystem fs = FileSystem.get(uri, conf);
|
||||
try (InputStream is = fs.open(
|
||||
new Path(new URL(uri.toURL(), "/foo").toURI()),
|
||||
4096)) {
|
||||
byte[] buf = new byte[data.length()];
|
||||
IOUtils.readFully(is, buf, 0, buf.length);
|
||||
assertEquals(data, new String(buf, StandardCharsets.UTF_8));
|
||||
}
|
||||
RecordedRequest req = server.takeRequest();
|
||||
assertEquals("/foo", req.getPath());
|
||||
}
|
||||
}
|
||||
}
|
@ -251,7 +251,7 @@ private void testOperationsThroughMountLinksInternal(boolean located)
|
||||
fsTarget.isFile(new Path(targetTestRoot,"user/foo")));
|
||||
|
||||
// Delete the created file
|
||||
Assert.assertTrue("Delete should suceed",
|
||||
Assert.assertTrue("Delete should succeed",
|
||||
fsView.delete(new Path("/user/foo"), false));
|
||||
Assert.assertFalse("File should not exist after delete",
|
||||
fsView.exists(new Path("/user/foo")));
|
||||
@ -266,7 +266,7 @@ private void testOperationsThroughMountLinksInternal(boolean located)
|
||||
fsTarget.isFile(new Path(targetTestRoot,"dir2/foo")));
|
||||
|
||||
// Delete the created file
|
||||
Assert.assertTrue("Delete should suceed",
|
||||
Assert.assertTrue("Delete should succeed",
|
||||
fsView.delete(new Path("/internalDir/linkToDir2/foo"), false));
|
||||
Assert.assertFalse("File should not exist after delete",
|
||||
fsView.exists(new Path("/internalDir/linkToDir2/foo")));
|
||||
@ -370,7 +370,7 @@ private void testOperationsThroughMountLinksInternal(boolean located)
|
||||
public void testRenameAcrossMounts1() throws IOException {
|
||||
fileSystemTestHelper.createFile(fsView, "/user/foo");
|
||||
fsView.rename(new Path("/user/foo"), new Path("/user2/fooBarBar"));
|
||||
/* - code if we had wanted this to suceed
|
||||
/* - code if we had wanted this to succeed
|
||||
Assert.assertFalse(fSys.exists(new Path("/user/foo")));
|
||||
Assert.assertFalse(fSysLocal.exists(new Path(targetTestRoot,"user/foo")));
|
||||
Assert.assertTrue(fSys.isFile(FileSystemTestHelper.getTestRootPath(fSys,"/user2/fooBarBar")));
|
||||
|
@ -232,7 +232,7 @@ public void testOperationsThroughMountLinks() throws IOException {
|
||||
isFile(fcTarget, new Path(targetTestRoot,"dir2/foo")));
|
||||
|
||||
// Delete the created file
|
||||
Assert.assertTrue("Delete should suceed",
|
||||
Assert.assertTrue("Delete should succeed",
|
||||
fcView.delete(new Path("/internalDir/linkToDir2/foo"),false));
|
||||
Assert.assertFalse("File should not exist after deletion",
|
||||
exists(fcView, new Path("/internalDir/linkToDir2/foo")));
|
||||
|
@ -41,6 +41,7 @@
|
||||
import org.apache.hadoop.security.ssl.KeyStoreTestUtil;
|
||||
import org.apache.hadoop.security.ssl.SSLFactory;
|
||||
import org.apache.hadoop.test.GenericTestUtils;
|
||||
import org.apache.hadoop.util.StringUtils;
|
||||
import org.junit.AfterClass;
|
||||
import org.junit.BeforeClass;
|
||||
import org.junit.Test;
|
||||
@ -62,21 +63,21 @@ public class TestSSLHttpServer extends HttpServerFunctionalTest {
|
||||
private static String sslConfDir;
|
||||
private static SSLFactory clientSslFactory;
|
||||
private static final String excludeCiphers = "TLS_ECDHE_RSA_WITH_RC4_128_SHA,"
|
||||
+ "SSL_DHE_RSA_EXPORT_WITH_DES40_CBC_SHA,"
|
||||
+ "SSL_DHE_RSA_EXPORT_WITH_DES40_CBC_SHA, \n"
|
||||
+ "SSL_RSA_WITH_DES_CBC_SHA,"
|
||||
+ "SSL_DHE_RSA_WITH_DES_CBC_SHA,"
|
||||
+ "SSL_RSA_EXPORT_WITH_RC4_40_MD5,"
|
||||
+ "SSL_DHE_RSA_WITH_DES_CBC_SHA, "
|
||||
+ "SSL_RSA_EXPORT_WITH_RC4_40_MD5,\t \n"
|
||||
+ "SSL_RSA_EXPORT_WITH_DES40_CBC_SHA,"
|
||||
+ "SSL_RSA_WITH_RC4_128_MD5";
|
||||
+ "SSL_RSA_WITH_RC4_128_MD5 \t";
|
||||
private static final String oneEnabledCiphers = excludeCiphers
|
||||
+ ",TLS_RSA_WITH_AES_128_CBC_SHA";
|
||||
private static final String exclusiveEnabledCiphers
|
||||
= "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,"
|
||||
= "\tTLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA, \n"
|
||||
+ "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,"
|
||||
+ "TLS_RSA_WITH_AES_128_CBC_SHA,"
|
||||
+ "TLS_ECDH_ECDSA_WITH_AES_128_CBC_SHA,"
|
||||
+ "TLS_ECDH_ECDSA_WITH_AES_128_CBC_SHA, "
|
||||
+ "TLS_ECDH_RSA_WITH_AES_128_CBC_SHA,"
|
||||
+ "TLS_DHE_RSA_WITH_AES_128_CBC_SHA,"
|
||||
+ "TLS_DHE_RSA_WITH_AES_128_CBC_SHA,\t\n "
|
||||
+ "TLS_DHE_DSS_WITH_AES_128_CBC_SHA";
|
||||
|
||||
@BeforeClass
|
||||
@ -169,7 +170,7 @@ public void testExcludedCiphers() throws Exception {
|
||||
SSLSocketFactory sslSocketF = clientSslFactory.createSSLSocketFactory();
|
||||
PrefferedCipherSSLSocketFactory testPreferredCipherSSLSocketF
|
||||
= new PrefferedCipherSSLSocketFactory(sslSocketF,
|
||||
excludeCiphers.split(","));
|
||||
StringUtils.getTrimmedStrings(excludeCiphers));
|
||||
conn.setSSLSocketFactory(testPreferredCipherSSLSocketF);
|
||||
assertFalse("excludedCipher list is empty", excludeCiphers.isEmpty());
|
||||
try {
|
||||
@ -193,7 +194,7 @@ public void testOneEnabledCiphers() throws Exception {
|
||||
SSLSocketFactory sslSocketF = clientSslFactory.createSSLSocketFactory();
|
||||
PrefferedCipherSSLSocketFactory testPreferredCipherSSLSocketF
|
||||
= new PrefferedCipherSSLSocketFactory(sslSocketF,
|
||||
oneEnabledCiphers.split(","));
|
||||
StringUtils.getTrimmedStrings(oneEnabledCiphers));
|
||||
conn.setSSLSocketFactory(testPreferredCipherSSLSocketF);
|
||||
assertFalse("excludedCipher list is empty", oneEnabledCiphers.isEmpty());
|
||||
try {
|
||||
@ -219,7 +220,7 @@ public void testExclusiveEnabledCiphers() throws Exception {
|
||||
SSLSocketFactory sslSocketF = clientSslFactory.createSSLSocketFactory();
|
||||
PrefferedCipherSSLSocketFactory testPreferredCipherSSLSocketF
|
||||
= new PrefferedCipherSSLSocketFactory(sslSocketF,
|
||||
exclusiveEnabledCiphers.split(","));
|
||||
StringUtils.getTrimmedStrings(exclusiveEnabledCiphers));
|
||||
conn.setSSLSocketFactory(testPreferredCipherSSLSocketF);
|
||||
assertFalse("excludedCipher list is empty",
|
||||
exclusiveEnabledCiphers.isEmpty());
|
||||
|
@ -20,14 +20,15 @@
|
||||
import org.apache.hadoop.conf.Configuration;
|
||||
import org.apache.hadoop.io.erasurecode.rawcoder.NativeRSRawErasureCoderFactory;
|
||||
import org.apache.hadoop.io.erasurecode.rawcoder.RSRawDecoder;
|
||||
import org.apache.hadoop.io.erasurecode.rawcoder.RSRawDecoderLegacy;
|
||||
import org.apache.hadoop.io.erasurecode.rawcoder.RSLegacyRawDecoder;
|
||||
import org.apache.hadoop.io.erasurecode.rawcoder.RSRawEncoder;
|
||||
import org.apache.hadoop.io.erasurecode.rawcoder.RSRawEncoderLegacy;
|
||||
import org.apache.hadoop.io.erasurecode.rawcoder.RSLegacyRawEncoder;
|
||||
import org.apache.hadoop.io.erasurecode.rawcoder.RSRawErasureCoderFactory;
|
||||
import org.apache.hadoop.io.erasurecode.rawcoder.RawErasureDecoder;
|
||||
import org.apache.hadoop.io.erasurecode.rawcoder.RawErasureEncoder;
|
||||
import org.apache.hadoop.io.erasurecode.rawcoder.XORRawDecoder;
|
||||
import org.apache.hadoop.io.erasurecode.rawcoder.XORRawEncoder;
|
||||
import org.apache.hadoop.io.erasurecode.rawcoder.XORRawErasureCoderFactory;
|
||||
import org.apache.hadoop.test.GenericTestUtils;
|
||||
import org.junit.Assert;
|
||||
import org.junit.Before;
|
||||
@ -62,10 +63,10 @@ public void testRSDefaultRawCoder() {
|
||||
// should return default raw coder of rs-legacy codec
|
||||
encoder = CodecUtil.createRawEncoder(conf,
|
||||
ErasureCodeConstants.RS_LEGACY_CODEC_NAME, coderOptions);
|
||||
Assert.assertTrue(encoder instanceof RSRawEncoderLegacy);
|
||||
Assert.assertTrue(encoder instanceof RSLegacyRawEncoder);
|
||||
decoder = CodecUtil.createRawDecoder(conf,
|
||||
ErasureCodeConstants.RS_LEGACY_CODEC_NAME, coderOptions);
|
||||
Assert.assertTrue(decoder instanceof RSRawDecoderLegacy);
|
||||
Assert.assertTrue(decoder instanceof RSLegacyRawDecoder);
|
||||
}
|
||||
|
||||
@Test
|
||||
@ -104,8 +105,8 @@ public void testFallbackCoders() {
|
||||
ErasureCoderOptions coderOptions = new ErasureCoderOptions(
|
||||
numDataUnit, numParityUnit);
|
||||
conf.set(CodecUtil.IO_ERASURECODE_CODEC_RS_RAWCODERS_KEY,
|
||||
RSRawErasureCoderFactory.class.getCanonicalName() +
|
||||
"," + NativeRSRawErasureCoderFactory.class.getCanonicalName());
|
||||
RSRawErasureCoderFactory.CODER_NAME +
|
||||
"," + NativeRSRawErasureCoderFactory.CODER_NAME);
|
||||
// should return default raw coder of rs codec
|
||||
RawErasureEncoder encoder = CodecUtil.createRawEncoder(
|
||||
conf, ErasureCodeConstants.RS_CODEC_NAME, coderOptions);
|
||||
@ -122,10 +123,10 @@ public void testLegacyCodecFallback() {
|
||||
// should return default raw coder of rs-legacy codec
|
||||
RawErasureEncoder encoder = CodecUtil.createRawEncoder(
|
||||
conf, ErasureCodeConstants.RS_LEGACY_CODEC_NAME, coderOptions);
|
||||
Assert.assertTrue(encoder instanceof RSRawEncoderLegacy);
|
||||
Assert.assertTrue(encoder instanceof RSLegacyRawEncoder);
|
||||
RawErasureDecoder decoder = CodecUtil.createRawDecoder(
|
||||
conf, ErasureCodeConstants.RS_LEGACY_CODEC_NAME, coderOptions);
|
||||
Assert.assertTrue(decoder instanceof RSRawDecoderLegacy);
|
||||
Assert.assertTrue(decoder instanceof RSLegacyRawDecoder);
|
||||
}
|
||||
|
||||
@Test
|
||||
@ -133,8 +134,7 @@ public void testIgnoreInvalidCodec() {
|
||||
ErasureCoderOptions coderOptions = new ErasureCoderOptions(
|
||||
numDataUnit, numParityUnit);
|
||||
conf.set(CodecUtil.IO_ERASURECODE_CODEC_XOR_RAWCODERS_KEY,
|
||||
"invalid-codec," +
|
||||
"org.apache.hadoop.io.erasurecode.rawcoder.XORRawErasureCoderFactory");
|
||||
"invalid-codec," + XORRawErasureCoderFactory.CODER_NAME);
|
||||
// should return second coder specified by IO_ERASURECODE_CODEC_CODERS
|
||||
RawErasureEncoder encoder = CodecUtil.createRawEncoder(
|
||||
conf, ErasureCodeConstants.XOR_CODEC_NAME, coderOptions);
|
||||
|
@ -0,0 +1,170 @@
|
||||
/**
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
* regarding copyright ownership. The ASF licenses this file
|
||||
* to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
package org.apache.hadoop.io.erasurecode;
|
||||
|
||||
import org.apache.hadoop.io.erasurecode.rawcoder.NativeRSRawErasureCoderFactory;
|
||||
import org.apache.hadoop.io.erasurecode.rawcoder.NativeXORRawErasureCoderFactory;
|
||||
import org.apache.hadoop.io.erasurecode.rawcoder.RSLegacyRawErasureCoderFactory;
|
||||
import org.apache.hadoop.io.erasurecode.rawcoder.RSRawErasureCoderFactory;
|
||||
import org.apache.hadoop.io.erasurecode.rawcoder.RawErasureCoderFactory;
|
||||
import org.apache.hadoop.io.erasurecode.rawcoder.RawErasureDecoder;
|
||||
import org.apache.hadoop.io.erasurecode.rawcoder.RawErasureEncoder;
|
||||
import org.apache.hadoop.io.erasurecode.rawcoder.XORRawErasureCoderFactory;
|
||||
import org.junit.Test;
|
||||
|
||||
import java.util.ArrayList;
|
||||
import java.util.List;
|
||||
import java.util.Set;
|
||||
|
||||
import static org.junit.Assert.assertEquals;
|
||||
import static org.junit.Assert.assertTrue;
|
||||
|
||||
/**
|
||||
* Test CodecRegistry.
|
||||
*/
|
||||
public class TestCodecRegistry {
|
||||
@Test
|
||||
public void testGetCodecs() {
|
||||
Set<String> codecs = CodecRegistry.getInstance().getCodecNames();
|
||||
assertEquals(3, codecs.size());
|
||||
assertTrue(codecs.contains(ErasureCodeConstants.RS_CODEC_NAME));
|
||||
assertTrue(codecs.contains(ErasureCodeConstants.RS_LEGACY_CODEC_NAME));
|
||||
assertTrue(codecs.contains(ErasureCodeConstants.XOR_CODEC_NAME));
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testGetCoders() {
|
||||
List<RawErasureCoderFactory> coders = CodecRegistry.getInstance().
|
||||
getCoders(ErasureCodeConstants.RS_CODEC_NAME);
|
||||
assertEquals(2, coders.size());
|
||||
assertTrue(coders.get(0) instanceof NativeRSRawErasureCoderFactory);
|
||||
assertTrue(coders.get(1) instanceof RSRawErasureCoderFactory);
|
||||
|
||||
coders = CodecRegistry.getInstance().
|
||||
getCoders(ErasureCodeConstants.RS_LEGACY_CODEC_NAME);
|
||||
assertEquals(1, coders.size());
|
||||
assertTrue(coders.get(0) instanceof RSLegacyRawErasureCoderFactory);
|
||||
|
||||
coders = CodecRegistry.getInstance().
|
||||
getCoders(ErasureCodeConstants.XOR_CODEC_NAME);
|
||||
assertEquals(2, coders.size());
|
||||
assertTrue(coders.get(0) instanceof NativeXORRawErasureCoderFactory);
|
||||
assertTrue(coders.get(1) instanceof XORRawErasureCoderFactory);
|
||||
}
|
||||
|
||||
@Test(expected = IllegalArgumentException.class)
|
||||
public void testGetCodersWrong() {
|
||||
List<RawErasureCoderFactory> coders =
|
||||
CodecRegistry.getInstance().getCoders("WRONG_CODEC");
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testGetCoderNames() {
|
||||
String[] coderNames = CodecRegistry.getInstance().
|
||||
getCoderNames(ErasureCodeConstants.RS_CODEC_NAME);
|
||||
assertEquals(2, coderNames.length);
|
||||
assertEquals(NativeRSRawErasureCoderFactory.CODER_NAME, coderNames[0]);
|
||||
assertEquals(RSRawErasureCoderFactory.CODER_NAME, coderNames[1]);
|
||||
|
||||
coderNames = CodecRegistry.getInstance().
|
||||
getCoderNames(ErasureCodeConstants.RS_LEGACY_CODEC_NAME);
|
||||
assertEquals(1, coderNames.length);
|
||||
assertEquals(RSLegacyRawErasureCoderFactory.CODER_NAME,
|
||||
coderNames[0]);
|
||||
|
||||
coderNames = CodecRegistry.getInstance().
|
||||
getCoderNames(ErasureCodeConstants.XOR_CODEC_NAME);
|
||||
assertEquals(2, coderNames.length);
|
||||
assertEquals(NativeXORRawErasureCoderFactory.CODER_NAME,
|
||||
coderNames[0]);
|
||||
assertEquals(XORRawErasureCoderFactory.CODER_NAME, coderNames[1]);
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testGetCoderByName() {
|
||||
RawErasureCoderFactory coder = CodecRegistry.getInstance().
|
||||
getCoderByName(ErasureCodeConstants.RS_CODEC_NAME,
|
||||
RSRawErasureCoderFactory.CODER_NAME);
|
||||
assertTrue(coder instanceof RSRawErasureCoderFactory);
|
||||
|
||||
coder = CodecRegistry.getInstance().getCoderByName(
|
||||
ErasureCodeConstants.RS_CODEC_NAME,
|
||||
NativeRSRawErasureCoderFactory.CODER_NAME);
|
||||
assertTrue(coder instanceof NativeRSRawErasureCoderFactory);
|
||||
|
||||
coder = CodecRegistry.getInstance().getCoderByName(
|
||||
ErasureCodeConstants.RS_LEGACY_CODEC_NAME,
|
||||
RSLegacyRawErasureCoderFactory.CODER_NAME);
|
||||
assertTrue(coder instanceof RSLegacyRawErasureCoderFactory);
|
||||
|
||||
coder = CodecRegistry.getInstance().getCoderByName(
|
||||
ErasureCodeConstants.XOR_CODEC_NAME,
|
||||
XORRawErasureCoderFactory.CODER_NAME);
|
||||
assertTrue(coder instanceof XORRawErasureCoderFactory);
|
||||
|
||||
coder = CodecRegistry.getInstance().getCoderByName(
|
||||
ErasureCodeConstants.XOR_CODEC_NAME,
|
||||
NativeXORRawErasureCoderFactory.CODER_NAME);
|
||||
assertTrue(coder instanceof NativeXORRawErasureCoderFactory);
|
||||
}
|
||||
|
||||
@Test(expected = IllegalArgumentException.class)
|
||||
public void testGetCoderByNameWrong() {
|
||||
RawErasureCoderFactory coder = CodecRegistry.getInstance().
|
||||
getCoderByName(ErasureCodeConstants.RS_CODEC_NAME, "WRONG_RS");
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testUpdateCoders() {
|
||||
class RSUserDefinedIncorrectFactory implements RawErasureCoderFactory {
|
||||
public RawErasureEncoder createEncoder(ErasureCoderOptions coderOptions) {
|
||||
return null;
|
||||
}
|
||||
|
||||
public RawErasureDecoder createDecoder(ErasureCoderOptions coderOptions) {
|
||||
return null;
|
||||
}
|
||||
|
||||
public String getCoderName() {
|
||||
return "rs_java";
|
||||
}
|
||||
|
||||
public String getCodecName() {
|
||||
return ErasureCodeConstants.RS_CODEC_NAME;
|
||||
}
|
||||
}
|
||||
|
||||
List<RawErasureCoderFactory> userDefinedFactories = new ArrayList<>();
|
||||
userDefinedFactories.add(new RSUserDefinedIncorrectFactory());
|
||||
CodecRegistry.getInstance().updateCoders(userDefinedFactories);
|
||||
|
||||
// check RS coders
|
||||
List<RawErasureCoderFactory> rsCoders = CodecRegistry.getInstance().
|
||||
getCoders(ErasureCodeConstants.RS_CODEC_NAME);
|
||||
assertEquals(2, rsCoders.size());
|
||||
assertTrue(rsCoders.get(0) instanceof NativeRSRawErasureCoderFactory);
|
||||
assertTrue(rsCoders.get(1) instanceof RSRawErasureCoderFactory);
|
||||
|
||||
// check RS coder names
|
||||
String[] rsCoderNames = CodecRegistry.getInstance().
|
||||
getCoderNames(ErasureCodeConstants.RS_CODEC_NAME);
|
||||
assertEquals(2, rsCoderNames.length);
|
||||
assertEquals(NativeRSRawErasureCoderFactory.CODER_NAME, rsCoderNames[0]);
|
||||
assertEquals(RSRawErasureCoderFactory.CODER_NAME, rsCoderNames[1]);
|
||||
}
|
||||
}
|
@ -22,13 +22,16 @@
|
||||
import org.junit.rules.Timeout;
|
||||
|
||||
import static org.junit.Assert.assertEquals;
|
||||
import static org.junit.Assert.assertNotEquals;
|
||||
|
||||
import java.util.HashMap;
|
||||
import java.util.Map;
|
||||
import java.util.TreeMap;
|
||||
|
||||
public class TestECSchema {
|
||||
|
||||
@Rule
|
||||
public Timeout globalTimeout = new Timeout(300000);
|
||||
@Rule
|
||||
public Timeout globalTimeout = new Timeout(300000);
|
||||
|
||||
@Test
|
||||
public void testGoodSchema() {
|
||||
@ -51,5 +54,45 @@ public void testGoodSchema() {
|
||||
assertEquals(numParityUnits, schema.getNumParityUnits());
|
||||
assertEquals(codec, schema.getCodecName());
|
||||
assertEquals(extraOptionValue, schema.getExtraOptions().get(extraOption));
|
||||
|
||||
Map<String, String> extraMap = new TreeMap<>();
|
||||
extraMap.put(extraOption, extraOptionValue);
|
||||
ECSchema sameSchema = new ECSchema(codec, numDataUnits, numParityUnits,
|
||||
extraMap);
|
||||
assertEquals("Different constructors not equal", sameSchema, schema);
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testEqualsAndHashCode() {
|
||||
Map<String, String> extraMap = new TreeMap<>();
|
||||
extraMap.put("key", "value");
|
||||
|
||||
ECSchema[] schemas = new ECSchema[]{
|
||||
new ECSchema("one", 1, 2, null),
|
||||
new ECSchema("two", 1, 2, null),
|
||||
new ECSchema("one", 2, 2, null),
|
||||
new ECSchema("one", 1, 1, null),
|
||||
new ECSchema("one", 1, 2, extraMap),
|
||||
};
|
||||
|
||||
for (int i = 0; i < schemas.length; i++) {
|
||||
final ECSchema ei = schemas[i];
|
||||
// Check identity
|
||||
ECSchema temp = new ECSchema(ei.getCodecName(), ei.getNumDataUnits(),
|
||||
ei.getNumParityUnits(), ei.getExtraOptions());
|
||||
assertEquals(ei, temp);
|
||||
assertEquals(ei.hashCode(), temp.hashCode());
|
||||
// Check against other schemas
|
||||
for (int j = 0; j < schemas.length; j++) {
|
||||
final ECSchema ej = schemas[j];
|
||||
if (i == j) {
|
||||
assertEquals(ei, ej);
|
||||
assertEquals(ei.hashCode(), ej.hashCode());
|
||||
} else {
|
||||
assertNotEquals(ei, ej);
|
||||
assertNotEquals(ei, ej.hashCode());
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -51,7 +51,7 @@ public void testCodingDirectBufferWithConf_10x4_erasing_d0() {
|
||||
*/
|
||||
Configuration conf = new Configuration();
|
||||
conf.set(CodecUtil.IO_ERASURECODE_CODEC_RS_RAWCODERS_KEY,
|
||||
RSRawErasureCoderFactory.class.getCanonicalName());
|
||||
RSRawErasureCoderFactory.CODER_NAME);
|
||||
prepare(conf, 10, 4, new int[]{0}, new int[0]);
|
||||
|
||||
testCoding(true);
|
||||
|
@ -58,7 +58,7 @@ public void testCodingDirectBufferWithConf_10x4_erasing_d0() {
|
||||
*/
|
||||
Configuration conf = new Configuration();
|
||||
conf.set(CodecUtil.IO_ERASURECODE_CODEC_RS_RAWCODERS_KEY,
|
||||
RSRawErasureCoderFactory.class.getCanonicalName());
|
||||
RSRawErasureCoderFactory.CODER_NAME);
|
||||
prepare(conf, 10, 4, new int[]{0}, new int[0]);
|
||||
|
||||
testCoding(true);
|
||||
|
@ -60,7 +60,7 @@ private RawErasureCoderBenchmark() {
|
||||
private static final List<RawErasureCoderFactory> CODER_MAKERS =
|
||||
Collections.unmodifiableList(
|
||||
Arrays.asList(new DummyRawErasureCoderFactory(),
|
||||
new RSRawErasureCoderFactoryLegacy(),
|
||||
new RSLegacyRawErasureCoderFactory(),
|
||||
new RSRawErasureCoderFactory(),
|
||||
new NativeRSRawErasureCoderFactory()));
|
||||
|
||||
|
@ -29,8 +29,8 @@
|
||||
public class TestDummyRawCoder extends TestRawCoderBase {
|
||||
@Before
|
||||
public void setup() {
|
||||
encoderClass = DummyRawEncoder.class;
|
||||
decoderClass = DummyRawDecoder.class;
|
||||
encoderFactoryClass = DummyRawErasureCoderFactory.class;
|
||||
decoderFactoryClass = DummyRawErasureCoderFactory.class;
|
||||
setAllowDump(false);
|
||||
setChunkSize(baseChunkSize);
|
||||
}
|
||||
|
@ -30,8 +30,8 @@ public class TestNativeRSRawCoder extends TestRSRawCoderBase {
|
||||
@Before
|
||||
public void setup() {
|
||||
Assume.assumeTrue(ErasureCodeNative.isNativeCodeLoaded());
|
||||
this.encoderClass = NativeRSRawEncoder.class;
|
||||
this.decoderClass = NativeRSRawDecoder.class;
|
||||
this.encoderFactoryClass = NativeRSRawErasureCoderFactory.class;
|
||||
this.decoderFactoryClass = NativeRSRawErasureCoderFactory.class;
|
||||
setAllowDump(true);
|
||||
}
|
||||
|
||||
|
@ -29,8 +29,8 @@ public class TestNativeXORRawCoder extends TestXORRawCoderBase {
|
||||
@Before
|
||||
public void setup() {
|
||||
Assume.assumeTrue(ErasureCodeNative.isNativeCodeLoaded());
|
||||
this.encoderClass = NativeXORRawEncoder.class;
|
||||
this.decoderClass = NativeXORRawDecoder.class;
|
||||
this.encoderFactoryClass = NativeXORRawErasureCoderFactory.class;
|
||||
this.decoderFactoryClass = NativeXORRawErasureCoderFactory.class;
|
||||
setAllowDump(true);
|
||||
}
|
||||
}
|
||||
|
@ -22,12 +22,12 @@
|
||||
/**
|
||||
* Test the legacy raw Reed-solomon coder implemented in Java.
|
||||
*/
|
||||
public class TestRSRawCoderLegacy extends TestRSRawCoderBase {
|
||||
public class TestRSLegacyRawCoder extends TestRSRawCoderBase {
|
||||
|
||||
@Before
|
||||
public void setup() {
|
||||
this.encoderClass = RSRawEncoderLegacy.class;
|
||||
this.decoderClass = RSRawDecoderLegacy.class;
|
||||
this.encoderFactoryClass = RSLegacyRawErasureCoderFactory.class;
|
||||
this.decoderFactoryClass = RSLegacyRawErasureCoderFactory.class;
|
||||
setAllowDump(false); // Change to true to allow verbose dump for debugging
|
||||
}
|
||||
}
|
@ -26,8 +26,8 @@ public class TestRSRawCoder extends TestRSRawCoderBase {
|
||||
|
||||
@Before
|
||||
public void setup() {
|
||||
this.encoderClass = RSRawEncoder.class;
|
||||
this.decoderClass = RSRawDecoder.class;
|
||||
this.encoderFactoryClass = RSRawErasureCoderFactory.class;
|
||||
this.decoderFactoryClass = RSRawErasureCoderFactory.class;
|
||||
setAllowDump(false);
|
||||
}
|
||||
}
|
||||
|
@ -30,8 +30,8 @@ public class TestRSRawCoderInteroperable1 extends TestRSRawCoderBase {
|
||||
public void setup() {
|
||||
Assume.assumeTrue(ErasureCodeNative.isNativeCodeLoaded());
|
||||
|
||||
this.encoderClass = RSRawEncoder.class;
|
||||
this.decoderClass = NativeRSRawDecoder.class;
|
||||
this.encoderFactoryClass = RSRawErasureCoderFactory.class;
|
||||
this.decoderFactoryClass = NativeRSRawErasureCoderFactory.class;
|
||||
setAllowDump(true);
|
||||
}
|
||||
|
||||
|
@ -30,8 +30,8 @@ public class TestRSRawCoderInteroperable2 extends TestRSRawCoderBase {
|
||||
public void setup() {
|
||||
Assume.assumeTrue(ErasureCodeNative.isNativeCodeLoaded());
|
||||
|
||||
this.encoderClass = NativeRSRawEncoder.class;
|
||||
this.decoderClass = RSRawDecoder.class;
|
||||
this.encoderFactoryClass = NativeRSRawErasureCoderFactory.class;
|
||||
this.decoderFactoryClass = RSRawErasureCoderFactory.class;
|
||||
setAllowDump(true);
|
||||
}
|
||||
|
||||
|
@ -23,14 +23,12 @@
|
||||
import org.junit.Assert;
|
||||
import org.junit.Test;
|
||||
|
||||
import java.lang.reflect.Constructor;
|
||||
|
||||
/**
|
||||
* Raw coder test base with utilities.
|
||||
*/
|
||||
public abstract class TestRawCoderBase extends TestCoderBase {
|
||||
protected Class<? extends RawErasureEncoder> encoderClass;
|
||||
protected Class<? extends RawErasureDecoder> decoderClass;
|
||||
protected Class<? extends RawErasureCoderFactory> encoderFactoryClass;
|
||||
protected Class<? extends RawErasureCoderFactory> decoderFactoryClass;
|
||||
protected RawErasureEncoder encoder;
|
||||
protected RawErasureDecoder decoder;
|
||||
|
||||
@ -234,9 +232,8 @@ protected RawErasureEncoder createEncoder() {
|
||||
new ErasureCoderOptions(numDataUnits, numParityUnits,
|
||||
allowChangeInputs, allowDump);
|
||||
try {
|
||||
Constructor<? extends RawErasureEncoder> constructor =
|
||||
encoderClass.getConstructor(ErasureCoderOptions.class);
|
||||
return constructor.newInstance(coderConf);
|
||||
RawErasureCoderFactory factory = encoderFactoryClass.newInstance();
|
||||
return factory.createEncoder(coderConf);
|
||||
} catch (Exception e) {
|
||||
throw new RuntimeException("Failed to create encoder", e);
|
||||
}
|
||||
@ -251,9 +248,8 @@ protected RawErasureDecoder createDecoder() {
|
||||
new ErasureCoderOptions(numDataUnits, numParityUnits,
|
||||
allowChangeInputs, allowDump);
|
||||
try {
|
||||
Constructor<? extends RawErasureDecoder> constructor =
|
||||
decoderClass.getConstructor(ErasureCoderOptions.class);
|
||||
return constructor.newInstance(coderConf);
|
||||
RawErasureCoderFactory factory = encoderFactoryClass.newInstance();
|
||||
return factory.createDecoder(coderConf);
|
||||
} catch (Exception e) {
|
||||
throw new RuntimeException("Failed to create decoder", e);
|
||||
}
|
||||
|
@ -26,7 +26,7 @@ public class TestXORRawCoder extends TestXORRawCoderBase {
|
||||
|
||||
@Before
|
||||
public void setup() {
|
||||
this.encoderClass = XORRawEncoder.class;
|
||||
this.decoderClass = XORRawDecoder.class;
|
||||
this.encoderFactoryClass = XORRawErasureCoderFactory.class;
|
||||
this.decoderFactoryClass = XORRawErasureCoderFactory.class;
|
||||
}
|
||||
}
|
||||
|
@ -29,8 +29,8 @@ public class TestXORRawCoderInteroperable1 extends TestXORRawCoderBase {
|
||||
@Before
|
||||
public void setup() {
|
||||
Assume.assumeTrue(ErasureCodeNative.isNativeCodeLoaded());
|
||||
this.encoderClass = XORRawEncoder.class;
|
||||
this.decoderClass = NativeXORRawDecoder.class;
|
||||
this.encoderFactoryClass = XORRawErasureCoderFactory.class;
|
||||
this.decoderFactoryClass = NativeXORRawErasureCoderFactory.class;
|
||||
setAllowDump(true);
|
||||
}
|
||||
}
|
||||
|
@ -29,8 +29,8 @@ public class TestXORRawCoderInteroperable2 extends TestXORRawCoderBase {
|
||||
@Before
|
||||
public void setup() {
|
||||
Assume.assumeTrue(ErasureCodeNative.isNativeCodeLoaded());
|
||||
this.encoderClass = NativeXORRawEncoder.class;
|
||||
this.decoderClass = XORRawDecoder.class;
|
||||
this.encoderFactoryClass = NativeXORRawErasureCoderFactory.class;
|
||||
this.decoderFactoryClass = XORRawErasureCoderFactory.class;
|
||||
setAllowDump(true);
|
||||
}
|
||||
|
||||
|
@ -807,7 +807,7 @@ public void run() {
|
||||
}
|
||||
// wait until reader put a call to callQueue, to make sure all readers
|
||||
// are blocking on the queue after initialClients threads are started.
|
||||
verify(spy, timeout(100).times(i + 1)).put(Mockito.<Call>anyObject());
|
||||
verify(spy, timeout(5000).times(i + 1)).put(Mockito.<Call>anyObject());
|
||||
}
|
||||
|
||||
try {
|
||||
|
Some files were not shown because too many files have changed in this diff Show More
Loading…
x
Reference in New Issue
Block a user