Merge branch 'trunk' into HADOOP-17377
This commit is contained in: commit 62834104a7
@@ -14,6 +14,8 @@
# limitations under the License.

github:
  ghp_path: /
  ghp_branch: gh-pages
  enabled_merge_buttons:
    squash: true
    merge: false
@@ -0,0 +1,59 @@
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

name: website

# Controls when the action will run.
on:
  push:
    branches: [ trunk ]

jobs:
  build:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout Hadoop trunk
        uses: actions/checkout@v3
        with:
          repository: apache/hadoop
      - name: Set up JDK 8
        uses: actions/setup-java@v3
        with:
          java-version: '8'
          distribution: 'temurin'
      - name: Cache local Maven repository
        uses: actions/cache@v3
        with:
          path: ~/.m2/repository
          key: ${{ runner.os }}-maven-${{ hashFiles('**/pom.xml') }}
          restore-keys: |
            ${{ runner.os }}-maven-
      - name: Build Hadoop maven plugins
        run: cd hadoop-maven-plugins && mvn --batch-mode install
      - name: Build Hadoop
        run: mvn clean install -DskipTests -DskipShade
      - name: Build document
        run: mvn clean site
      - name: Stage document
        run: mvn site:stage -DstagingDirectory=${GITHUB_WORKSPACE}/staging/
      - name: Deploy to GitHub Pages
        uses: peaceiris/actions-gh-pages@v3
        with:
          github_token: ${{ secrets.GITHUB_TOKEN }}
          publish_dir: ./staging/hadoop-project
          user_name: 'github-actions[bot]'
          user_email: 'github-actions[bot]@users.noreply.github.com'
@@ -210,9 +210,9 @@ hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/static/nvd3-1.8.5.* (css and js
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/checker/AbstractFuture.java
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/checker/TimeoutFuture.java

com.aliyun:aliyun-java-sdk-core:3.4.0
com.aliyun:aliyun-java-sdk-ecs:4.2.0
com.aliyun:aliyun-java-sdk-ram:3.0.0
com.aliyun:aliyun-java-sdk-core:4.5.10
com.aliyun:aliyun-java-sdk-kms:2.11.0
com.aliyun:aliyun-java-sdk-ram:3.1.0
com.aliyun:aliyun-java-sdk-sts:3.0.0
com.aliyun.oss:aliyun-sdk-oss:3.13.2
com.amazonaws:aws-java-sdk-bundle:1.12.316
@@ -250,7 +250,6 @@ commons-codec:commons-codec:1.11
commons-collections:commons-collections:3.2.2
commons-daemon:commons-daemon:1.0.13
commons-io:commons-io:2.8.0
commons-logging:commons-logging:1.1.3
commons-net:commons-net:3.9.0
de.ruedigermoeller:fst:2.50
io.grpc:grpc-api:1.26.0

@@ -260,7 +259,6 @@ io.grpc:grpc-netty:1.26.0
io.grpc:grpc-protobuf:1.26.0
io.grpc:grpc-protobuf-lite:1.26.0
io.grpc:grpc-stub:1.26.0
io.netty:netty:3.10.6.Final
io.netty:netty-all:4.1.77.Final
io.netty:netty-buffer:4.1.77.Final
io.netty:netty-codec:4.1.77.Final
@@ -325,25 +323,25 @@ org.apache.htrace:htrace-core4:4.1.0-incubating
org.apache.httpcomponents:httpclient:4.5.6
org.apache.httpcomponents:httpcore:4.4.10
org.apache.kafka:kafka-clients:2.8.2
org.apache.kerby:kerb-admin:2.0.2
org.apache.kerby:kerb-client:2.0.2
org.apache.kerby:kerb-common:2.0.2
org.apache.kerby:kerb-core:2.0.2
org.apache.kerby:kerb-crypto:2.0.2
org.apache.kerby:kerb-identity:2.0.2
org.apache.kerby:kerb-server:2.0.2
org.apache.kerby:kerb-simplekdc:2.0.2
org.apache.kerby:kerb-util:2.0.2
org.apache.kerby:kerby-asn1:2.0.2
org.apache.kerby:kerby-config:2.0.2
org.apache.kerby:kerby-pkix:2.0.2
org.apache.kerby:kerby-util:2.0.2
org.apache.kerby:kerby-xdr:2.0.2
org.apache.kerby:token-provider:2.0.2
org.apache.kerby:kerb-admin:2.0.3
org.apache.kerby:kerb-client:2.0.3
org.apache.kerby:kerb-common:2.0.3
org.apache.kerby:kerb-core:2.0.3
org.apache.kerby:kerb-crypto:2.0.3
org.apache.kerby:kerb-identity:2.0.3
org.apache.kerby:kerb-server:2.0.3
org.apache.kerby:kerb-simplekdc:2.0.3
org.apache.kerby:kerb-util:2.0.3
org.apache.kerby:kerby-asn1:2.0.3
org.apache.kerby:kerby-config:2.0.3
org.apache.kerby:kerby-pkix:2.0.3
org.apache.kerby:kerby-util:2.0.3
org.apache.kerby:kerby-xdr:2.0.3
org.apache.kerby:token-provider:2.0.3
org.apache.solr:solr-solrj:8.8.2
org.apache.yetus:audience-annotations:0.5.0
org.apache.zookeeper:zookeeper:3.6.3
org.codehaus.jettison:jettison:1.5.1
org.codehaus.jettison:jettison:1.5.3
org.eclipse.jetty:jetty-annotations:9.4.48.v20220622
org.eclipse.jetty:jetty-http:9.4.48.v20220622
org.eclipse.jetty:jetty-io:9.4.48.v20220622
@@ -359,11 +357,14 @@ org.eclipse.jetty:jetty-xml:9.4.48.v20220622
org.eclipse.jetty.websocket:javax-websocket-client-impl:9.4.48.v20220622
org.eclipse.jetty.websocket:javax-websocket-server-impl:9.4.48.v20220622
org.ehcache:ehcache:3.3.1
org.ini4j:ini4j:0.5.4
org.jetbrains.kotlin:kotlin-stdlib:1.4.10
org.jetbrains.kotlin:kotlin-stdlib-common:1.4.10
org.lz4:lz4-java:1.7.1
org.objenesis:objenesis:2.6
org.xerial.snappy:snappy-java:1.0.5
org.yaml:snakeyaml:1.33
org.wildfly.openssl:wildfly-openssl:1.0.7.Final
org.yaml:snakeyaml:2.0
org.wildfly.openssl:wildfly-openssl:1.1.3.Final

--------------------------------------------------------------------------------
@@ -518,6 +519,8 @@ Eclipse Public License 1.0
--------------------------

junit:junit:4.13.2
org.jacoco:org.jacoco.agent:0.8.5


HSQL License
@@ -69,6 +69,10 @@
<groupId>com.github.pjfanning</groupId>
<artifactId>jersey-json</artifactId>
</exclusion>
<exclusion>
<groupId>org.codehaus.jettison</groupId>
<artifactId>jettison</artifactId>
</exclusion>
<exclusion>
<groupId>com.sun.jersey</groupId>
<artifactId>jersey-server</artifactId>

@@ -182,6 +186,10 @@
<groupId>com.github.pjfanning</groupId>
<artifactId>jersey-json</artifactId>
</exclusion>
<exclusion>
<groupId>org.codehaus.jettison</groupId>
<artifactId>jettison</artifactId>
</exclusion>
<exclusion>
<groupId>io.netty</groupId>
<artifactId>netty</artifactId>

@@ -233,6 +241,10 @@
<groupId>com.github.pjfanning</groupId>
<artifactId>jersey-json</artifactId>
</exclusion>
<exclusion>
<groupId>org.codehaus.jettison</groupId>
<artifactId>jettison</artifactId>
</exclusion>
<exclusion>
<groupId>com.sun.jersey</groupId>
<artifactId>jersey-servlet</artifactId>

@@ -290,6 +302,10 @@
<groupId>com.github.pjfanning</groupId>
<artifactId>jersey-json</artifactId>
</exclusion>
<exclusion>
<groupId>org.codehaus.jettison</groupId>
<artifactId>jettison</artifactId>
</exclusion>
<exclusion>
<groupId>io.netty</groupId>
<artifactId>netty</artifactId>
@@ -24,7 +24,7 @@ This filter must be configured in front of all the web application resources tha

The Hadoop Auth and dependent JAR files must be in the web application classpath (commonly the `WEB-INF/lib` directory).

Hadoop Auth uses the SLF4J API for logging. The Hadoop Auth Maven POM declares the SLF4J API dependency but does not declare a concrete logging implementation; that binding must be added explicitly to the web application. For example, if the web application uses Log4j, the SLF4J-LOG4J12 and LOG4J jar files must be part part of the web application classpath, as well as the Log4j configuration file.
Hadoop Auth uses the SLF4J API for logging. The Hadoop Auth Maven POM declares the SLF4J API dependency but does not declare a concrete logging implementation; that binding must be added explicitly to the web application. For example, if the web application uses Log4j, the SLF4J-LOG4J12 and LOG4J jar files must be part of the web application classpath, as well as the Log4j configuration file.

### Common Configuration parameters
@@ -176,13 +176,16 @@
</exclusions>
</dependency>
<dependency>
<groupId>com.sun.jersey</groupId>
<artifactId>jersey-server</artifactId>
<scope>compile</scope>
<!--
adding jettison as direct dependency (as jersey-json's jettison dependency is vulnerable with version 1.1),
so those who depend on hadoop-common externally will get the non-vulnerable jettison
-->
<groupId>org.codehaus.jettison</groupId>
<artifactId>jettison</artifactId>
</dependency>
<dependency>
<groupId>commons-logging</groupId>
<artifactId>commons-logging</artifactId>
<groupId>com.sun.jersey</groupId>
<artifactId>jersey-server</artifactId>
<scope>compile</scope>
</dependency>
<dependency>

@@ -200,11 +203,6 @@
<artifactId>assertj-core</artifactId>
<scope>test</scope>
</dependency>
<dependency>
<groupId>org.glassfish.grizzly</groupId>
<artifactId>grizzly-http-servlet</artifactId>
<scope>test</scope>
</dependency>
<dependency>
<groupId>commons-beanutils</groupId>
<artifactId>commons-beanutils</artifactId>
@ -26,9 +26,9 @@ MYNAME="${BASH_SOURCE-$0}"
|
|||
function hadoop_usage
|
||||
{
|
||||
hadoop_add_option "buildpaths" "attempt to add class files from build tree"
|
||||
hadoop_add_option "hostnames list[,of,host,names]" "hosts to use in slave mode"
|
||||
hadoop_add_option "hostnames list[,of,host,names]" "hosts to use in worker mode"
|
||||
hadoop_add_option "loglevel level" "set the log4j level for this command"
|
||||
hadoop_add_option "hosts filename" "list of hosts to use in slave mode"
|
||||
hadoop_add_option "hosts filename" "list of hosts to use in worker mode"
|
||||
hadoop_add_option "workers" "turn on worker mode"
|
||||
|
||||
hadoop_add_subcommand "checknative" client "check native Hadoop and compression libraries availability"
|
||||
|
|
|
@ -16,7 +16,7 @@
|
|||
# limitations under the License.
|
||||
|
||||
|
||||
# Run a Hadoop command on all slave hosts.
|
||||
# Run a Hadoop command on all worker hosts.
|
||||
|
||||
function hadoop_usage
|
||||
{
|
||||
|
|
|
@@ -75,14 +75,6 @@ log4j.appender.console.target=System.err
log4j.appender.console.layout=org.apache.log4j.PatternLayout
log4j.appender.console.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n

#
# TaskLog Appender
#
log4j.appender.TLA=org.apache.hadoop.mapred.TaskLogAppender

log4j.appender.TLA.layout=org.apache.log4j.PatternLayout
log4j.appender.TLA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n

#
# HDFS block state change log from block manager
#
@@ -241,12 +241,15 @@ public class CryptoOutputStream extends FilterOutputStream implements
return;
}
try {
flush();
if (closeOutputStream) {
super.close();
codec.close();
try {
flush();
} finally {
if (closeOutputStream) {
super.close();
codec.close();
}
freeBuffers();
}
freeBuffers();
} finally {
closed = true;
}
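The hunk above reorders close(): flush() now runs inside an inner try whose finally always closes the wrapped stream and codec and frees the buffers, while an outer finally marks the stream closed. A minimal, generic sketch of that ordering (a hypothetical wrapper class, not the Hadoop code itself):

```java
import java.io.FilterOutputStream;
import java.io.IOException;
import java.io.OutputStream;

// Illustrative wrapper: flush may throw, but resource release and the
// closed flag must still happen, and only on the first close() call.
class SafeClosingStream extends FilterOutputStream {
  private boolean closed;

  SafeClosingStream(OutputStream out) {
    super(out);
  }

  @Override
  public void close() throws IOException {
    if (closed) {
      return;
    }
    try {
      try {
        flush();              // may throw, but must not skip resource release
      } finally {
        super.close();        // close the wrapped stream ...
        freeBuffers();        // ... and return any pooled buffers
      }
    } finally {
      closed = true;          // never attempt a second close
    }
  }

  private void freeBuffers() {
    // placeholder for buffer-pool cleanup in the real class
  }
}
```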
@@ -2413,8 +2413,14 @@ public abstract class FileSystem extends Configured
if (stat.isFile()) { // file
curFile = stat;
} else if (recursive) { // directory
itors.push(curItor);
curItor = listLocatedStatus(stat.getPath());
try {
RemoteIterator<LocatedFileStatus> newDirItor = listLocatedStatus(stat.getPath());
itors.push(curItor);
curItor = newDirItor;
} catch (FileNotFoundException ignored) {
LOGGER.debug("Directory {} deleted while attempting for recursive listing",
stat.getPath());
}
}
}
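The listing change wraps listLocatedStatus() of a subdirectory in a try/catch so a directory deleted mid-walk is skipped instead of failing the whole recursive listing. A rough java.nio sketch of the same tolerate-and-continue idea (illustrative names, not the Hadoop FileSystem API):

```java
import java.io.FileNotFoundException;
import java.io.IOException;
import java.nio.file.DirectoryStream;
import java.nio.file.Files;
import java.nio.file.NoSuchFileException;
import java.nio.file.Path;
import java.util.ArrayDeque;
import java.util.ArrayList;
import java.util.Deque;
import java.util.List;

// Keep walking when a directory disappears between discovery and listing.
final class TolerantLister {
  static List<Path> listRecursively(Path root) throws IOException {
    List<Path> files = new ArrayList<>();
    Deque<Path> pending = new ArrayDeque<>();
    pending.push(root);
    while (!pending.isEmpty()) {
      Path dir = pending.pop();
      try (DirectoryStream<Path> entries = Files.newDirectoryStream(dir)) {
        for (Path entry : entries) {
          if (Files.isDirectory(entry)) {
            pending.push(entry);   // descend later
          } else {
            files.add(entry);
          }
        }
      } catch (FileNotFoundException | NoSuchFileException deleted) {
        // Directory vanished while we were walking; skip it rather than fail,
        // mirroring the FileNotFoundException handling added above.
      }
    }
    return files;
  }
}
```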
@@ -414,7 +414,14 @@ public class LocalDirAllocator {

//build the "roulette wheel"
for(int i =0; i < ctx.dirDF.length; ++i) {
availableOnDisk[i] = ctx.dirDF[i].getAvailable();
final DF target = ctx.dirDF[i];
// attempt to recreate the dir so that getAvailable() is valid
// if it fails, getAvailable() will return 0, so the dir will
// be declared unavailable.
// return value is logged at debug to keep spotbugs quiet.
final boolean b = new File(target.getDirPath()).mkdirs();
LOG.debug("mkdirs of {}={}", target, b);
availableOnDisk[i] = target.getAvailable();
totalAvailable += availableOnDisk[i];
}
@@ -69,7 +69,7 @@ public class Trash extends Configured {
* Hence we get the file system of the fully-qualified resolved-path and
* then move the path p to the trashbin in that volume,
* @param fs - the filesystem of path p
* @param p - the path being deleted - to be moved to trasg
* @param p - the path being deleted - to be moved to trash
* @param conf - configuration
* @return false if the item is already in the trash or trash is disabled
* @throws IOException on error
@ -302,7 +302,12 @@ public abstract class CachingBlockManager extends BlockManager {
|
|||
|
||||
private void read(BufferData data) throws IOException {
|
||||
synchronized (data) {
|
||||
readBlock(data, false, BufferData.State.BLANK);
|
||||
try {
|
||||
readBlock(data, false, BufferData.State.BLANK);
|
||||
} catch (IOException e) {
|
||||
LOG.error("error reading block {}", data.getBlockNumber(), e);
|
||||
throw e;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -362,9 +367,6 @@ public abstract class CachingBlockManager extends BlockManager {
|
|||
buffer.flip();
|
||||
data.setReady(expectedState);
|
||||
} catch (Exception e) {
|
||||
String message = String.format("error during readBlock(%s)", data.getBlockNumber());
|
||||
LOG.error(message, e);
|
||||
|
||||
if (isPrefetch && tracker != null) {
|
||||
tracker.failed();
|
||||
}
|
||||
|
@ -406,7 +408,8 @@ public abstract class CachingBlockManager extends BlockManager {
|
|||
try {
|
||||
blockManager.prefetch(data, taskQueuedStartTime);
|
||||
} catch (Exception e) {
|
||||
LOG.error("error during prefetch", e);
|
||||
LOG.info("error prefetching block {}. {}", data.getBlockNumber(), e.getMessage());
|
||||
LOG.debug("error prefetching block {}", data.getBlockNumber(), e);
|
||||
}
|
||||
return null;
|
||||
}
|
||||
|
@ -493,7 +496,8 @@ public abstract class CachingBlockManager extends BlockManager {
|
|||
return;
|
||||
}
|
||||
} catch (Exception e) {
|
||||
LOG.error("error waiting on blockFuture: {}", data, e);
|
||||
LOG.info("error waiting on blockFuture: {}. {}", data, e.getMessage());
|
||||
LOG.debug("error waiting on blockFuture: {}", data, e);
|
||||
data.setDone();
|
||||
return;
|
||||
}
|
||||
|
@ -523,8 +527,8 @@ public abstract class CachingBlockManager extends BlockManager {
|
|||
data.setDone();
|
||||
} catch (Exception e) {
|
||||
numCachingErrors.incrementAndGet();
|
||||
String message = String.format("error adding block to cache after wait: %s", data);
|
||||
LOG.error(message, e);
|
||||
LOG.info("error adding block to cache after wait: {}. {}", data, e.getMessage());
|
||||
LOG.debug("error adding block to cache after wait: {}", data, e);
|
||||
data.setDone();
|
||||
}
|
||||
|
||||
|
|
|
@ -15,6 +15,11 @@
|
|||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
/**
|
||||
* Filesystem implementations that allow Hadoop to read directly from
|
||||
* the local file system.
|
||||
*/
|
||||
@InterfaceAudience.LimitedPrivate({"HDFS", "MapReduce"})
|
||||
@InterfaceStability.Unstable
|
||||
package org.apache.hadoop.fs.local;
|
||||
|
|
|
@ -15,6 +15,10 @@
|
|||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
/**
|
||||
* Support for the execution of a file system command.
|
||||
*/
|
||||
@InterfaceAudience.Private
|
||||
@InterfaceStability.Unstable
|
||||
package org.apache.hadoop.fs.shell;
|
||||
|
|
|
@ -497,7 +497,12 @@ public final class HttpServer2 implements FilterContainer {
|
|||
prefix -> this.conf.get(prefix + "type")
|
||||
.equals(PseudoAuthenticationHandler.TYPE))
|
||||
) {
|
||||
server.initSpnego(conf, hostName, usernameConfKey, keytabConfKey);
|
||||
server.initSpnego(
|
||||
conf,
|
||||
hostName,
|
||||
getFilterProperties(conf, authFilterConfigurationPrefixes),
|
||||
usernameConfKey,
|
||||
keytabConfKey);
|
||||
}
|
||||
|
||||
for (URI ep : endpoints) {
|
||||
|
@ -1340,8 +1345,12 @@ public final class HttpServer2 implements FilterContainer {
|
|||
}
|
||||
|
||||
private void initSpnego(Configuration conf, String hostName,
|
||||
String usernameConfKey, String keytabConfKey) throws IOException {
|
||||
Properties authFilterConfigurationPrefixes, String usernameConfKey, String keytabConfKey)
|
||||
throws IOException {
|
||||
Map<String, String> params = new HashMap<>();
|
||||
for (Map.Entry<Object, Object> entry : authFilterConfigurationPrefixes.entrySet()) {
|
||||
params.put(String.valueOf(entry.getKey()), String.valueOf(entry.getValue()));
|
||||
}
|
||||
String principalInConf = conf.get(usernameConfKey);
|
||||
if (principalInConf != null && !principalInConf.isEmpty()) {
|
||||
params.put("kerberos.principal", SecurityUtil.getServerPrincipal(
|
||||
|
|
|
@ -15,6 +15,10 @@
|
|||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
/**
|
||||
* Support for embedded HTTP services.
|
||||
*/
|
||||
@InterfaceAudience.LimitedPrivate({"HBase", "HDFS", "MapReduce"})
|
||||
@InterfaceStability.Unstable
|
||||
package org.apache.hadoop.http;
|
||||
|
|
|
@@ -32,7 +32,6 @@ import java.nio.file.StandardOpenOption;
import java.util.ArrayList;
import java.util.List;

import org.apache.commons.logging.Log;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;

@@ -246,30 +245,6 @@ public class IOUtils {
}
}

/**
* Close the Closeable objects and <b>ignore</b> any {@link Throwable} or
* null pointers. Must only be used for cleanup in exception handlers.
*
* @param log the log to record problems to at debug level. Can be null.
* @param closeables the objects to close
* @deprecated use {@link #cleanupWithLogger(Logger, java.io.Closeable...)}
* instead
*/
@Deprecated
public static void cleanup(Log log, java.io.Closeable... closeables) {
for (java.io.Closeable c : closeables) {
if (c != null) {
try {
c.close();
} catch(Throwable e) {
if (log != null && log.isDebugEnabled()) {
log.debug("Exception in closing " + c, e);
}
}
}
}
}

/**
* Close the Closeable objects and <b>ignore</b> any {@link Throwable} or
* null pointers. Must only be used for cleanup in exception handlers.
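With the commons-logging overload cleanup(Log, Closeable...) removed, call sites are expected to use the SLF4J-based cleanupWithLogger. A hedged sketch of such a call site (the wrapper class is hypothetical; IOUtils.cleanupWithLogger is the replacement the deprecation note points to):

```java
import java.io.Closeable;
import org.apache.hadoop.io.IOUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

// Thin convenience wrapper around the SLF4J-based cleanup helper.
final class QuietClose {
  private static final Logger LOG = LoggerFactory.getLogger(QuietClose.class);

  static void closeAll(Closeable... streams) {
    // Swallows failures and logs them at debug level; intended only for
    // cleanup paths where the primary error has already been handled.
    IOUtils.cleanupWithLogger(LOG, streams);
  }
}
```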
@ -92,7 +92,7 @@ public class WritableName {
|
|||
) throws IOException {
|
||||
Class<?> writableClass = NAME_TO_CLASS.get(name);
|
||||
if (writableClass != null)
|
||||
return writableClass.asSubclass(Writable.class);
|
||||
return writableClass;
|
||||
try {
|
||||
return conf.getClassByName(name);
|
||||
} catch (ClassNotFoundException e) {
|
||||
|
|
|
@ -15,6 +15,11 @@
|
|||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
/**
|
||||
* Implementation of compression/decompression for the BZip2
|
||||
* compression algorithm.
|
||||
*/
|
||||
@InterfaceAudience.Private
|
||||
@InterfaceStability.Unstable
|
||||
package org.apache.hadoop.io.compress.bzip2;
|
||||
|
|
|
@ -15,6 +15,13 @@
|
|||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
/**
|
||||
* Implementation of compression/decompression for the LZ4
|
||||
* compression algorithm.
|
||||
*
|
||||
* @see <a href="http://code.google.com/p/lz4/">LZ4</a>
|
||||
*/
|
||||
@InterfaceAudience.Private
|
||||
@InterfaceStability.Unstable
|
||||
package org.apache.hadoop.io.compress.lz4;
|
||||
|
|
|
@ -15,6 +15,13 @@
|
|||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
/**
|
||||
* Implementation of compression/decompression for the Snappy
|
||||
* compression algorithm.
|
||||
*
|
||||
* @see <a href="http://code.google.com/p/snappy/">Snappy</a>
|
||||
*/
|
||||
@InterfaceAudience.Private
|
||||
@InterfaceStability.Unstable
|
||||
package org.apache.hadoop.io.compress.snappy;
|
||||
|
|
|
@ -15,6 +15,13 @@
|
|||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
/**
|
||||
* Implementation of compression/decompression based on the popular
|
||||
* gzip compressed file format.
|
||||
*
|
||||
* @see <a href="http://www.gzip.org/">gzip</a>
|
||||
*/
|
||||
@InterfaceAudience.Private
|
||||
@InterfaceStability.Unstable
|
||||
package org.apache.hadoop.io.compress.zlib;
|
||||
|
|
|
@ -15,6 +15,13 @@
|
|||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
/**
|
||||
* Implementation of compression/decompression based on the zStandard
|
||||
* compression algorithm.
|
||||
*
|
||||
* @see <a href="https://github.com/facebook/zstd">zStandard</a>
|
||||
*/
|
||||
@InterfaceAudience.Private
|
||||
@InterfaceStability.Unstable
|
||||
package org.apache.hadoop.io.compress.zstd;
|
||||
|
|
|
@ -15,6 +15,12 @@
|
|||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
/**
|
||||
* Various native IO-related calls not available in Java. These
|
||||
* functions should generally be used alongside a fallback to another
|
||||
* more portable mechanism.
|
||||
*/
|
||||
@InterfaceAudience.Private
|
||||
@InterfaceStability.Unstable
|
||||
package org.apache.hadoop.io.nativeio;
|
||||
|
|
|
@@ -49,6 +49,7 @@ public final class CallerContext {
public static final String CLIENT_PORT_STR = "clientPort";
public static final String CLIENT_ID_STR = "clientId";
public static final String CLIENT_CALL_ID_STR = "clientCallId";
public static final String REAL_USER_STR = "realUser";

/** The caller context.
*
@@ -704,7 +704,7 @@ public class Client implements AutoCloseable {
* handle that, a relogin is attempted.
*/
private synchronized void handleSaslConnectionFailure(
final int currRetries, final int maxRetries, final Exception ex,
final int currRetries, final int maxRetries, final IOException ex,
final Random rand, final UserGroupInformation ugi) throws IOException,
InterruptedException {
ugi.doAs(new PrivilegedExceptionAction<Object>() {

@@ -715,10 +715,7 @@ public class Client implements AutoCloseable {
disposeSasl();
if (shouldAuthenticateOverKrb()) {
if (currRetries < maxRetries) {
if(LOG.isDebugEnabled()) {
LOG.debug("Exception encountered while connecting to "
+ "the server : " + ex);
}
LOG.debug("Exception encountered while connecting to the server {}", remoteId, ex);
// try re-login
if (UserGroupInformation.isLoginKeytabBased()) {
UserGroupInformation.getLoginUser().reloginFromKeytab();

@@ -736,7 +733,11 @@
+ UserGroupInformation.getLoginUser().getUserName() + " to "
+ remoteId;
LOG.warn(msg, ex);
throw (IOException) new IOException(msg).initCause(ex);
throw NetUtils.wrapException(remoteId.getAddress().getHostName(),
remoteId.getAddress().getPort(),
NetUtils.getHostname(),
0,
ex);
}
} else {
// With RequestHedgingProxyProvider, one rpc call will send multiple

@@ -744,11 +745,9 @@
// all other requests will be interrupted. It's not a big problem,
// and should not print a warning log.
if (ex instanceof InterruptedIOException) {
LOG.debug("Exception encountered while connecting to the server",
ex);
LOG.debug("Exception encountered while connecting to the server {}", remoteId, ex);
} else {
LOG.warn("Exception encountered while connecting to the server ",
ex);
LOG.warn("Exception encountered while connecting to the server {}", remoteId, ex);
}
}
if (ex instanceof RemoteException)

@@ -1182,7 +1181,14 @@
final ResponseBuffer buf = new ResponseBuffer();
header.writeDelimitedTo(buf);
RpcWritable.wrap(call.rpcRequest).writeTo(buf);
rpcRequestQueue.put(Pair.of(call, buf));
// Wait for the message to be sent. We offer with timeout to
// prevent a race condition between checking the shouldCloseConnection
// and the stopping of the polling thread
while (!shouldCloseConnection.get()) {
if (rpcRequestQueue.offer(Pair.of(call, buf), 1, TimeUnit.SECONDS)) {
break;
}
}
}

/* Receive a response.
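The sendRpcRequest change replaces an unbounded rpcRequestQueue.put() with a loop that offers with a one-second timeout and re-checks shouldCloseConnection between attempts, so a connection that is closing cannot strand the caller. A generic sketch of that pattern with plain java.util.concurrent types (all names illustrative, not the Hadoop internals):

```java
import java.util.concurrent.ArrayBlockingQueue;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;

// Bounded-wait enqueue: re-check a shutdown flag between short offer()
// attempts instead of blocking forever in put().
final class ShutdownAwareQueue<T> {
  private final BlockingQueue<T> queue = new ArrayBlockingQueue<>(1024);
  private final AtomicBoolean shouldClose = new AtomicBoolean(false);

  boolean enqueue(T item) throws InterruptedException {
    while (!shouldClose.get()) {
      if (queue.offer(item, 1, TimeUnit.SECONDS)) {
        return true;          // accepted before any shutdown was requested
      }
    }
    return false;             // queue is shutting down; caller must give up
  }

  void shutdown() {
    shouldClose.set(true);    // wakes producers out of the loop within ~1s
  }
}
```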
@@ -900,12 +900,26 @@ public class RPC {
/**
* @return Default: -1.
* @param numReaders input numReaders.
* @deprecated call {@link #setNumReaders(int value)} instead.
*/
@Deprecated
public Builder setnumReaders(int numReaders) {
this.numReaders = numReaders;
return this;
}

/**
* Set the number of reader threads.
*
* @return this builder.
* @param value input numReaders.
* @since HADOOP-18625.
*/
public Builder setNumReaders(int value) {
this.numReaders = value;
return this;
}

/**
* @return Default: -1.
* @param queueSizePerHandler
@ -123,6 +123,7 @@ import org.apache.hadoop.util.ExitUtil;
|
|||
import org.apache.hadoop.util.ProtoUtil;
|
||||
import org.apache.hadoop.util.StringUtils;
|
||||
import org.apache.hadoop.util.Time;
|
||||
import java.util.concurrent.atomic.AtomicBoolean;
|
||||
import org.apache.hadoop.tracing.Span;
|
||||
import org.apache.hadoop.tracing.SpanContext;
|
||||
import org.apache.hadoop.tracing.TraceScope;
|
||||
|
@ -153,6 +154,13 @@ public abstract class Server {
|
|||
private ExceptionsHandler exceptionsHandler = new ExceptionsHandler();
|
||||
private Tracer tracer;
|
||||
private AlignmentContext alignmentContext;
|
||||
|
||||
/**
|
||||
* Allow server to do force Kerberos re-login once after failure irrespective
|
||||
* of the last login time.
|
||||
*/
|
||||
private final AtomicBoolean canTryForceLogin = new AtomicBoolean(true);
|
||||
|
||||
/**
|
||||
* Logical name of the server used in metrics and monitor.
|
||||
*/
|
||||
|
@ -1977,11 +1985,26 @@ public abstract class Server {
|
|||
private long lastContact;
|
||||
private int dataLength;
|
||||
private Socket socket;
|
||||
|
||||
// Cache the remote host & port info so that even if the socket is
|
||||
// disconnected, we can say where it used to connect to.
|
||||
private String hostAddress;
|
||||
private int remotePort;
|
||||
private InetAddress addr;
|
||||
|
||||
/**
|
||||
* Client Host IP address from where the socket connection is being established to the Server.
|
||||
*/
|
||||
private final String hostAddress;
|
||||
/**
|
||||
* Client remote port used for the given socket connection.
|
||||
*/
|
||||
private final int remotePort;
|
||||
/**
|
||||
* Address to which the socket is connected to.
|
||||
*/
|
||||
private final InetAddress addr;
|
||||
/**
|
||||
* Client Host address from where the socket connection is being established to the Server.
|
||||
*/
|
||||
private final String hostName;
|
||||
|
||||
IpcConnectionContextProto connectionContext;
|
||||
String protocolName;
|
||||
|
@ -2025,8 +2048,12 @@ public abstract class Server {
|
|||
this.isOnAuxiliaryPort = isOnAuxiliaryPort;
|
||||
if (addr == null) {
|
||||
this.hostAddress = "*Unknown*";
|
||||
this.hostName = this.hostAddress;
|
||||
} else {
|
||||
// host IP address
|
||||
this.hostAddress = addr.getHostAddress();
|
||||
// host name for the IP address
|
||||
this.hostName = addr.getHostName();
|
||||
}
|
||||
this.remotePort = socket.getPort();
|
||||
this.responseQueue = new LinkedList<RpcCall>();
|
||||
|
@ -2042,7 +2069,7 @@ public abstract class Server {
|
|||
|
||||
@Override
|
||||
public String toString() {
|
||||
return getHostAddress() + ":" + remotePort;
|
||||
return hostName + ":" + remotePort + " / " + hostAddress + ":" + remotePort;
|
||||
}
|
||||
|
||||
boolean setShouldClose() {
|
||||
|
@ -2206,7 +2233,23 @@ public abstract class Server {
|
|||
AUDITLOG.warn(AUTH_FAILED_FOR + this.toString() + ":"
|
||||
+ attemptingUser + " (" + e.getLocalizedMessage()
|
||||
+ ") with true cause: (" + tce.getLocalizedMessage() + ")");
|
||||
throw tce;
|
||||
if (!UserGroupInformation.getLoginUser().isLoginSuccess()) {
|
||||
doKerberosRelogin();
|
||||
try {
|
||||
// try processing message again
|
||||
LOG.debug("Reprocessing sasl message for {}:{} after re-login",
|
||||
this.toString(), attemptingUser);
|
||||
saslResponse = processSaslMessage(saslMessage);
|
||||
AUDITLOG.info("Retry {}{}:{} after failure", AUTH_SUCCESSFUL_FOR,
|
||||
this.toString(), attemptingUser);
|
||||
canTryForceLogin.set(true);
|
||||
} catch (IOException exp) {
|
||||
tce = (IOException) getTrueCause(e);
|
||||
throw tce;
|
||||
}
|
||||
} else {
|
||||
throw tce;
|
||||
}
|
||||
}
|
||||
|
||||
if (saslServer != null && saslServer.isComplete()) {
|
||||
|
@ -2439,19 +2482,18 @@ public abstract class Server {
|
|||
return -1;
|
||||
}
|
||||
|
||||
if(!RpcConstants.HEADER.equals(dataLengthBuffer)) {
|
||||
LOG.warn("Incorrect RPC Header length from {}:{} "
|
||||
+ "expected length: {} got length: {}",
|
||||
hostAddress, remotePort, RpcConstants.HEADER, dataLengthBuffer);
|
||||
if (!RpcConstants.HEADER.equals(dataLengthBuffer)) {
|
||||
LOG.warn("Incorrect RPC Header length from {}:{} / {}:{}. Expected: {}. Actual: {}",
|
||||
hostName, remotePort, hostAddress, remotePort, RpcConstants.HEADER,
|
||||
dataLengthBuffer);
|
||||
setupBadVersionResponse(version);
|
||||
return -1;
|
||||
}
|
||||
if (version != CURRENT_VERSION) {
|
||||
//Warning is ok since this is not supposed to happen.
|
||||
LOG.warn("Version mismatch from " +
|
||||
hostAddress + ":" + remotePort +
|
||||
" got version " + version +
|
||||
" expected version " + CURRENT_VERSION);
|
||||
LOG.warn("Version mismatch from {}:{} / {}:{}. "
|
||||
+ "Expected version: {}. Actual version: {} ", hostName,
|
||||
remotePort, hostAddress, remotePort, CURRENT_VERSION, version);
|
||||
setupBadVersionResponse(version);
|
||||
return -1;
|
||||
}
|
||||
|
@ -3322,6 +3364,26 @@ public abstract class Server {
|
|||
metricsUpdaterInterval, metricsUpdaterInterval, TimeUnit.MILLISECONDS);
|
||||
}
|
||||
|
||||
private synchronized void doKerberosRelogin() throws IOException {
|
||||
if(UserGroupInformation.getLoginUser().isLoginSuccess()){
|
||||
return;
|
||||
}
|
||||
LOG.warn("Initiating re-login from IPC Server");
|
||||
if (canTryForceLogin.compareAndSet(true, false)) {
|
||||
if (UserGroupInformation.isLoginKeytabBased()) {
|
||||
UserGroupInformation.getLoginUser().forceReloginFromKeytab();
|
||||
} else if (UserGroupInformation.isLoginTicketBased()) {
|
||||
UserGroupInformation.getLoginUser().forceReloginFromTicketCache();
|
||||
}
|
||||
} else {
|
||||
if (UserGroupInformation.isLoginKeytabBased()) {
|
||||
UserGroupInformation.getLoginUser().reloginFromKeytab();
|
||||
} else if (UserGroupInformation.isLoginTicketBased()) {
|
||||
UserGroupInformation.getLoginUser().reloginFromTicketCache();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
public synchronized void addAuxiliaryListener(int auxiliaryPort)
|
||||
throws IOException {
|
||||
if (auxiliaryListenerMap == null) {
|
||||
|
|
|
@ -1,263 +0,0 @@
|
|||
/**
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
* regarding copyright ownership. The ASF licenses this file
|
||||
* to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
|
||||
package org.apache.hadoop.log;
|
||||
|
||||
import com.fasterxml.jackson.core.JsonFactory;
|
||||
import com.fasterxml.jackson.core.JsonGenerator;
|
||||
import com.fasterxml.jackson.databind.JsonNode;
|
||||
import com.fasterxml.jackson.databind.MappingJsonFactory;
|
||||
import com.fasterxml.jackson.databind.ObjectMapper;
|
||||
import com.fasterxml.jackson.databind.ObjectReader;
|
||||
import com.fasterxml.jackson.databind.node.ContainerNode;
|
||||
import org.apache.log4j.Layout;
|
||||
import org.apache.log4j.helpers.ISO8601DateFormat;
|
||||
import org.apache.log4j.spi.LoggingEvent;
|
||||
import org.apache.log4j.spi.ThrowableInformation;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.io.StringWriter;
|
||||
import java.io.Writer;
|
||||
import java.text.DateFormat;
|
||||
import java.util.Date;
|
||||
|
||||
/**
|
||||
* This offers a log layout for JSON, with some test entry points. It's purpose is
|
||||
* to allow Log4J to generate events that are easy for other programs to parse, but which are somewhat
|
||||
* human-readable.
|
||||
*
|
||||
* Some features.
|
||||
*
|
||||
* <ol>
|
||||
* <li>Every event is a standalone JSON clause</li>
|
||||
* <li>Time is published as a time_t event since 1/1/1970
|
||||
* -this is the fastest to generate.</li>
|
||||
* <li>An ISO date is generated, but this is cached and will only be accurate to within a second</li>
|
||||
* <li>the stack trace is included as an array</li>
|
||||
* </ol>
|
||||
*
|
||||
* A simple log event will resemble the following
|
||||
* <pre>
|
||||
* {"name":"test","time":1318429136789,"date":"2011-10-12 15:18:56,789","level":"INFO","thread":"main","message":"test message"}
|
||||
* </pre>
|
||||
*
|
||||
* An event with an error will contain data similar to that below (which has been reformatted to be multi-line).
|
||||
*
|
||||
* <pre>
|
||||
* {
|
||||
* "name":"testException",
|
||||
* "time":1318429136789,
|
||||
* "date":"2011-10-12 15:18:56,789",
|
||||
* "level":"INFO",
|
||||
* "thread":"quoted\"",
|
||||
* "message":"new line\n and {}",
|
||||
* "exceptionclass":"java.net.NoRouteToHostException",
|
||||
* "stack":[
|
||||
* "java.net.NoRouteToHostException: that box caught fire 3 years ago",
|
||||
* "\tat org.apache.hadoop.log.TestLog4Json.testException(TestLog4Json.java:49)",
|
||||
* "\tat sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)",
|
||||
* "\tat sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:39)",
|
||||
* "\tat sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:25)",
|
||||
* "\tat java.lang.reflect.Method.invoke(Method.java:597)",
|
||||
* "\tat junit.framework.TestCase.runTest(TestCase.java:168)",
|
||||
* "\tat junit.framework.TestCase.runBare(TestCase.java:134)",
|
||||
* "\tat junit.framework.TestResult$1.protect(TestResult.java:110)",
|
||||
* "\tat junit.framework.TestResult.runProtected(TestResult.java:128)",
|
||||
* "\tat junit.framework.TestResult.run(TestResult.java:113)",
|
||||
* "\tat junit.framework.TestCase.run(TestCase.java:124)",
|
||||
* "\tat junit.framework.TestSuite.runTest(TestSuite.java:232)",
|
||||
* "\tat junit.framework.TestSuite.run(TestSuite.java:227)",
|
||||
* "\tat org.junit.internal.runners.JUnit38ClassRunner.run(JUnit38ClassRunner.java:83)",
|
||||
* "\tat org.apache.maven.surefire.junit4.JUnit4TestSet.execute(JUnit4TestSet.java:59)",
|
||||
* "\tat org.apache.maven.surefire.suite.AbstractDirectoryTestSuite.executeTestSet(AbstractDirectoryTestSuite.java:120)",
|
||||
* "\tat org.apache.maven.surefire.suite.AbstractDirectoryTestSuite.execute(AbstractDirectoryTestSuite.java:145)",
|
||||
* "\tat org.apache.maven.surefire.Surefire.run(Surefire.java:104)",
|
||||
* "\tat sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)",
|
||||
* "\tat sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:39)",
|
||||
* "\tat sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:25)",
|
||||
* "\tat java.lang.reflect.Method.invoke(Method.java:597)",
|
||||
* "\tat org.apache.maven.surefire.booter.SurefireBooter.runSuitesInProcess(SurefireBooter.java:290)",
|
||||
* "\tat org.apache.maven.surefire.booter.SurefireBooter.main(SurefireBooter.java:1017)"
|
||||
* ]
|
||||
* }
|
||||
* </pre>
|
||||
*/
|
||||
public class Log4Json extends Layout {
|
||||
|
||||
/**
|
||||
* Jackson factories are thread safe when constructing parsers and generators.
|
||||
* They are not thread safe in configure methods; if there is to be any
|
||||
* configuration it must be done in a static initializer block.
|
||||
*/
|
||||
private static final JsonFactory factory = new MappingJsonFactory();
|
||||
private static final ObjectReader READER = new ObjectMapper(factory).reader();
|
||||
public static final String DATE = "date";
|
||||
public static final String EXCEPTION_CLASS = "exceptionclass";
|
||||
public static final String LEVEL = "level";
|
||||
public static final String MESSAGE = "message";
|
||||
public static final String NAME = "name";
|
||||
public static final String STACK = "stack";
|
||||
public static final String THREAD = "thread";
|
||||
public static final String TIME = "time";
|
||||
public static final String JSON_TYPE = "application/json";
|
||||
|
||||
private final DateFormat dateFormat;
|
||||
|
||||
public Log4Json() {
|
||||
dateFormat = new ISO8601DateFormat();
|
||||
}
|
||||
|
||||
|
||||
/**
|
||||
* @return the mime type of JSON
|
||||
*/
|
||||
@Override
|
||||
public String getContentType() {
|
||||
return JSON_TYPE;
|
||||
}
|
||||
|
||||
@Override
|
||||
public String format(LoggingEvent event) {
|
||||
try {
|
||||
return toJson(event);
|
||||
} catch (IOException e) {
|
||||
//this really should not happen, and rather than throw an exception
|
||||
//which may hide the real problem, the log class is printed
|
||||
//in JSON format. The classname is used to ensure valid JSON is
|
||||
//returned without playing escaping games
|
||||
return "{ \"logfailure\":\"" + e.getClass().toString() + "\"}";
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Convert an event to JSON
|
||||
*
|
||||
* @param event the event -must not be null
|
||||
* @return a string value
|
||||
* @throws IOException on problems generating the JSON
|
||||
*/
|
||||
public String toJson(LoggingEvent event) throws IOException {
|
||||
StringWriter writer = new StringWriter();
|
||||
toJson(writer, event);
|
||||
return writer.toString();
|
||||
}
|
||||
|
||||
/**
|
||||
* Convert an event to JSON
|
||||
*
|
||||
* @param writer the destination writer
|
||||
* @param event the event -must not be null
|
||||
* @return the writer
|
||||
* @throws IOException on problems generating the JSON
|
||||
*/
|
||||
public Writer toJson(final Writer writer, final LoggingEvent event)
|
||||
throws IOException {
|
||||
ThrowableInformation ti = event.getThrowableInformation();
|
||||
toJson(writer,
|
||||
event.getLoggerName(),
|
||||
event.getTimeStamp(),
|
||||
event.getLevel().toString(),
|
||||
event.getThreadName(),
|
||||
event.getRenderedMessage(),
|
||||
ti);
|
||||
return writer;
|
||||
}
|
||||
|
||||
/**
|
||||
* Build a JSON entry from the parameters. This is public for testing.
|
||||
*
|
||||
* @param writer destination
|
||||
* @param loggerName logger name
|
||||
* @param timeStamp time_t value
|
||||
* @param level level string
|
||||
* @param threadName name of the thread
|
||||
* @param message rendered message
|
||||
* @param ti nullable thrown information
|
||||
* @return the writer
|
||||
* @throws IOException on any problem
|
||||
*/
|
||||
public Writer toJson(final Writer writer,
|
||||
final String loggerName,
|
||||
final long timeStamp,
|
||||
final String level,
|
||||
final String threadName,
|
||||
final String message,
|
||||
final ThrowableInformation ti) throws IOException {
|
||||
JsonGenerator json = factory.createGenerator(writer);
|
||||
json.writeStartObject();
|
||||
json.writeStringField(NAME, loggerName);
|
||||
json.writeNumberField(TIME, timeStamp);
|
||||
Date date = new Date(timeStamp);
|
||||
json.writeStringField(DATE, dateFormat.format(date));
|
||||
json.writeStringField(LEVEL, level);
|
||||
json.writeStringField(THREAD, threadName);
|
||||
json.writeStringField(MESSAGE, message);
|
||||
if (ti != null) {
|
||||
//there is some throwable info, but if the log event has been sent over the wire,
|
||||
//there may not be a throwable inside it, just a summary.
|
||||
Throwable thrown = ti.getThrowable();
|
||||
String eclass = (thrown != null) ?
|
||||
thrown.getClass().getName()
|
||||
: "";
|
||||
json.writeStringField(EXCEPTION_CLASS, eclass);
|
||||
String[] stackTrace = ti.getThrowableStrRep();
|
||||
json.writeArrayFieldStart(STACK);
|
||||
for (String row : stackTrace) {
|
||||
json.writeString(row);
|
||||
}
|
||||
json.writeEndArray();
|
||||
}
|
||||
json.writeEndObject();
|
||||
json.flush();
|
||||
json.close();
|
||||
return writer;
|
||||
}
|
||||
|
||||
/**
|
||||
* This appender does not ignore throwables
|
||||
*
|
||||
* @return false, always
|
||||
*/
|
||||
@Override
|
||||
public boolean ignoresThrowable() {
|
||||
return false;
|
||||
}
|
||||
|
||||
/**
|
||||
* Do nothing
|
||||
*/
|
||||
@Override
|
||||
public void activateOptions() {
|
||||
}
|
||||
|
||||
/**
|
||||
* For use in tests
|
||||
*
|
||||
* @param json incoming JSON to parse
|
||||
* @return a node tree
|
||||
* @throws IOException on any parsing problems
|
||||
*/
|
||||
public static ContainerNode parse(String json) throws IOException {
|
||||
JsonNode jsonNode = READER.readTree(json);
|
||||
if (!(jsonNode instanceof ContainerNode)) {
|
||||
throw new IOException("Wrong JSON data: " + json);
|
||||
}
|
||||
return (ContainerNode) jsonNode;
|
||||
}
|
||||
}
|
|
@ -34,10 +34,8 @@ import javax.servlet.http.HttpServletResponse;
|
|||
|
||||
import org.apache.hadoop.classification.VisibleForTesting;
|
||||
import org.apache.hadoop.thirdparty.com.google.common.base.Charsets;
|
||||
import org.apache.commons.logging.Log;
|
||||
import org.apache.commons.logging.LogFactory;
|
||||
import org.apache.commons.logging.impl.Jdk14Logger;
|
||||
import org.apache.commons.logging.impl.Log4JLogger;
|
||||
import org.slf4j.LoggerFactory;
|
||||
|
||||
import org.apache.hadoop.HadoopIllegalArgumentException;
|
||||
import org.apache.hadoop.classification.InterfaceAudience;
|
||||
import org.apache.hadoop.classification.InterfaceStability;
|
||||
|
@ -48,9 +46,12 @@ import org.apache.hadoop.security.authentication.client.AuthenticatedURL;
|
|||
import org.apache.hadoop.security.authentication.client.KerberosAuthenticator;
|
||||
import org.apache.hadoop.security.ssl.SSLFactory;
|
||||
import org.apache.hadoop.util.GenericOptionsParser;
|
||||
import org.apache.hadoop.util.GenericsUtil;
|
||||
import org.apache.hadoop.util.ServletUtil;
|
||||
import org.apache.hadoop.util.Tool;
|
||||
import org.apache.hadoop.util.ToolRunner;
|
||||
import org.apache.log4j.Level;
|
||||
import org.apache.log4j.Logger;
|
||||
|
||||
/**
|
||||
* Change log level in runtime.
|
||||
|
@ -340,21 +341,17 @@ public class LogLevel {
|
|||
out.println(MARKER
|
||||
+ "Submitted Class Name: <b>" + logName + "</b><br />");
|
||||
|
||||
Log log = LogFactory.getLog(logName);
|
||||
org.slf4j.Logger log = LoggerFactory.getLogger(logName);
|
||||
out.println(MARKER
|
||||
+ "Log Class: <b>" + log.getClass().getName() +"</b><br />");
|
||||
if (level != null) {
|
||||
out.println(MARKER + "Submitted Level: <b>" + level + "</b><br />");
|
||||
}
|
||||
|
||||
if (log instanceof Log4JLogger) {
|
||||
process(((Log4JLogger)log).getLogger(), level, out);
|
||||
}
|
||||
else if (log instanceof Jdk14Logger) {
|
||||
process(((Jdk14Logger)log).getLogger(), level, out);
|
||||
}
|
||||
else {
|
||||
out.println("Sorry, " + log.getClass() + " not supported.<br />");
|
||||
if (GenericsUtil.isLog4jLogger(logName)) {
|
||||
process(Logger.getLogger(logName), level, out);
|
||||
} else {
|
||||
out.println("Sorry, setting log level is only supported for log4j loggers.<br />");
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -371,14 +368,14 @@ public class LogLevel {
|
|||
+ "<input type='submit' value='Set Log Level' />"
|
||||
+ "</form>";
|
||||
|
||||
private static void process(org.apache.log4j.Logger log, String level,
|
||||
private static void process(Logger log, String level,
|
||||
PrintWriter out) throws IOException {
|
||||
if (level != null) {
|
||||
if (!level.equalsIgnoreCase(org.apache.log4j.Level.toLevel(level)
|
||||
if (!level.equalsIgnoreCase(Level.toLevel(level)
|
||||
.toString())) {
|
||||
out.println(MARKER + "Bad Level : <b>" + level + "</b><br />");
|
||||
} else {
|
||||
log.setLevel(org.apache.log4j.Level.toLevel(level));
|
||||
log.setLevel(Level.toLevel(level));
|
||||
out.println(MARKER + "Setting Level to " + level + " ...<br />");
|
||||
}
|
||||
}
|
||||
|
@ -386,21 +383,5 @@ public class LogLevel {
|
|||
+ "Effective Level: <b>" + log.getEffectiveLevel() + "</b><br />");
|
||||
}
|
||||
|
||||
private static void process(java.util.logging.Logger log, String level,
|
||||
PrintWriter out) throws IOException {
|
||||
if (level != null) {
|
||||
String levelToUpperCase = level.toUpperCase();
|
||||
try {
|
||||
log.setLevel(java.util.logging.Level.parse(levelToUpperCase));
|
||||
} catch (IllegalArgumentException e) {
|
||||
out.println(MARKER + "Bad Level : <b>" + level + "</b><br />");
|
||||
}
|
||||
out.println(MARKER + "Setting Level to " + level + " ...<br />");
|
||||
}
|
||||
|
||||
java.util.logging.Level lev;
|
||||
for(; (lev = log.getLevel()) == null; log = log.getParent());
|
||||
out.println(MARKER + "Effective Level: <b>" + lev + "</b><br />");
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
@@ -65,7 +65,7 @@ import org.apache.hadoop.util.Timer;
* <p>This class can also be used to coordinate multiple logging points; see
* {@link #record(String, long, double...)} for more details.
*
* <p>This class is not thread-safe.
* <p>This class is thread-safe.
*/
public class LogThrottlingHelper {

@@ -192,7 +192,7 @@ public class LogThrottlingHelper {
* @return A LogAction indicating whether or not the caller should write to
* its log.
*/
public LogAction record(double... values) {
public synchronized LogAction record(double... values) {
return record(DEFAULT_RECORDER_NAME, timer.monotonicNow(), values);
}

@@ -244,7 +244,7 @@ public class LogThrottlingHelper {
*
* @see #record(double...)
*/
public LogAction record(String recorderName, long currentTimeMs,
public synchronized LogAction record(String recorderName, long currentTimeMs,
double... values) {
if (primaryRecorderName == null) {
primaryRecorderName = recorderName;

@@ -287,7 +287,7 @@ public class LogThrottlingHelper {
* @param idx The index value.
* @return The summary information.
*/
public SummaryStatistics getCurrentStats(String recorderName, int idx) {
public synchronized SummaryStatistics getCurrentStats(String recorderName, int idx) {
LoggingAction currentLog = currentLogs.get(recorderName);
if (currentLog != null) {
return currentLog.getStats(idx);

@@ -314,6 +314,13 @@ public class LogThrottlingHelper {
}
}

@VisibleForTesting
public synchronized void reset() {
primaryRecorderName = null;
currentLogs.clear();
lastLogTimestampMs = Long.MIN_VALUE;
}

/**
* A standard log action which keeps track of all of the values which have
* been logged. This is also used for internal bookkeeping via its private
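With record() and getCurrentStats() now synchronized (and a @VisibleForTesting reset() added), a single LogThrottlingHelper can be shared across request threads. A hedged usage sketch; the constructor argument and the shouldLog()/getCount() calls are assumptions based on the class's existing public API, not something introduced by this diff:

```java
import org.apache.hadoop.log.LogThrottlingHelper;
import org.apache.hadoop.log.LogThrottlingHelper.LogAction;

// One shared helper that emits a summary line at most once every five seconds.
class ThrottledReporter {
  private final LogThrottlingHelper throttle = new LogThrottlingHelper(5_000L);

  void onRequest(double latencyMs) {
    LogAction action = throttle.record(latencyMs);   // now safe across threads
    if (action.shouldLog()) {
      System.out.printf("handled %d requests since the last report%n",
          action.getCount());
    }
  }
}
```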
@@ -314,7 +314,8 @@ public final class SecurityUtil {

String keytabFilename = conf.get(keytabFileKey);
if (keytabFilename == null || keytabFilename.length() == 0) {
throw new IOException("Running in secure mode, but config doesn't have a keytab");
throw new IOException(
"Running in secure mode, but config doesn't have a keytab for key: " + keytabFileKey);
}

String principalConfig = conf.get(userNameKey, System
@@ -529,6 +529,18 @@ public class UserGroupInformation {
user.setLogin(login);
}

/** This method checks for a successful Kerberos login
* and returns true by default if it is not using Kerberos.
*
* @return true on successful login
*/
public boolean isLoginSuccess() {
LoginContext login = user.getLogin();
return (login instanceof HadoopLoginContext)
? ((HadoopLoginContext) login).isLoginSuccess()
: true;
}

/**
* Set the last login time for logged in user
* @param loginTime the number of milliseconds since the beginning of time

@@ -1276,6 +1288,23 @@ public class UserGroupInformation {
relogin(login, ignoreLastLoginTime);
}

/**
* Force re-Login a user in from the ticket cache irrespective of the last
* login time. This method assumes that login had happened already. The
* Subject field of this UserGroupInformation object is updated to have the
* new credentials.
*
* @throws IOException
* raised on errors performing I/O.
* @throws KerberosAuthException
* on a failure
*/
@InterfaceAudience.Public
@InterfaceStability.Evolving
public void forceReloginFromTicketCache() throws IOException {
reloginFromTicketCache(true);
}

/**
* Re-Login a user in from the ticket cache. This
* method assumes that login had happened already.

@@ -1287,6 +1316,11 @@ public class UserGroupInformation {
@InterfaceAudience.Public
@InterfaceStability.Evolving
public void reloginFromTicketCache() throws IOException {
reloginFromTicketCache(false);
}

private void reloginFromTicketCache(boolean ignoreLastLoginTime)
throws IOException {
if (!shouldRelogin() || !isFromTicket()) {
return;
}

@@ -1294,7 +1328,7 @@ public class UserGroupInformation {
if (login == null) {
throw new KerberosAuthException(MUST_FIRST_LOGIN);
}
relogin(login, false);
relogin(login, ignoreLastLoginTime);
}

private void relogin(HadoopLoginContext login, boolean ignoreLastLoginTime)

@@ -2083,6 +2117,11 @@ public class UserGroupInformation {
this.conf = conf;
}

/** Get the login status. */
public boolean isLoginSuccess() {
return isLoggedIn.get();
}

String getAppName() {
return appName;
}
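Taken together, isLoginSuccess(), forceReloginFromKeytab() and the new forceReloginFromTicketCache() let a server force a fresh Kerberos login after a SASL failure, which is what Server.doKerberosRelogin() in this merge does. A hedged sketch of that decision; the wrapper class is hypothetical, and forceReloginFromKeytab() is assumed to already exist on current branches, while only isLoginSuccess() and forceReloginFromTicketCache() come from this change:

```java
import java.io.IOException;
import org.apache.hadoop.security.UserGroupInformation;

// Decide whether and how to force a re-login for the process login user.
final class KerberosRecovery {
  static void recoverLoginIfNeeded() throws IOException {
    UserGroupInformation loginUser = UserGroupInformation.getLoginUser();
    if (loginUser.isLoginSuccess()) {
      return;                                   // login still healthy
    }
    if (UserGroupInformation.isLoginKeytabBased()) {
      loginUser.forceReloginFromKeytab();       // keytab-based services
    } else if (UserGroupInformation.isLoginTicketBased()) {
      loginUser.forceReloginFromTicketCache();  // ticket-cache based logins
    }
  }
}
```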
@ -15,6 +15,10 @@
|
|||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
/**
|
||||
* Support for service-level authorization.
|
||||
*/
|
||||
@InterfaceAudience.Public
|
||||
@InterfaceStability.Evolving
|
||||
package org.apache.hadoop.security.authorize;
|
||||
|
|
|
@ -15,6 +15,10 @@
|
|||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
/**
|
||||
* Filters for HTTP service security.
|
||||
*/
|
||||
@InterfaceAudience.Public
|
||||
@InterfaceStability.Evolving
|
||||
package org.apache.hadoop.security.http;
|
||||
|
|
|
@ -19,7 +19,9 @@
|
|||
package org.apache.hadoop.security.token.delegation;
|
||||
|
||||
import java.io.ByteArrayInputStream;
|
||||
import java.io.DataInput;
|
||||
import java.io.DataInputStream;
|
||||
import java.io.DataOutput;
|
||||
import java.io.IOException;
|
||||
import java.security.MessageDigest;
|
||||
import java.util.ArrayList;
|
||||
|
@ -41,6 +43,8 @@ import org.apache.hadoop.fs.statistics.DurationTrackerFactory;
|
|||
import org.apache.hadoop.fs.statistics.impl.IOStatisticsBinding;
|
||||
import org.apache.hadoop.fs.statistics.impl.IOStatisticsStore;
|
||||
import org.apache.hadoop.io.Text;
|
||||
import org.apache.hadoop.io.Writable;
|
||||
import org.apache.hadoop.io.WritableUtils;
|
||||
import org.apache.hadoop.metrics2.annotation.Metric;
|
||||
import org.apache.hadoop.metrics2.annotation.Metrics;
|
||||
import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
|
||||
|
@ -441,8 +445,9 @@ extends AbstractDelegationTokenIdentifier>
|
|||
/**
|
||||
* Update the current master key for generating delegation tokens
|
||||
* It should be called only by tokenRemoverThread.
|
||||
* @throws IOException raised on errors performing I/O.
|
||||
*/
|
||||
void rollMasterKey() throws IOException {
|
||||
protected void rollMasterKey() throws IOException {
|
||||
synchronized (this) {
|
||||
removeExpiredKeys();
|
||||
/* set final expiry date for retiring currentKey */
|
||||
|
@ -677,11 +682,15 @@ extends AbstractDelegationTokenIdentifier>
|
|||
|
||||
/** Class to encapsulate a token's renew date and password. */
|
||||
@InterfaceStability.Evolving
|
||||
public static class DelegationTokenInformation {
|
||||
public static class DelegationTokenInformation implements Writable {
|
||||
long renewDate;
|
||||
byte[] password;
|
||||
String trackingId;
|
||||
|
||||
public DelegationTokenInformation() {
|
||||
this(0, null);
|
||||
}
|
||||
|
||||
public DelegationTokenInformation(long renewDate, byte[] password) {
|
||||
this(renewDate, password, null);
|
||||
}
|
||||
|
@ -711,6 +720,29 @@ extends AbstractDelegationTokenIdentifier>
|
|||
public String getTrackingId() {
|
||||
return trackingId;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void write(DataOutput out) throws IOException {
|
||||
WritableUtils.writeVLong(out, renewDate);
|
||||
if (password == null) {
|
||||
WritableUtils.writeVInt(out, -1);
|
||||
} else {
|
||||
WritableUtils.writeVInt(out, password.length);
|
||||
out.write(password);
|
||||
}
|
||||
WritableUtils.writeString(out, trackingId);
|
||||
}
|
||||
|
||||
@Override
|
||||
public void readFields(DataInput in) throws IOException {
|
||||
renewDate = WritableUtils.readVLong(in);
|
||||
int len = WritableUtils.readVInt(in);
|
||||
if (len > -1) {
|
||||
password = new byte[len];
|
||||
in.readFully(password);
|
||||
}
|
||||
trackingId = WritableUtils.readString(in);
|
||||
}
|
||||
}
|
||||
|
||||
/** Remove expired delegation tokens from cache */
|
||||
|
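The `write()`/`readFields()` pair added above makes `DelegationTokenInformation` usable as a Hadoop `Writable`, which is what later allows token metadata to be serialized to bytes and persisted outside the in-memory cache. A minimal round-trip sketch, relying only on the public constructors and the Writable methods shown in this change (the class name `TokenInfoRoundTrip` is illustrative):

```java
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import java.nio.charset.StandardCharsets;

import org.apache.hadoop.security.token.delegation.AbstractDelegationTokenSecretManager.DelegationTokenInformation;

public class TokenInfoRoundTrip {
  public static void main(String[] args) throws IOException {
    // Serialize the token information to a byte array, as a SQL- or
    // ZooKeeper-backed store would before persisting it.
    DelegationTokenInformation info = new DelegationTokenInformation(
        System.currentTimeMillis(), "secret".getBytes(StandardCharsets.UTF_8));
    ByteArrayOutputStream bos = new ByteArrayOutputStream();
    try (DataOutputStream dos = new DataOutputStream(bos)) {
      info.write(dos);
    }
    System.out.println("serialized token info: " + bos.toByteArray().length + " bytes");

    // Deserialize the bytes back into an equivalent object.
    DelegationTokenInformation restored = new DelegationTokenInformation();
    try (DataInputStream dis =
        new DataInputStream(new ByteArrayInputStream(bos.toByteArray()))) {
      restored.readFields(dis);
    }
  }
}
```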
@ -866,9 +898,9 @@ extends AbstractDelegationTokenIdentifier>
|
|||
/**
|
||||
* Add token stats to the owner to token count mapping.
|
||||
*
|
||||
* @param id
|
||||
* @param id token id.
|
||||
*/
|
||||
private void addTokenForOwnerStats(TokenIdent id) {
|
||||
protected void addTokenForOwnerStats(TokenIdent id) {
|
||||
String realOwner = getTokenRealOwner(id);
|
||||
tokenOwnerStats.put(realOwner,
|
||||
tokenOwnerStats.getOrDefault(realOwner, 0L)+1);
|
||||
|
|
|
@ -0,0 +1,400 @@
|
|||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
* regarding copyright ownership. The ASF licenses this file
|
||||
* to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package org.apache.hadoop.security.token.delegation;
|
||||
|
||||
import java.io.ByteArrayInputStream;
|
||||
import java.io.ByteArrayOutputStream;
|
||||
import java.io.DataInputStream;
|
||||
import java.io.DataOutputStream;
|
||||
import java.io.IOException;
|
||||
import java.sql.SQLException;
|
||||
import org.apache.hadoop.conf.Configuration;
|
||||
import org.apache.hadoop.security.token.Token;
|
||||
import org.apache.hadoop.security.token.delegation.web.DelegationTokenManager;
|
||||
import org.slf4j.Logger;
|
||||
import org.slf4j.LoggerFactory;
|
||||
|
||||
|
||||
/**
|
||||
* An implementation of {@link AbstractDelegationTokenSecretManager} that
|
||||
* persists TokenIdentifiers and DelegationKeys in an existing SQL database.
|
||||
*/
|
||||
public abstract class SQLDelegationTokenSecretManager<TokenIdent
|
||||
extends AbstractDelegationTokenIdentifier>
|
||||
extends AbstractDelegationTokenSecretManager<TokenIdent> {
|
||||
|
||||
private static final Logger LOG =
|
||||
LoggerFactory.getLogger(SQLDelegationTokenSecretManager.class);
|
||||
|
||||
public static final String SQL_DTSM_CONF_PREFIX = "sql-dt-secret-manager.";
|
||||
private static final String SQL_DTSM_TOKEN_SEQNUM_BATCH_SIZE = SQL_DTSM_CONF_PREFIX
|
||||
+ "token.seqnum.batch.size";
|
||||
public static final int DEFAULT_SEQ_NUM_BATCH_SIZE = 10;
|
||||
|
||||
// Batch of sequence numbers that will be requested by the sequenceNumCounter.
|
||||
// A new batch is requested once the sequenceNums available to a secret manager are
|
||||
// exhausted, including during initialization.
|
||||
private final int seqNumBatchSize;
|
||||
|
||||
// Last sequenceNum in the current batch that has been allocated to a token.
|
||||
private int currentSeqNum;
|
||||
|
||||
// Max sequenceNum in the current batch that can be allocated to a token.
|
||||
// Unused sequenceNums in the current batch cannot be reused by other routers.
|
||||
private int currentMaxSeqNum;
|
||||
|
||||
public SQLDelegationTokenSecretManager(Configuration conf) {
|
||||
super(conf.getLong(DelegationTokenManager.UPDATE_INTERVAL,
|
||||
DelegationTokenManager.UPDATE_INTERVAL_DEFAULT) * 1000,
|
||||
conf.getLong(DelegationTokenManager.MAX_LIFETIME,
|
||||
DelegationTokenManager.MAX_LIFETIME_DEFAULT) * 1000,
|
||||
conf.getLong(DelegationTokenManager.RENEW_INTERVAL,
|
||||
DelegationTokenManager.RENEW_INTERVAL_DEFAULT) * 1000,
|
||||
conf.getLong(DelegationTokenManager.REMOVAL_SCAN_INTERVAL,
|
||||
DelegationTokenManager.REMOVAL_SCAN_INTERVAL_DEFAULT) * 1000);
|
||||
|
||||
this.seqNumBatchSize = conf.getInt(SQL_DTSM_TOKEN_SEQNUM_BATCH_SIZE,
|
||||
DEFAULT_SEQ_NUM_BATCH_SIZE);
|
||||
}
|
||||
|
||||
/**
|
||||
* Persists a TokenIdentifier and its corresponding TokenInformation into
|
||||
* the SQL database. The TokenIdentifier is expected to be unique and any
|
||||
* duplicate token attempts will result in an IOException.
|
||||
* @param ident TokenIdentifier to persist.
|
||||
* @param tokenInfo DelegationTokenInformation associated with the TokenIdentifier.
|
||||
*/
|
||||
@Override
|
||||
protected void storeToken(TokenIdent ident,
|
||||
DelegationTokenInformation tokenInfo) throws IOException {
|
||||
try (ByteArrayOutputStream bos = new ByteArrayOutputStream();
|
||||
DataOutputStream dos = new DataOutputStream(bos)) {
|
||||
tokenInfo.write(dos);
|
||||
// Add token to SQL database
|
||||
insertToken(ident.getSequenceNumber(), ident.getBytes(), bos.toByteArray());
|
||||
// Add token to local cache
|
||||
super.storeToken(ident, tokenInfo);
|
||||
} catch (SQLException e) {
|
||||
throw new IOException("Failed to store token in SQL secret manager", e);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Updates the TokenInformation of an existing TokenIdentifier in
|
||||
* the SQL database.
|
||||
* @param ident Existing TokenIdentifier in the SQL database.
|
||||
* @param tokenInfo Updated DelegationTokenInformation associated with the TokenIdentifier.
|
||||
*/
|
||||
@Override
|
||||
protected void updateToken(TokenIdent ident,
|
||||
DelegationTokenInformation tokenInfo) throws IOException {
|
||||
try (ByteArrayOutputStream bos = new ByteArrayOutputStream()) {
|
||||
try (DataOutputStream dos = new DataOutputStream(bos)) {
|
||||
tokenInfo.write(dos);
|
||||
// Update token in SQL database
|
||||
updateToken(ident.getSequenceNumber(), ident.getBytes(), bos.toByteArray());
|
||||
// Update token in local cache
|
||||
super.updateToken(ident, tokenInfo);
|
||||
}
|
||||
} catch (SQLException e) {
|
||||
throw new IOException("Failed to update token in SQL secret manager", e);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Cancels a token by removing it from the SQL database. This will
|
||||
* call the corresponding method in {@link AbstractDelegationTokenSecretManager}
|
||||
* to perform validation and remove the token from the cache.
|
||||
* @return Identifier of the canceled token
|
||||
*/
|
||||
@Override
|
||||
public synchronized TokenIdent cancelToken(Token<TokenIdent> token,
|
||||
String canceller) throws IOException {
|
||||
try (ByteArrayInputStream bis = new ByteArrayInputStream(token.getIdentifier());
|
||||
DataInputStream din = new DataInputStream(bis)) {
|
||||
TokenIdent id = createIdentifier();
|
||||
id.readFields(din);
|
||||
|
||||
// Calling getTokenInfo to load token into local cache if not present.
|
||||
// super.cancelToken() requires token to be present in local cache.
|
||||
getTokenInfo(id);
|
||||
}
|
||||
|
||||
return super.cancelToken(token, canceller);
|
||||
}
|
||||
|
||||
/**
|
||||
* Removes the existing TokenInformation from the SQL database to
|
||||
* invalidate it.
|
||||
* @param ident TokenInformation to remove from the SQL database.
|
||||
*/
|
||||
@Override
|
||||
protected void removeStoredToken(TokenIdent ident) throws IOException {
|
||||
try {
|
||||
deleteToken(ident.getSequenceNumber(), ident.getBytes());
|
||||
} catch (SQLException e) {
|
||||
LOG.warn("Failed to remove token in SQL secret manager", e);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Obtains the DelegationTokenInformation associated with the given
|
||||
* TokenIdentifier in the SQL database.
|
||||
* @param ident Existing TokenIdentifier in the SQL database.
|
||||
* @return DelegationTokenInformation that matches the given TokenIdentifier or
|
||||
* null if it doesn't exist in the database.
|
||||
*/
|
||||
@Override
|
||||
protected DelegationTokenInformation getTokenInfo(TokenIdent ident) {
|
||||
// Look for token in local cache
|
||||
DelegationTokenInformation tokenInfo = super.getTokenInfo(ident);
|
||||
|
||||
if (tokenInfo == null) {
|
||||
try {
|
||||
// Look for token in SQL database
|
||||
byte[] tokenInfoBytes = selectTokenInfo(ident.getSequenceNumber(), ident.getBytes());
|
||||
|
||||
if (tokenInfoBytes != null) {
|
||||
tokenInfo = new DelegationTokenInformation();
|
||||
try (ByteArrayInputStream bis = new ByteArrayInputStream(tokenInfoBytes)) {
|
||||
try (DataInputStream dis = new DataInputStream(bis)) {
|
||||
tokenInfo.readFields(dis);
|
||||
}
|
||||
}
|
||||
|
||||
// Update token in local cache
|
||||
currentTokens.put(ident, tokenInfo);
|
||||
}
|
||||
} catch (IOException | SQLException e) {
|
||||
LOG.error("Failed to get token in SQL secret manager", e);
|
||||
}
|
||||
}
|
||||
|
||||
return tokenInfo;
|
||||
}
|
||||
|
||||
/**
|
||||
* Obtains the value of the last reserved sequence number.
|
||||
* @return Last reserved sequence number.
|
||||
*/
|
||||
@Override
|
||||
public int getDelegationTokenSeqNum() {
|
||||
try {
|
||||
return selectSequenceNum();
|
||||
} catch (SQLException e) {
|
||||
throw new RuntimeException(
|
||||
"Failed to get token sequence number in SQL secret manager", e);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Updates the value of the last reserved sequence number.
|
||||
* @param seqNum Value to update the sequence number to.
|
||||
*/
|
||||
@Override
|
||||
public void setDelegationTokenSeqNum(int seqNum) {
|
||||
try {
|
||||
updateSequenceNum(seqNum);
|
||||
} catch (SQLException e) {
|
||||
throw new RuntimeException(
|
||||
"Failed to update token sequence number in SQL secret manager", e);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Obtains the next available sequence number that can be allocated to a Token.
|
||||
* Sequence numbers need to be reserved using the shared sequenceNumberCounter once
|
||||
* the local batch has been exhausted; the shared counter handles sequenceNumber allocation
|
||||
* concurrently with other secret managers.
|
||||
* This method ensures that sequence numbers are incremental in a single secret manager,
|
||||
* but not across secret managers.
|
||||
* @return Next available sequence number.
|
||||
*/
|
||||
@Override
|
||||
public synchronized int incrementDelegationTokenSeqNum() {
|
||||
if (currentSeqNum >= currentMaxSeqNum) {
|
||||
try {
|
||||
// Request a new batch of sequence numbers and use the
|
||||
// lowest one available.
|
||||
currentSeqNum = incrementSequenceNum(seqNumBatchSize);
|
||||
currentMaxSeqNum = currentSeqNum + seqNumBatchSize;
|
||||
} catch (SQLException e) {
|
||||
throw new RuntimeException(
|
||||
"Failed to increment token sequence number in SQL secret manager", e);
|
||||
}
|
||||
}
|
||||
|
||||
return ++currentSeqNum;
|
||||
}
|
||||
|
||||
/**
|
||||
* Persists a DelegationKey into the SQL database. The delegation keyId
|
||||
* is expected to be unique and any duplicate key attempts will result
|
||||
* in an IOException.
|
||||
* @param key DelegationKey to persist into the SQL database.
|
||||
*/
|
||||
@Override
|
||||
protected void storeDelegationKey(DelegationKey key) throws IOException {
|
||||
try (ByteArrayOutputStream bos = new ByteArrayOutputStream();
|
||||
DataOutputStream dos = new DataOutputStream(bos)) {
|
||||
key.write(dos);
|
||||
// Add delegation key to SQL database
|
||||
insertDelegationKey(key.getKeyId(), bos.toByteArray());
|
||||
// Add delegation key to local cache
|
||||
super.storeDelegationKey(key);
|
||||
} catch (SQLException e) {
|
||||
throw new IOException("Failed to store delegation key in SQL secret manager", e);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Updates an existing DelegationKey in the SQL database.
|
||||
* @param key Updated DelegationKey.
|
||||
*/
|
||||
@Override
|
||||
protected void updateDelegationKey(DelegationKey key) throws IOException {
|
||||
try (ByteArrayOutputStream bos = new ByteArrayOutputStream();
|
||||
DataOutputStream dos = new DataOutputStream(bos)) {
|
||||
key.write(dos);
|
||||
// Update delegation key in SQL database
|
||||
updateDelegationKey(key.getKeyId(), bos.toByteArray());
|
||||
// Update delegation key in local cache
|
||||
super.updateDelegationKey(key);
|
||||
} catch (SQLException e) {
|
||||
throw new IOException("Failed to update delegation key in SQL secret manager", e);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Removes the existing DelegationKey from the SQL database to
|
||||
* invalidate it.
|
||||
* @param key DelegationKey to remove from the SQL database.
|
||||
*/
|
||||
@Override
|
||||
protected void removeStoredMasterKey(DelegationKey key) {
|
||||
try {
|
||||
deleteDelegationKey(key.getKeyId());
|
||||
} catch (SQLException e) {
|
||||
LOG.warn("Failed to remove delegation key in SQL secret manager", e);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Obtains the DelegationKey from the SQL database.
|
||||
* @param keyId KeyId of the DelegationKey to obtain.
|
||||
* @return DelegationKey that matches the given keyId or null
|
||||
* if it doesn't exist in the database.
|
||||
*/
|
||||
@Override
|
||||
protected DelegationKey getDelegationKey(int keyId) {
|
||||
// Look for delegation key in local cache
|
||||
DelegationKey delegationKey = super.getDelegationKey(keyId);
|
||||
|
||||
if (delegationKey == null) {
|
||||
try {
|
||||
// Look for delegation key in SQL database
|
||||
byte[] delegationKeyBytes = selectDelegationKey(keyId);
|
||||
|
||||
if (delegationKeyBytes != null) {
|
||||
delegationKey = new DelegationKey();
|
||||
try (ByteArrayInputStream bis = new ByteArrayInputStream(delegationKeyBytes)) {
|
||||
try (DataInputStream dis = new DataInputStream(bis)) {
|
||||
delegationKey.readFields(dis);
|
||||
}
|
||||
}
|
||||
|
||||
// Update delegation key in local cache
|
||||
allKeys.put(keyId, delegationKey);
|
||||
}
|
||||
} catch (IOException | SQLException e) {
|
||||
LOG.error("Failed to get delegation key in SQL secret manager", e);
|
||||
}
|
||||
}
|
||||
|
||||
return delegationKey;
|
||||
}
|
||||
|
||||
/**
|
||||
* Obtains the value of the last delegation key id.
|
||||
* @return Last delegation key id.
|
||||
*/
|
||||
@Override
|
||||
public int getCurrentKeyId() {
|
||||
try {
|
||||
return selectKeyId();
|
||||
} catch (SQLException e) {
|
||||
throw new RuntimeException(
|
||||
"Failed to get delegation key id in SQL secret manager", e);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Updates the value of the last delegation key id.
|
||||
* @param keyId Value to update the delegation key id to.
|
||||
*/
|
||||
@Override
|
||||
public void setCurrentKeyId(int keyId) {
|
||||
try {
|
||||
updateKeyId(keyId);
|
||||
} catch (SQLException e) {
|
||||
throw new RuntimeException(
|
||||
"Failed to set delegation key id in SQL secret manager", e);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Obtains the next available delegation key id that can be allocated to a DelegationKey.
|
||||
* Delegation key ids need to be reserved using the shared delegationKeyIdCounter,
|
||||
* which handles keyId allocation concurrently with other secret managers.
|
||||
* @return Next available delegation key id.
|
||||
*/
|
||||
@Override
|
||||
public int incrementCurrentKeyId() {
|
||||
try {
|
||||
return incrementKeyId(1) + 1;
|
||||
} catch (SQLException e) {
|
||||
throw new RuntimeException(
|
||||
"Failed to increment delegation key id in SQL secret manager", e);
|
||||
}
|
||||
}
|
||||
|
||||
// Token operations in SQL database
|
||||
protected abstract byte[] selectTokenInfo(int sequenceNum, byte[] tokenIdentifier)
|
||||
throws SQLException;
|
||||
protected abstract void insertToken(int sequenceNum, byte[] tokenIdentifier, byte[] tokenInfo)
|
||||
throws SQLException;
|
||||
protected abstract void updateToken(int sequenceNum, byte[] tokenIdentifier, byte[] tokenInfo)
|
||||
throws SQLException;
|
||||
protected abstract void deleteToken(int sequenceNum, byte[] tokenIdentifier)
|
||||
throws SQLException;
|
||||
// Delegation key operations in SQL database
|
||||
protected abstract byte[] selectDelegationKey(int keyId) throws SQLException;
|
||||
protected abstract void insertDelegationKey(int keyId, byte[] delegationKey)
|
||||
throws SQLException;
|
||||
protected abstract void updateDelegationKey(int keyId, byte[] delegationKey)
|
||||
throws SQLException;
|
||||
protected abstract void deleteDelegationKey(int keyId) throws SQLException;
|
||||
// Counter operations in SQL database
|
||||
protected abstract int selectSequenceNum() throws SQLException;
|
||||
protected abstract void updateSequenceNum(int value) throws SQLException;
|
||||
protected abstract int incrementSequenceNum(int amount) throws SQLException;
|
||||
protected abstract int selectKeyId() throws SQLException;
|
||||
protected abstract void updateKeyId(int value) throws SQLException;
|
||||
protected abstract int incrementKeyId(int amount) throws SQLException;
|
||||
}
|
|
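The abstract `selectTokenInfo`/`insertToken` and counter operations above leave the actual SQL to the concrete subclass. A hedged sketch of what two of those operations might look like against a hypothetical `Tokens(sequenceNum, tokenIdentifier, tokenInfo)` table, using plain JDBC over a caller-supplied `Connection` (the table, column and class names are assumptions, not part of Hadoop):

```java
import java.sql.Connection;
import java.sql.PreparedStatement;
import java.sql.ResultSet;
import java.sql.SQLException;

/** Illustrative JDBC bodies for two of the abstract token operations. */
public final class TokenTableOps {

  /** Possible body for selectTokenInfo: fetch the serialized token info, or null if absent. */
  public static byte[] selectTokenInfo(Connection conn, int sequenceNum, byte[] tokenIdentifier)
      throws SQLException {
    String sql = "SELECT tokenInfo FROM Tokens WHERE sequenceNum = ? AND tokenIdentifier = ?";
    try (PreparedStatement ps = conn.prepareStatement(sql)) {
      ps.setInt(1, sequenceNum);
      ps.setBytes(2, tokenIdentifier);
      try (ResultSet rs = ps.executeQuery()) {
        return rs.next() ? rs.getBytes("tokenInfo") : null;
      }
    }
  }

  /** Possible body for insertToken: a duplicate key fails with a SQLException. */
  public static void insertToken(Connection conn, int sequenceNum, byte[] tokenIdentifier,
      byte[] tokenInfo) throws SQLException {
    String sql = "INSERT INTO Tokens (sequenceNum, tokenIdentifier, tokenInfo) VALUES (?, ?, ?)";
    try (PreparedStatement ps = conn.prepareStatement(sql)) {
      ps.setInt(1, sequenceNum);
      ps.setBytes(2, tokenIdentifier);
      ps.setBytes(3, tokenInfo);
      ps.executeUpdate();
    }
  }

  private TokenTableOps() {
  }
}
```

With a unique or primary key on `(sequenceNum, tokenIdentifier)`, a duplicate insert fails with a `SQLException`, which the `storeToken()` override above wraps into the documented `IOException`.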
@ -15,6 +15,10 @@
|
|||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
/**
|
||||
* ZooKeeper secret manager for TokenIdentifiers and DelegationKeys.
|
||||
*/
|
||||
@InterfaceAudience.LimitedPrivate({"HBase", "HDFS", "MapReduce"})
|
||||
@InterfaceStability.Evolving
|
||||
package org.apache.hadoop.security.token.delegation;
|
||||
|
|
|
@ -15,6 +15,10 @@
|
|||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
/**
|
||||
* Support for delegation tokens.
|
||||
*/
|
||||
@InterfaceAudience.Public
|
||||
@InterfaceStability.Evolving
|
||||
package org.apache.hadoop.security.token;
|
||||
|
|
|
@ -21,7 +21,6 @@ package org.apache.hadoop.service;
|
|||
import java.util.ArrayList;
|
||||
import java.util.List;
|
||||
|
||||
import org.apache.commons.logging.Log;
|
||||
import org.apache.hadoop.classification.InterfaceAudience.Public;
|
||||
import org.apache.hadoop.classification.InterfaceStability.Evolving;
|
||||
import org.slf4j.Logger;
|
||||
|
@ -75,9 +74,10 @@ public final class ServiceOperations {
|
|||
* @param log the log to warn at
|
||||
* @param service a service; may be null
|
||||
* @return any exception that was caught; null if none was.
|
||||
* @see ServiceOperations#stopQuietly(Service)
|
||||
* @deprecated to be removed with 3.4.0. Use {@link #stopQuietly(Logger, Service)} instead.
|
||||
*/
|
||||
public static Exception stopQuietly(Log log, Service service) {
|
||||
@Deprecated
|
||||
public static Exception stopQuietly(org.apache.commons.logging.Log log, Service service) {
|
||||
try {
|
||||
stop(service);
|
||||
} catch (Exception e) {
|
||||
|
|
|
@ -15,6 +15,10 @@
|
|||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
/**
|
||||
* Support for services.
|
||||
*/
|
||||
@InterfaceAudience.Public
|
||||
package org.apache.hadoop.service;
|
||||
import org.apache.hadoop.classification.InterfaceAudience;
|
||||
|
|
|
@ -20,6 +20,7 @@ package org.apache.hadoop.util;
|
|||
|
||||
import java.lang.reflect.Array;
|
||||
import java.util.List;
|
||||
import java.util.concurrent.atomic.AtomicBoolean;
|
||||
|
||||
import org.apache.hadoop.classification.InterfaceAudience;
|
||||
import org.apache.hadoop.classification.InterfaceStability;
|
||||
|
@ -33,6 +34,14 @@ import org.slf4j.LoggerFactory;
|
|||
@InterfaceStability.Unstable
|
||||
public class GenericsUtil {
|
||||
|
||||
private static final String SLF4J_LOG4J_ADAPTER_CLASS = "org.slf4j.impl.Log4jLoggerAdapter";
|
||||
|
||||
/**
|
||||
* Set to false only if the Log4J adapter class is not found on the classpath. Once set to false,
|
||||
* the utility method does not attempt to load the class again.
|
||||
*/
|
||||
private static final AtomicBoolean IS_LOG4J_LOGGER = new AtomicBoolean(true);
|
||||
|
||||
/**
|
||||
* Returns the Class object (of type <code>Class<T></code>) of the
|
||||
* argument of type <code>T</code>.
|
||||
|
@ -87,12 +96,27 @@ public class GenericsUtil {
|
|||
if (clazz == null) {
|
||||
return false;
|
||||
}
|
||||
Logger log = LoggerFactory.getLogger(clazz);
|
||||
return isLog4jLogger(clazz.getName());
|
||||
}
|
||||
|
||||
/**
|
||||
* Determine whether the log of the given logger is of Log4J implementation.
|
||||
*
|
||||
* @param logger the logger name, usually class name as string.
|
||||
* @return true if the logger uses Log4J implementation.
|
||||
*/
|
||||
public static boolean isLog4jLogger(String logger) {
|
||||
if (logger == null || !IS_LOG4J_LOGGER.get()) {
|
||||
return false;
|
||||
}
|
||||
Logger log = LoggerFactory.getLogger(logger);
|
||||
try {
|
||||
Class log4jClass = Class.forName("org.slf4j.impl.Log4jLoggerAdapter");
|
||||
Class<?> log4jClass = Class.forName(SLF4J_LOG4J_ADAPTER_CLASS);
|
||||
return log4jClass.isInstance(log);
|
||||
} catch (ClassNotFoundException e) {
|
||||
IS_LOG4J_LOGGER.set(false);
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
|
|
|
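A short usage sketch of the new string-based overload: callers that only have a logger name can check for the Log4J backend without first constructing a `Class` object (the surrounding class is illustrative):

```java
import org.apache.hadoop.util.GenericsUtil;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class LogBackendCheck {
  private static final Logger LOG = LoggerFactory.getLogger(LogBackendCheck.class);

  public static void main(String[] args) {
    // The overload takes the logger name directly; repeated calls stay cheap
    // because a missing Log4J adapter is remembered after the first failure.
    if (GenericsUtil.isLog4jLogger(LogBackendCheck.class.getName())) {
      LOG.info("SLF4J is bound to Log4J; Log4J-specific tuning can be applied.");
    } else {
      LOG.info("SLF4J is bound to a non-Log4J backend.");
    }
  }
}
```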
@ -33,7 +33,7 @@ import java.util.function.Consumer;
|
|||
*
|
||||
* This class does not support null element.
|
||||
*
|
||||
* This class is not thread safe.
|
||||
* This class is thread safe.
|
||||
*
|
||||
* @param <K> Key type for looking up the elements
|
||||
* @param <E> Element type, which must be
|
||||
|
|
|
@ -1,78 +0,0 @@
|
|||
/**
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
* regarding copyright ownership. The ASF licenses this file
|
||||
* to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
package org.apache.hadoop.util;
|
||||
|
||||
import org.apache.commons.logging.Log;
|
||||
import org.slf4j.Logger;
|
||||
|
||||
class LogAdapter {
|
||||
private Log LOG;
|
||||
private Logger LOGGER;
|
||||
|
||||
private LogAdapter(Log LOG) {
|
||||
this.LOG = LOG;
|
||||
}
|
||||
|
||||
private LogAdapter(Logger LOGGER) {
|
||||
this.LOGGER = LOGGER;
|
||||
}
|
||||
|
||||
/**
|
||||
* @deprecated use {@link #create(Logger)} instead
|
||||
*/
|
||||
@Deprecated
|
||||
public static LogAdapter create(Log LOG) {
|
||||
return new LogAdapter(LOG);
|
||||
}
|
||||
|
||||
public static LogAdapter create(Logger LOGGER) {
|
||||
return new LogAdapter(LOGGER);
|
||||
}
|
||||
|
||||
public void info(String msg) {
|
||||
if (LOG != null) {
|
||||
LOG.info(msg);
|
||||
} else if (LOGGER != null) {
|
||||
LOGGER.info(msg);
|
||||
}
|
||||
}
|
||||
|
||||
public void warn(String msg, Throwable t) {
|
||||
if (LOG != null) {
|
||||
LOG.warn(msg, t);
|
||||
} else if (LOGGER != null) {
|
||||
LOGGER.warn(msg, t);
|
||||
}
|
||||
}
|
||||
|
||||
public void debug(Throwable t) {
|
||||
if (LOG != null) {
|
||||
LOG.debug(t);
|
||||
} else if (LOGGER != null) {
|
||||
LOGGER.debug("", t);
|
||||
}
|
||||
}
|
||||
|
||||
public void error(String msg) {
|
||||
if (LOG != null) {
|
||||
LOG.error(msg);
|
||||
} else if (LOGGER != null) {
|
||||
LOGGER.error(msg);
|
||||
}
|
||||
}
|
||||
}
|
|
@ -36,7 +36,6 @@ import java.util.List;
|
|||
import java.util.Map;
|
||||
import java.util.concurrent.ConcurrentHashMap;
|
||||
|
||||
import org.apache.commons.logging.Log;
|
||||
import org.apache.hadoop.classification.InterfaceAudience;
|
||||
import org.apache.hadoop.classification.InterfaceStability;
|
||||
import org.apache.hadoop.conf.Configurable;
|
||||
|
@ -228,10 +227,12 @@ public class ReflectionUtils {
|
|||
* @param log the logger that logs the stack trace
|
||||
* @param title a descriptive title for the call stacks
|
||||
* @param minInterval the minimum time from the last
|
||||
* @deprecated to be removed with 3.4.0. Use {@link #logThreadInfo(Logger, String, long)} instead.
|
||||
*/
|
||||
public static void logThreadInfo(Log log,
|
||||
String title,
|
||||
long minInterval) {
|
||||
@Deprecated
|
||||
public static void logThreadInfo(org.apache.commons.logging.Log log,
|
||||
String title,
|
||||
long minInterval) {
|
||||
boolean dumpStack = false;
|
||||
if (log.isInfoEnabled()) {
|
||||
synchronized (ReflectionUtils.class) {
|
||||
|
|
|
@ -18,10 +18,10 @@
|
|||
|
||||
package org.apache.hadoop.util;
|
||||
|
||||
import org.slf4j.Logger;
|
||||
import sun.misc.Signal;
|
||||
import sun.misc.SignalHandler;
|
||||
|
||||
import org.apache.commons.logging.Log;
|
||||
import org.apache.hadoop.classification.InterfaceAudience;
|
||||
import org.apache.hadoop.classification.InterfaceStability;
|
||||
|
||||
|
@ -42,11 +42,11 @@ public enum SignalLogger {
|
|||
* Our signal handler.
|
||||
*/
|
||||
private static class Handler implements SignalHandler {
|
||||
final private LogAdapter LOG;
|
||||
final private Logger log;
|
||||
final private SignalHandler prevHandler;
|
||||
|
||||
Handler(String name, LogAdapter LOG) {
|
||||
this.LOG = LOG;
|
||||
Handler(String name, Logger log) {
|
||||
this.log = log;
|
||||
prevHandler = Signal.handle(new Signal(name), this);
|
||||
}
|
||||
|
||||
|
@ -57,7 +57,7 @@ public enum SignalLogger {
|
|||
*/
|
||||
@Override
|
||||
public void handle(Signal signal) {
|
||||
LOG.error("RECEIVED SIGNAL " + signal.getNumber() +
|
||||
log.error("RECEIVED SIGNAL " + signal.getNumber() +
|
||||
": SIG" + signal.getName());
|
||||
prevHandler.handle(signal);
|
||||
}
|
||||
|
@ -66,13 +66,9 @@ public enum SignalLogger {
|
|||
/**
|
||||
* Register some signal handlers.
|
||||
*
|
||||
* @param LOG The log4j logfile to use in the signal handlers.
|
||||
* @param log The log4j logfile to use in the signal handlers.
|
||||
*/
|
||||
public void register(final Log LOG) {
|
||||
register(LogAdapter.create(LOG));
|
||||
}
|
||||
|
||||
void register(final LogAdapter LOG) {
|
||||
public void register(final Logger log) {
|
||||
if (registered) {
|
||||
throw new IllegalStateException("Can't re-install the signal handlers.");
|
||||
}
|
||||
|
@ -83,15 +79,15 @@ public enum SignalLogger {
|
|||
String separator = "";
|
||||
for (String signalName : SIGNALS) {
|
||||
try {
|
||||
new Handler(signalName, LOG);
|
||||
new Handler(signalName, log);
|
||||
bld.append(separator)
|
||||
.append(signalName);
|
||||
separator = ", ";
|
||||
} catch (Exception e) {
|
||||
LOG.debug(e);
|
||||
log.debug("Error: ", e);
|
||||
}
|
||||
}
|
||||
bld.append("]");
|
||||
LOG.info(bld.toString());
|
||||
log.info(bld.toString());
|
||||
}
|
||||
}
|
||||
|
|
|
@ -740,42 +740,26 @@ public class StringUtils {
|
|||
* Print a log message for starting up and shutting down
|
||||
* @param clazz the class of the server
|
||||
* @param args arguments
|
||||
* @param LOG the target log object
|
||||
* @param log the target log object
|
||||
*/
|
||||
public static void startupShutdownMessage(Class<?> clazz, String[] args,
|
||||
final org.apache.commons.logging.Log LOG) {
|
||||
startupShutdownMessage(clazz, args, LogAdapter.create(LOG));
|
||||
}
|
||||
|
||||
/**
|
||||
* Print a log message for starting up and shutting down
|
||||
* @param clazz the class of the server
|
||||
* @param args arguments
|
||||
* @param LOG the target log object
|
||||
*/
|
||||
public static void startupShutdownMessage(Class<?> clazz, String[] args,
|
||||
final org.slf4j.Logger LOG) {
|
||||
startupShutdownMessage(clazz, args, LogAdapter.create(LOG));
|
||||
}
|
||||
|
||||
static void startupShutdownMessage(Class<?> clazz, String[] args,
|
||||
final LogAdapter LOG) {
|
||||
final org.slf4j.Logger log) {
|
||||
final String hostname = NetUtils.getHostname();
|
||||
final String classname = clazz.getSimpleName();
|
||||
LOG.info(createStartupShutdownMessage(classname, hostname, args));
|
||||
log.info(createStartupShutdownMessage(classname, hostname, args));
|
||||
|
||||
if (SystemUtils.IS_OS_UNIX) {
|
||||
try {
|
||||
SignalLogger.INSTANCE.register(LOG);
|
||||
SignalLogger.INSTANCE.register(log);
|
||||
} catch (Throwable t) {
|
||||
LOG.warn("failed to register any UNIX signal loggers: ", t);
|
||||
log.warn("failed to register any UNIX signal loggers: ", t);
|
||||
}
|
||||
}
|
||||
ShutdownHookManager.get().addShutdownHook(
|
||||
new Runnable() {
|
||||
@Override
|
||||
public void run() {
|
||||
LOG.info(toStartupShutdownString("SHUTDOWN_MSG: ", new String[]{
|
||||
log.info(toStartupShutdownString("SHUTDOWN_MSG: ", new String[]{
|
||||
"Shutting down " + classname + " at " + hostname}));
|
||||
LogManager.shutdown();
|
||||
}
|
||||
|
|
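With the `LogAdapter` indirection removed, callers pass an SLF4J `Logger` straight to `startupShutdownMessage`. A minimal sketch of the calling convention (the daemon class name is illustrative):

```java
import org.apache.hadoop.util.StringUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class MyDaemon {
  private static final Logger LOG = LoggerFactory.getLogger(MyDaemon.class);

  public static void main(String[] args) {
    // Logs the startup banner, registers UNIX signal loggers where supported,
    // and installs a shutdown hook that logs the shutdown message.
    StringUtils.startupShutdownMessage(MyDaemon.class, args, LOG);
    // ... daemon initialization would follow here ...
  }
}
```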
|
@ -93,6 +93,10 @@ public class VersionInfo {
|
|||
return info.getProperty("protocVersion", "Unknown");
|
||||
}
|
||||
|
||||
protected String _getCompilePlatform() {
|
||||
return info.getProperty("compilePlatform", "Unknown");
|
||||
}
|
||||
|
||||
private static VersionInfo COMMON_VERSION_INFO = new VersionInfo("common");
|
||||
/**
|
||||
* Get the Hadoop version.
|
||||
|
@ -167,12 +171,21 @@ public class VersionInfo {
|
|||
return COMMON_VERSION_INFO._getProtocVersion();
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns the OS platform used for the build.
|
||||
* @return the OS platform
|
||||
*/
|
||||
public static String getCompilePlatform() {
|
||||
return COMMON_VERSION_INFO._getCompilePlatform();
|
||||
}
|
||||
|
||||
public static void main(String[] args) {
|
||||
LOG.debug("version: "+ getVersion());
|
||||
System.out.println("Hadoop " + getVersion());
|
||||
System.out.println("Source code repository " + getUrl() + " -r " +
|
||||
getRevision());
|
||||
System.out.println("Compiled by " + getUser() + " on " + getDate());
|
||||
System.out.println("Compiled on platform " + getCompilePlatform());
|
||||
System.out.println("Compiled with protoc " + getProtocVersion());
|
||||
System.out.println("From source with checksum " + getSrcChecksum());
|
||||
System.out.println("This command was run using " +
|
||||
|
|
|
@ -34,6 +34,7 @@ import org.slf4j.LoggerFactory;
|
|||
import org.xml.sax.SAXException;
|
||||
|
||||
import java.io.*;
|
||||
import java.util.concurrent.atomic.AtomicBoolean;
|
||||
|
||||
/**
|
||||
* General xml utilities.
|
||||
|
@ -59,6 +60,11 @@ public class XMLUtils {
|
|||
public static final String VALIDATION =
|
||||
"http://xml.org/sax/features/validation";
|
||||
|
||||
private static final AtomicBoolean CAN_SET_TRANSFORMER_ACCESS_EXTERNAL_DTD =
|
||||
new AtomicBoolean(true);
|
||||
private static final AtomicBoolean CAN_SET_TRANSFORMER_ACCESS_EXTERNAL_STYLESHEET =
|
||||
new AtomicBoolean(true);
|
||||
|
||||
/**
|
||||
* Transform input xml given a stylesheet.
|
||||
*
|
||||
|
@ -143,8 +149,7 @@ public class XMLUtils {
|
|||
throws TransformerConfigurationException {
|
||||
TransformerFactory trfactory = TransformerFactory.newInstance();
|
||||
trfactory.setFeature(XMLConstants.FEATURE_SECURE_PROCESSING, true);
|
||||
bestEffortSetAttribute(trfactory, XMLConstants.ACCESS_EXTERNAL_DTD, "");
|
||||
bestEffortSetAttribute(trfactory, XMLConstants.ACCESS_EXTERNAL_STYLESHEET, "");
|
||||
setOptionalSecureTransformerAttributes(trfactory);
|
||||
return trfactory;
|
||||
}
|
||||
|
||||
|
@ -161,29 +166,45 @@ public class XMLUtils {
|
|||
throws TransformerConfigurationException {
|
||||
SAXTransformerFactory trfactory = (SAXTransformerFactory) SAXTransformerFactory.newInstance();
|
||||
trfactory.setFeature(XMLConstants.FEATURE_SECURE_PROCESSING, true);
|
||||
bestEffortSetAttribute(trfactory, XMLConstants.ACCESS_EXTERNAL_DTD, "");
|
||||
bestEffortSetAttribute(trfactory, XMLConstants.ACCESS_EXTERNAL_STYLESHEET, "");
|
||||
setOptionalSecureTransformerAttributes(trfactory);
|
||||
return trfactory;
|
||||
}
|
||||
|
||||
/**
|
||||
* These attributes are recommended for maximum security but some JAXP transformers do
|
||||
* not support them. If we fail to set these attributes at any stage, we won't try again
|
||||
* for subsequent transformers.
|
||||
*
|
||||
* @param transformerFactory to update
|
||||
*/
|
||||
private static void setOptionalSecureTransformerAttributes(
|
||||
TransformerFactory transformerFactory) {
|
||||
bestEffortSetAttribute(transformerFactory, CAN_SET_TRANSFORMER_ACCESS_EXTERNAL_DTD,
|
||||
XMLConstants.ACCESS_EXTERNAL_DTD, "");
|
||||
bestEffortSetAttribute(transformerFactory, CAN_SET_TRANSFORMER_ACCESS_EXTERNAL_STYLESHEET,
|
||||
XMLConstants.ACCESS_EXTERNAL_STYLESHEET, "");
|
||||
}
|
||||
|
||||
/**
|
||||
* Set an attribute value on a {@link TransformerFactory}. If the TransformerFactory
|
||||
* does not support the attribute, the method just returns <code>false</code> and
|
||||
* logs the issue at debug level.
|
||||
*
|
||||
* @param transformerFactory to update
|
||||
* @param flag indicates whether to attempt the update; the flag is set to
|
||||
* <code>false</code> if an update fails
|
||||
* @param name of the attribute to set
|
||||
* @param value to set on the attribute
|
||||
* @return whether the attribute was successfully set
|
||||
*/
|
||||
static boolean bestEffortSetAttribute(TransformerFactory transformerFactory,
|
||||
String name, Object value) {
|
||||
try {
|
||||
transformerFactory.setAttribute(name, value);
|
||||
return true;
|
||||
} catch (Throwable t) {
|
||||
LOG.debug("Issue setting TransformerFactory attribute {}: {}", name, t.toString());
|
||||
static void bestEffortSetAttribute(TransformerFactory transformerFactory, AtomicBoolean flag,
|
||||
String name, Object value) {
|
||||
if (flag.get()) {
|
||||
try {
|
||||
transformerFactory.setAttribute(name, value);
|
||||
} catch (Throwable t) {
|
||||
flag.set(false);
|
||||
LOG.debug("Issue setting TransformerFactory attribute {}: {}", name, t.toString());
|
||||
}
|
||||
}
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
|
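A hedged sketch of the same "try once, then remember the failure" pattern that the new `bestEffortSetAttribute` overload applies, shown here on a locally created `TransformerFactory` rather than the Hadoop internals (the class and flag names are illustrative):

```java
import java.util.concurrent.atomic.AtomicBoolean;
import javax.xml.XMLConstants;
import javax.xml.transform.TransformerFactory;

public class SecureTransformerFactoryExample {
  // Remembers whether the JAXP implementation rejected the attribute once,
  // so later factories skip the attempt instead of failing repeatedly.
  private static final AtomicBoolean CAN_SET_EXTERNAL_DTD = new AtomicBoolean(true);

  static TransformerFactory newFactory() throws Exception {
    TransformerFactory factory = TransformerFactory.newInstance();
    factory.setFeature(XMLConstants.FEATURE_SECURE_PROCESSING, true);
    if (CAN_SET_EXTERNAL_DTD.get()) {
      try {
        factory.setAttribute(XMLConstants.ACCESS_EXTERNAL_DTD, "");
      } catch (IllegalArgumentException e) {
        // This JAXP implementation does not support the attribute; do not retry.
        CAN_SET_EXTERNAL_DTD.set(false);
      }
    }
    return factory;
  }

  public static void main(String[] args) throws Exception {
    System.out.println("Created factory: " + newFactory());
  }
}
```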
|
@ -1,5 +1,4 @@
|
|||
/*
|
||||
* *
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
|
@ -15,9 +14,11 @@
|
|||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
* /
|
||||
*/
|
||||
|
||||
/**
|
||||
* Support for concurrent execution.
|
||||
*/
|
||||
@InterfaceAudience.Private
|
||||
@InterfaceStability.Unstable
|
||||
package org.apache.hadoop.util.concurrent;
|
||||
|
|
|
@ -24,3 +24,4 @@ date=${version-info.build.time}
|
|||
url=${version-info.scm.uri}
|
||||
srcChecksum=${version-info.source.md5}
|
||||
protocVersion=${hadoop.protobuf.version}
|
||||
compilePlatform=${os.detected.classifier}
|
||||
|
|
|
@ -975,7 +975,7 @@ this will be in the bucket; the `rm` operation will then take time proportional
|
|||
to the size of the data. Furthermore, the deleted files will continue to incur
|
||||
storage costs.
|
||||
|
||||
To avoid this, use the the `-skipTrash` option.
|
||||
To avoid this, use the `-skipTrash` option.
|
||||
|
||||
```bash
|
||||
hadoop fs -rm -skipTrash s3a://bucket/dataset
|
||||
|
|
|
@ -220,7 +220,7 @@ Each metrics record contains tags such as ProcessName, SessionId, and Hostname a
|
|||
| `WarmUpEDEKTimeNumOps` | Total number of warming up EDEK |
|
||||
| `WarmUpEDEKTimeAvgTime` | Average time of warming up EDEK in milliseconds |
|
||||
| `WarmUpEDEKTime`*num*`s(50/75/90/95/99)thPercentileLatency` | The 50/75/90/95/99th percentile of time spent in warming up EDEK in milliseconds (*num* seconds granularity). Percentile measurement is off by default, by watching no intervals. The intervals are specified by `dfs.metrics.percentiles.intervals`. |
|
||||
| `ResourceCheckTime`*num*`s(50/75/90/95/99)thPercentileLatency` | The 50/75/90/95/99th percentile of of NameNode resource check latency in milliseconds (*num* seconds granularity). Percentile measurement is off by default, by watching no intervals. The intervals are specified by `dfs.metrics.percentiles.intervals`. |
|
||||
| `ResourceCheckTime`*num*`s(50/75/90/95/99)thPercentileLatency` | The 50/75/90/95/99th percentile of NameNode resource check latency in milliseconds (*num* seconds granularity). Percentile measurement is off by default, by watching no intervals. The intervals are specified by `dfs.metrics.percentiles.intervals`. |
|
||||
| `EditLogTailTimeNumOps` | Total number of times the standby NameNode tailed the edit log |
|
||||
| `EditLogTailTimeAvgTime` | Average time (in milliseconds) spent by standby NameNode in tailing edit log |
|
||||
| `EditLogTailTime`*num*`s(50/75/90/95/99)thPercentileLatency` | The 50/75/90/95/99th percentile of time spent in tailing edit logs by standby NameNode in milliseconds (*num* seconds granularity). Percentile measurement is off by default, by watching no intervals. The intervals are specified by `dfs.metrics.percentiles.intervals`. |
|
||||
|
@ -370,6 +370,9 @@ Each metrics record contains tags such as SessionId and Hostname as additional i
|
|||
|:---- |:---- |
|
||||
| `BytesWritten` | Total number of bytes written to DataNode |
|
||||
| `BytesRead` | Total number of bytes read from DataNode |
|
||||
| `ReadTransferRateNumOps` | Total number of data read transfers |
|
||||
| `ReadTransferRateAvgTime` | Average transfer rate of bytes read from DataNode, measured in bytes per second. |
|
||||
| `ReadTransferRate`*num*`s(50/75/90/95/99)thPercentileRate` | The 50/75/90/95/99th percentile of the transfer rate of bytes read from DataNode, measured in bytes per second. |
|
||||
| `BlocksWritten` | Total number of blocks written to DataNode |
|
||||
| `BlocksRead` | Total number of blocks read from DataNode |
|
||||
| `BlocksReplicated` | Total number of blocks replicated |
|
||||
|
@ -589,17 +592,19 @@ StateStoreMetrics
|
|||
-----------------
|
||||
StateStoreMetrics shows the statistics of the State Store component in Router-based federation.
|
||||
|
||||
| Name | Description |
|
||||
|:---- |:---- |
|
||||
| `ReadsNumOps` | Number of GET transactions for State Store within an interval time of metric |
|
||||
| `ReadsAvgTime` | Average time of GET transactions for State Store in milliseconds |
|
||||
| `WritesNumOps` | Number of PUT transactions for State Store within an interval time of metric |
|
||||
| `WritesAvgTime` | Average time of PUT transactions for State Store in milliseconds |
|
||||
| `RemovesNumOps` | Number of REMOVE transactions for State Store within an interval time of metric |
|
||||
| `RemovesAvgTime` | Average time of REMOVE transactions for State Store in milliseconds |
|
||||
| `FailuresNumOps` | Number of failed transactions for State Store within an interval time of metric |
|
||||
| `FailuresAvgTime` | Average time of failed transactions for State Store in milliseconds |
|
||||
| `Cache`*BaseRecord*`Size` | Number of store records to cache in State Store |
|
||||
| Name | Description |
|
||||
|:------------------------------------------|:-----------------------------------------------------------------------------------|
|
||||
| `ReadsNumOps` | Number of GET transactions for State Store within an interval time of metric |
|
||||
| `ReadsAvgTime` | Average time of GET transactions for State Store in milliseconds |
|
||||
| `WritesNumOps` | Number of PUT transactions for State Store within an interval time of metric |
|
||||
| `WritesAvgTime` | Average time of PUT transactions for State Store in milliseconds |
|
||||
| `RemovesNumOps` | Number of REMOVE transactions for State Store within an interval time of metric |
|
||||
| `RemovesAvgTime` | Average time of REMOVE transactions for State Store in milliseconds |
|
||||
| `FailuresNumOps` | Number of failed transactions for State Store within an interval time of metric |
|
||||
| `FailuresAvgTime` | Average time of failed transactions for State Store in milliseconds |
|
||||
| `Cache`*BaseRecord*`Size` | Number of store records to cache in State Store |
|
||||
| `Cache`*BaseRecord*`LoadNumOps` | Number of times store records are loaded in the State Store Cache from State Store |
|
||||
| `Cache`*BaseRecord*`LoadAvgTime` | Average time of loading State Store Cache from State Store in milliseconds |
|
||||
|
||||
yarn context
|
||||
============
|
||||
|
|
|
@ -20,7 +20,9 @@ Hadoop in Secure Mode
|
|||
Introduction
|
||||
------------
|
||||
|
||||
This document describes how to configure authentication for Hadoop in secure mode. When Hadoop is configured to run in secure mode, each Hadoop service and each user must be authenticated by Kerberos.
|
||||
In its default configuration, we expect you to make sure attackers don't have access to your Hadoop cluster by restricting all network access. If you want any restrictions on who can remotely access data or submit work, you MUST secure authentication and access for your Hadoop cluster as described in this document.
|
||||
|
||||
When Hadoop is configured to run in secure mode, each Hadoop service and each user must be authenticated by Kerberos.
|
||||
|
||||
Forward and reverse host lookup for all service hosts must be configured correctly to allow services to authenticate with each other. Host lookups may be configured using either DNS or `/etc/hosts` files. Working knowledge of Kerberos and DNS is recommended before attempting to configure Hadoop services in Secure Mode.
|
||||
|
||||
|
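Once Kerberos authentication is enabled, clients and services log in through `UserGroupInformation`. A minimal sketch, assuming a keytab and principal have already been provisioned (the principal, keytab path and class name below are placeholders, and the two properties would normally be set in `core-site.xml` rather than in code):

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.security.UserGroupInformation;

public class SecureLoginExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // On a secure cluster these properties come from core-site.xml.
    conf.set("hadoop.security.authentication", "kerberos");
    conf.setBoolean("hadoop.security.authorization", true);

    UserGroupInformation.setConfiguration(conf);
    // Placeholder principal and keytab path; substitute your own.
    UserGroupInformation.loginUserFromKeytab(
        "nn/host.example.com@EXAMPLE.COM", "/etc/security/keytabs/nn.service.keytab");
    System.out.println("Logged in as: " + UserGroupInformation.getLoginUser());
  }
}
```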
@ -595,7 +597,7 @@ hadoop kdiag \
|
|||
--keytab zk.service.keytab --principal zookeeper/devix.example.org@REALM
|
||||
```
|
||||
|
||||
This attempts to to perform all diagnostics without failing early, load in
|
||||
This attempts to perform all diagnostics without failing early, load in
|
||||
the HDFS and YARN XML resources, require a minimum key length of 1024 bytes,
|
||||
and log in as the principal `zookeeper/devix.example.org@REALM`, whose key must be in
|
||||
the keytab `zk.service.keytab`
|
||||
|
|
|
@ -35,6 +35,8 @@ These instructions do not cover integration with any Kerberos services,
|
|||
-everyone bringing up a production cluster should include connecting to their
|
||||
organisation's Kerberos infrastructure as a key part of the deployment.
|
||||
|
||||
See [Security](./SecureMode.html) for details on how to secure a cluster.
|
||||
|
||||
Prerequisites
|
||||
-------------
|
||||
|
||||
|
|
|
@ -501,7 +501,7 @@ Where
|
|||
def blocks(FS, p, s, s + l) = a list of the blocks containing data(FS, path)[s:s+l]
|
||||
|
||||
|
||||
Note that that as `length(FS, f) ` is defined as `0` if `isDir(FS, f)`, the result
|
||||
Note that as `length(FS, f) ` is defined as `0` if `isDir(FS, f)`, the result
|
||||
of `getFileBlockLocations()` on a directory is `[]`
|
||||
|
||||
|
||||
|
@ -707,7 +707,7 @@ This is a significant difference between the behavior of object stores
|
|||
and that of filesystems, as it allows >1 client to create a file with `overwrite=false`,
|
||||
and potentially confuse file/directory logic. In particular, using `create()` to acquire
|
||||
an exclusive lock on a file (whoever creates the file without an error is considered
|
||||
the holder of the lock) may not not a safe algorithm to use when working with object stores.
|
||||
the holder of the lock) may not be a safe algorithm to use when working with object stores.
|
||||
|
||||
* Object stores may create an empty file as a marker when a file is created.
|
||||
However, object stores with `overwrite=true` semantics may not implement this atomically,
|
||||
|
|
|
@ -167,7 +167,7 @@ rather than just any FS-specific subclass implemented by the implementation
|
|||
custom subclasses.
|
||||
|
||||
This is critical to ensure safe use of the feature: directory listing/
|
||||
status serialization/deserialization can result result in the `withFileStatus()`
|
||||
status serialization/deserialization can result in the `withFileStatus()`
|
||||
argument not being the custom subclass returned by the Filesystem instance's
|
||||
own `getFileStatus()`, `listFiles()`, `listLocatedStatus()` calls, etc.
|
||||
|
||||
|
|
|
@ -228,7 +228,7 @@ Accordingly: *Use if and only if you are confident that the conditions are met.*
|
|||
|
||||
### `fs.s3a.create.header` User-supplied header support
|
||||
|
||||
Options with the prefix `fs.s3a.create.header.` will be added to to the
|
||||
Options with the prefix `fs.s3a.create.header.` will be added to the
|
||||
S3 object metadata as "user defined metadata".
|
||||
This metadata is visible to all applications. It can also be retrieved through the
|
||||
FileSystem/FileContext `listXAttrs()` and `getXAttrs()` API calls with the prefix `header.`
|
||||
|
|
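A hedged sketch of setting a custom creation header on the client configuration and reading it back through the XAttr API (the bucket, path and header name are placeholders):

```java
import java.nio.charset.StandardCharsets;
import java.util.Map;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class S3ACreateHeaderExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Every option under fs.s3a.create.header. becomes user metadata on new objects.
    conf.set("fs.s3a.create.header.x-source-system", "etl-pipeline");

    Path path = new Path("s3a://example-bucket/data/part-0000");
    try (FileSystem fs = FileSystem.newInstance(path.toUri(), conf)) {
      fs.create(path).close();

      // The header comes back through the XAttr API with the "header." prefix.
      Map<String, byte[]> attrs = fs.getXAttrs(path);
      byte[] value = attrs.get("header.x-source-system");
      System.out.println(value == null
          ? "header not found"
          : new String(value, StandardCharsets.UTF_8));
    }
  }
}
```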
|
@ -980,7 +980,7 @@ throw `UnsupportedOperationException`.
|
|||
### `StreamCapabilities`
|
||||
|
||||
Implementors of filesystem clients SHOULD implement the `StreamCapabilities`
|
||||
interface and its `hasCapabilities()` method to to declare whether or not
|
||||
interface and its `hasCapabilities()` method to declare whether or not
|
||||
an output streams offer the visibility and durability guarantees of `Syncable`.
|
||||
|
||||
Implementors of `StreamCapabilities.hasCapabilities()` MUST NOT declare that
|
||||
|
|
|
@ -143,7 +143,7 @@ too must have this context defined.
|
|||
|
||||
### Identifying the system accounts `hadoop.registry.system.acls`
|
||||
|
||||
These are the the accounts which are given full access to the base of the
|
||||
These are the accounts which are given full access to the base of the
|
||||
registry. The Resource Manager needs this option to create the root paths.
|
||||
|
||||
Client applications writing to the registry access to the nodes it creates.
|
||||
|
|
|
@ -29,7 +29,7 @@ a secure registry:
|
|||
1. Allow the RM to create per-user regions of the registration space
|
||||
1. Allow applications belonging to a user to write registry entries
|
||||
into their part of the space. These may be short-lived or long-lived
|
||||
YARN applications, or they may be be static applications.
|
||||
YARN applications, or they may be static applications.
|
||||
1. Prevent other users from writing into another user's part of the registry.
|
||||
1. Allow system services to register to a `/services` section of the registry.
|
||||
1. Provide read access to clients of a registry.
|
||||
|
|
|
@ -194,7 +194,7 @@ public abstract class TestConfigurationFieldsBase {
|
|||
HashMap<String,String> retVal = new HashMap<>();
|
||||
|
||||
// Setup regexp for valid properties
|
||||
String propRegex = "^[A-Za-z][A-Za-z0-9_-]+(\\.[A-Za-z0-9_-]+)+$";
|
||||
String propRegex = "^[A-Za-z][A-Za-z0-9_-]+(\\.[A-Za-z%s0-9_-]+)+$";
|
||||
Pattern p = Pattern.compile(propRegex);
|
||||
|
||||
// Iterate through class member variables
|
||||
|
|
|
@ -17,12 +17,14 @@
|
|||
*/
|
||||
package org.apache.hadoop.crypto;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.io.OutputStream;
|
||||
|
||||
import org.apache.hadoop.conf.Configuration;
|
||||
|
||||
import org.junit.BeforeClass;
|
||||
import org.junit.Test;
|
||||
import static org.apache.hadoop.test.LambdaTestUtils.intercept;
|
||||
import static org.mockito.Mockito.*;
|
||||
|
||||
/**
|
||||
|
@ -54,4 +56,22 @@ public class TestCryptoOutputStreamClosing {
|
|||
verify(outputStream, never()).close();
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testUnderlyingOutputStreamClosedWhenExceptionClosing() throws Exception {
|
||||
OutputStream outputStream = mock(OutputStream.class);
|
||||
CryptoOutputStream cos = spy(new CryptoOutputStream(outputStream, codec,
|
||||
new byte[16], new byte[16], 0L, true));
|
||||
|
||||
// exception while flushing during close
|
||||
doThrow(new IOException("problem flushing wrapped stream"))
|
||||
.when(cos).flush();
|
||||
|
||||
intercept(IOException.class,
|
||||
() -> cos.close());
|
||||
|
||||
// We expect that the close of the CryptoOutputStream closes the
|
||||
// wrapped OutputStream even though we got an exception
|
||||
// during CryptoOutputStream::close (in the flush method)
|
||||
verify(outputStream).close();
|
||||
}
|
||||
}
|
||||
|
|
|
@ -1321,16 +1321,16 @@ public class TestFileUtil {
|
|||
if (wildcardPath.equals(classPath)) {
|
||||
// add wildcard matches
|
||||
for (File wildcardMatch: wildcardMatches) {
|
||||
expectedClassPaths.add(wildcardMatch.toURI().toURL()
|
||||
expectedClassPaths.add(wildcardMatch.getCanonicalFile().toURI().toURL()
|
||||
.toExternalForm());
|
||||
}
|
||||
} else {
|
||||
File fileCp = null;
|
||||
if(!new Path(classPath).isAbsolute()) {
|
||||
fileCp = new File(tmp, classPath);
|
||||
fileCp = new File(tmp, classPath).getCanonicalFile();
|
||||
}
|
||||
else {
|
||||
fileCp = new File(classPath);
|
||||
fileCp = new File(classPath).getCanonicalFile();
|
||||
}
|
||||
if (nonExistentSubdir.equals(classPath)) {
|
||||
// expect to maintain trailing path separator if present in input, even
|
||||
|
@ -1385,7 +1385,8 @@ public class TestFileUtil {
|
|||
for (Path jar: jars) {
|
||||
URL url = jar.toUri().toURL();
|
||||
assertTrue("the jar should match either of the jars",
|
||||
url.equals(jar1.toURI().toURL()) || url.equals(jar2.toURI().toURL()));
|
||||
url.equals(jar1.getCanonicalFile().toURI().toURL()) ||
|
||||
url.equals(jar2.getCanonicalFile().toURI().toURL()));
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -548,5 +548,24 @@ public class TestLocalDirAllocator {
|
|||
"p1/x", Long.MAX_VALUE - 1), "Expect a DiskErrorException.",
|
||||
() -> dirAllocator.getLocalPathForWrite("p1/x", Long.MAX_VALUE - 1, conf));
|
||||
}
|
||||
|
||||
/**
|
||||
* Test for HADOOP-18636 LocalDirAllocator cannot recover from directory tree deletion.
|
||||
*/
|
||||
@Test(timeout = 30000)
|
||||
public void testDirectoryRecovery() throws Throwable {
|
||||
String dir0 = buildBufferDir(ROOT, 0);
|
||||
String subdir = dir0 + "/subdir1/subdir2";
|
||||
|
||||
conf.set(CONTEXT, subdir);
|
||||
// get local path and an ancestor
|
||||
final Path pathForWrite = dirAllocator.getLocalPathForWrite("file", -1, conf);
|
||||
final Path ancestor = pathForWrite.getParent().getParent();
|
||||
|
||||
// delete that ancestor
|
||||
localFs.delete(ancestor, true);
|
||||
// and expect to get a new file back
|
||||
dirAllocator.getLocalPathForWrite("file2", -1, conf);
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -25,8 +25,6 @@ import static org.junit.Assert.fail;
|
|||
import java.io.IOException;
|
||||
import java.net.URI;
|
||||
|
||||
import org.apache.commons.logging.Log;
|
||||
import org.apache.commons.logging.LogFactory;
|
||||
import org.apache.hadoop.conf.Configuration;
|
||||
import org.apache.hadoop.fs.FSDataInputStream;
|
||||
import org.apache.hadoop.fs.FSDataOutputStream;
|
||||
|
@ -39,7 +37,8 @@ import org.apache.hadoop.security.UserGroupInformation;
|
|||
import org.junit.After;
|
||||
import org.junit.Before;
|
||||
import org.junit.Test;
|
||||
|
||||
import org.slf4j.Logger;
|
||||
import org.slf4j.LoggerFactory;
|
||||
|
||||
/**
|
||||
*
|
||||
|
@ -51,8 +50,8 @@ import org.junit.Test;
|
|||
*/
|
||||
|
||||
public class TestViewFileSystemLocalFileSystem extends ViewFileSystemBaseTest {
|
||||
private static final Log LOG =
|
||||
LogFactory.getLog(TestViewFileSystemLocalFileSystem.class);
|
||||
private static final Logger LOG =
|
||||
LoggerFactory.getLogger(TestViewFileSystemLocalFileSystem.class);
|
||||
|
||||
@Override
|
||||
@Before
|
||||
|
|
|
@ -21,8 +21,6 @@ import java.io.IOException;
|
|||
import java.net.URI;
|
||||
import java.net.URISyntaxException;
|
||||
|
||||
import org.apache.commons.logging.Log;
|
||||
import org.apache.commons.logging.LogFactory;
|
||||
import org.apache.hadoop.conf.Configuration;
|
||||
import org.apache.hadoop.fs.FSDataInputStream;
|
||||
import org.apache.hadoop.fs.FSDataOutputStream;
|
||||
|
@ -35,6 +33,8 @@ import org.junit.After;
|
|||
import org.junit.Assert;
|
||||
import org.junit.Before;
|
||||
import org.junit.Test;
|
||||
import org.slf4j.Logger;
|
||||
import org.slf4j.LoggerFactory;
|
||||
|
||||
/**
|
||||
*
|
||||
|
@ -43,8 +43,8 @@ import org.junit.Test;
|
|||
*/
|
||||
public class TestViewFileSystemOverloadSchemeLocalFileSystem {
|
||||
private static final String FILE = "file";
|
||||
private static final Log LOG =
|
||||
LogFactory.getLog(TestViewFileSystemOverloadSchemeLocalFileSystem.class);
|
||||
private static final Logger LOG =
|
||||
LoggerFactory.getLogger(TestViewFileSystemOverloadSchemeLocalFileSystem.class);
|
||||
private FileSystem fsTarget;
|
||||
private Configuration conf;
|
||||
private Path targetTestRoot;
|
||||
|
|
|
@ -17,12 +17,12 @@
|
|||
*/
|
||||
package org.apache.hadoop.http;
|
||||
|
||||
import org.apache.commons.logging.Log;
|
||||
import org.apache.commons.logging.LogFactory;
|
||||
import org.apache.hadoop.conf.Configuration;
|
||||
import org.apache.hadoop.fs.CommonConfigurationKeys;
|
||||
import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
|
||||
import org.apache.hadoop.minikdc.MiniKdc;
|
||||
import org.apache.hadoop.net.NetUtils;
|
||||
import org.apache.hadoop.security.AuthenticationFilterInitializer;
|
||||
import org.apache.hadoop.security.UserGroupInformation;
|
||||
import org.apache.hadoop.security.authentication.KerberosTestUtils;
|
||||
import org.apache.hadoop.security.authentication.client.AuthenticatedURL;
|
||||
|
@ -53,8 +53,6 @@ import static org.junit.Assert.assertTrue;
|
|||
*/
|
||||
public class TestHttpServerWithSpnego {
|
||||
|
||||
static final Log LOG = LogFactory.getLog(TestHttpServerWithSpnego.class);
|
||||
|
||||
private static final String SECRET_STR = "secret";
|
||||
private static final String HTTP_USER = "HTTP";
|
||||
private static final String PREFIX = "hadoop.http.authentication.";
|
||||
|
@ -108,7 +106,9 @@ public class TestHttpServerWithSpnego {
|
|||
*/
|
||||
@Test
|
||||
public void testAuthenticationWithProxyUser() throws Exception {
|
||||
Configuration spengoConf = getSpengoConf(new Configuration());
|
||||
Configuration spnegoConf = getSpnegoConf(new Configuration());
|
||||
spnegoConf.set(HttpServer2.FILTER_INITIALIZER_PROPERTY,
|
||||
ProxyUserAuthenticationFilterInitializer.class.getName());
|
||||
|
||||
//setup logs dir
|
||||
System.setProperty("hadoop.log.dir", testRootDir.getAbsolutePath());
|
||||
|
@ -122,15 +122,15 @@ public class TestHttpServerWithSpnego {
|
|||
new String[]{"groupC"});
|
||||
|
||||
// Make userA impersonate users in groupB
|
||||
spengoConf.set("hadoop.proxyuser.userA.hosts", "*");
|
||||
spengoConf.set("hadoop.proxyuser.userA.groups", "groupB");
|
||||
ProxyUsers.refreshSuperUserGroupsConfiguration(spengoConf);
|
||||
spnegoConf.set("hadoop.proxyuser.userA.hosts", "*");
|
||||
spnegoConf.set("hadoop.proxyuser.userA.groups", "groupB");
|
||||
ProxyUsers.refreshSuperUserGroupsConfiguration(spnegoConf);
|
||||
|
||||
HttpServer2 httpServer = null;
|
||||
try {
|
||||
// Create http server to test.
|
||||
httpServer = getCommonBuilder()
|
||||
.setConf(spengoConf)
|
||||
.setConf(spnegoConf)
|
||||
.setACL(new AccessControlList("userA groupA"))
|
||||
.build();
|
||||
httpServer.start();
|
||||
|
@@ -195,6 +195,48 @@ public class TestHttpServerWithSpnego {
    }
  }

  @Test
  public void testAuthenticationToAllowList() throws Exception {
    Configuration spnegoConf = getSpnegoConf(new Configuration());
    String[] allowList = new String[] {"/jmx", "/prom"};
    String[] denyList = new String[] {"/conf", "/stacks", "/logLevel"};
    spnegoConf.set(PREFIX + "kerberos.endpoint.whitelist", String.join(",", allowList));
    spnegoConf.set(CommonConfigurationKeysPublic.HADOOP_PROMETHEUS_ENABLED, "true");
    spnegoConf.set(HttpServer2.FILTER_INITIALIZER_PROPERTY,
        AuthenticationFilterInitializer.class.getName());

    //setup logs dir
    System.setProperty("hadoop.log.dir", testRootDir.getAbsolutePath());

    HttpServer2 httpServer = null;
    try {
      // Create http server to test.
      httpServer = getCommonBuilder().setConf(spnegoConf).setSecurityEnabled(true)
          .setUsernameConfKey(PREFIX + "kerberos.principal")
          .setKeytabConfKey(PREFIX + "kerberos.keytab").build();
      httpServer.start();

      String serverURL = "http://" + NetUtils.getHostPortString(httpServer.getConnectorAddress(0));

      // endpoints in whitelist should not require Kerberos authentication
      for (String endpoint : allowList) {
        HttpURLConnection conn = (HttpURLConnection) new URL(serverURL + endpoint).openConnection();
        Assert.assertEquals(HttpURLConnection.HTTP_OK, conn.getResponseCode());
      }

      // endpoints not in whitelist should require Kerberos authentication
      for (String endpoint : denyList) {
        HttpURLConnection conn = (HttpURLConnection) new URL(serverURL + endpoint).openConnection();
        Assert.assertEquals(HttpURLConnection.HTTP_UNAUTHORIZED, conn.getResponseCode());
      }

    } finally {
      if (httpServer != null) {
        httpServer.stop();
      }
    }
  }

  private AuthenticatedURL.Token getEncryptedAuthToken(Signer signer,
      String user) throws Exception {
    AuthenticationToken token =
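For readers skimming the diff, here is a minimal sketch of how the endpoint allowlist exercised by the new test above could be wired up by a caller of HttpServer2. The property names come directly from the test; the class and method names in the sketch are placeholders and not part of this change.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
import org.apache.hadoop.http.HttpServer2;
import org.apache.hadoop.security.AuthenticationFilterInitializer;

public final class SpnegoAllowListSketch {
  private static final String PREFIX = "hadoop.http.authentication.";

  // Leaves /jmx and /prom reachable without Kerberos; every other endpoint
  // still goes through the SPNEGO authentication filter.
  static Configuration allowListConf() {
    Configuration conf = new Configuration();
    conf.set(PREFIX + "type", "kerberos");
    conf.set(PREFIX + "kerberos.endpoint.whitelist", "/jmx,/prom");
    conf.set(CommonConfigurationKeysPublic.HADOOP_PROMETHEUS_ENABLED, "true");
    conf.set(HttpServer2.FILTER_INITIALIZER_PROPERTY,
        AuthenticationFilterInitializer.class.getName());
    return conf;
  }
}
```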
@@ -213,10 +255,8 @@ public class TestHttpServerWithSpnego {
    return new Signer(secretProvider);
  }

  private Configuration getSpengoConf(Configuration conf) {
  private Configuration getSpnegoConf(Configuration conf) {
    conf = new Configuration();
    conf.set(HttpServer2.FILTER_INITIALIZER_PROPERTY,
        ProxyUserAuthenticationFilterInitializer.class.getName());
    conf.set(PREFIX + "type", "kerberos");
    conf.setBoolean(PREFIX + "simple.anonymous.allowed", false);
    conf.set(PREFIX + "signature.secret.file",
@@ -26,6 +26,9 @@ import org.apache.hadoop.io.SequenceFile.CompressionType;
import org.apache.hadoop.io.SequenceFile.Metadata;
import org.apache.hadoop.io.compress.CompressionCodec;
import org.apache.hadoop.io.compress.DefaultCodec;
import org.apache.hadoop.io.serializer.Deserializer;
import org.apache.hadoop.io.serializer.Serialization;
import org.apache.hadoop.io.serializer.Serializer;
import org.apache.hadoop.io.serializer.avro.AvroReflectSerialization;
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.hadoop.util.ReflectionUtils;

@@ -756,6 +759,122 @@ public class TestSequenceFile {
    }
  }

  @Test
  public void testSerializationUsingWritableNameAlias() throws IOException {
    Configuration config = new Configuration();
    config.set(CommonConfigurationKeys.IO_SERIALIZATIONS_KEY, SimpleSerializer.class.getName());
    Path path = new Path(System.getProperty("test.build.data", "."),
        "SerializationUsingWritableNameAlias");

    // write with the original serializable class
    SequenceFile.Writer writer = SequenceFile.createWriter(
        config,
        SequenceFile.Writer.file(path),
        SequenceFile.Writer.keyClass(SimpleSerializable.class),
        SequenceFile.Writer.valueClass(SimpleSerializable.class));

    int max = 10;
    try {
      SimpleSerializable val = new SimpleSerializable();
      val.setId(-1);
      for (int i = 0; i < max; i++) {
        SimpleSerializable key = new SimpleSerializable();
        key.setId(i);
        writer.append(key, val);
      }
    } finally {
      writer.close();
    }

    // override name so it gets forced to the new serializable
    WritableName.setName(AnotherSimpleSerializable.class, SimpleSerializable.class.getName());

    // read and expect our new serializable, and all the correct values read
    SequenceFile.Reader reader = new SequenceFile.Reader(
        config,
        SequenceFile.Reader.file(path));

    AnotherSimpleSerializable key = new AnotherSimpleSerializable();
    int count = 0;
    while (true) {
      key = (AnotherSimpleSerializable) reader.next(key);
      if (key == null) {
        // make sure we exhausted all the ints we wrote
        assertEquals(count, max);
        break;
      }
      assertEquals(count++, key.getId());
    }
  }
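A short note on what the new test protects: the key and value class names are written into the SequenceFile header, so when a serializable class is later renamed, existing files still carry the old name. WritableName.setName maps that stored name onto the renamed class before a reader is opened. A minimal sketch, assuming a hypothetical rename from com.example.OldKey to NewKey:

```java
// Hypothetical rename: files on disk were written when the key class was
// still called "com.example.OldKey"; NewKey is its renamed successor.
WritableName.setName(NewKey.class, "com.example.OldKey");
SequenceFile.Reader reader = new SequenceFile.Reader(
    conf, SequenceFile.Reader.file(path));
```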
public static class SimpleSerializable implements Serializable {
|
||||
|
||||
private int id;
|
||||
|
||||
public int getId() {
|
||||
return id;
|
||||
}
|
||||
|
||||
public void setId(int id) {
|
||||
this.id = id;
|
||||
}
|
||||
}
|
||||
|
||||
public static class AnotherSimpleSerializable extends SimpleSerializable {
|
||||
}
|
||||
|
||||
public static class SimpleSerializer implements Serialization<SimpleSerializable> {
|
||||
|
||||
@Override
|
||||
public boolean accept(Class<?> c) {
|
||||
return SimpleSerializable.class.isAssignableFrom(c);
|
||||
}
|
||||
|
||||
@Override
|
||||
public Serializer<SimpleSerializable> getSerializer(Class<SimpleSerializable> c) {
|
||||
return new Serializer<SimpleSerializable>() {
|
||||
private DataOutputStream out;
|
||||
@Override
|
||||
public void open(OutputStream out) throws IOException {
|
||||
this.out = new DataOutputStream(out);
|
||||
}
|
||||
|
||||
@Override
|
||||
public void serialize(SimpleSerializable simpleSerializable) throws IOException {
|
||||
out.writeInt(simpleSerializable.getId());
|
||||
}
|
||||
|
||||
@Override
|
||||
public void close() throws IOException {
|
||||
out.close();
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
@Override
|
||||
public Deserializer<SimpleSerializable> getDeserializer(Class<SimpleSerializable> c) {
|
||||
return new Deserializer<SimpleSerializable>() {
|
||||
private DataInputStream dis;
|
||||
@Override
|
||||
public void open(InputStream in) throws IOException {
|
||||
dis = new DataInputStream(in);
|
||||
}
|
||||
|
||||
@Override
|
||||
public SimpleSerializable deserialize(SimpleSerializable simpleSerializable)
|
||||
throws IOException {
|
||||
simpleSerializable.setId(dis.readInt());
|
||||
return simpleSerializable;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void close() throws IOException {
|
||||
dis.close();
|
||||
}
|
||||
};
|
||||
}
|
||||
}
|
||||
|
||||
/** For debugging and testing. */
|
||||
public static void main(String[] args) throws Exception {
|
||||
int count = 1024 * 1024;
|
||||
|
|
|
@ -24,8 +24,14 @@ import java.io.IOException;
|
|||
import java.util.Random;
|
||||
|
||||
import org.apache.hadoop.conf.Configuration;
|
||||
import org.apache.hadoop.fs.CommonConfigurationKeys;
|
||||
import org.apache.hadoop.io.serializer.Deserializer;
|
||||
import org.apache.hadoop.io.serializer.Serialization;
|
||||
import org.apache.hadoop.io.serializer.SerializationFactory;
|
||||
import org.apache.hadoop.io.serializer.Serializer;
|
||||
import org.junit.Test;
|
||||
|
||||
import static org.junit.Assert.assertEquals;
|
||||
import static org.junit.Assert.assertNotNull;
|
||||
import static org.junit.Assert.assertTrue;
|
||||
|
||||
/** Unit tests for WritableName. */
|
||||
|
@ -63,6 +69,28 @@ public class TestWritableName {
|
|||
}
|
||||
}
|
||||
|
||||
private static class SimpleSerializable {
|
||||
|
||||
}
|
||||
|
||||
private static class SimpleSerializer implements Serialization<SimpleSerializable> {
|
||||
|
||||
@Override
|
||||
public boolean accept(Class<?> c) {
|
||||
return c.equals(SimpleSerializable.class);
|
||||
}
|
||||
|
||||
@Override
|
||||
public Serializer<SimpleSerializable> getSerializer(Class<SimpleSerializable> c) {
|
||||
return null;
|
||||
}
|
||||
|
||||
@Override
|
||||
public Deserializer<SimpleSerializable> getDeserializer(Class<SimpleSerializable> c) {
|
||||
return null;
|
||||
}
|
||||
}
|
||||
|
||||
private static final String testName = "mystring";
|
||||
|
||||
@Test
|
||||
|
@@ -95,7 +123,27 @@ public class TestWritableName {
    // check original name still works
    test = WritableName.getClass(testName, conf);
    assertTrue(test.equals(SimpleWritable.class));
  }

  @Test
  public void testAddNameSerializable() throws Exception {
    Configuration conf = new Configuration();
    conf.set(CommonConfigurationKeys.IO_SERIALIZATIONS_KEY, SimpleSerializer.class.getName());
    SerializationFactory serializationFactory =
        new SerializationFactory(conf);

    String altName = testName + ".alt";

    WritableName.addName(SimpleSerializable.class, altName);

    Class<?> test = WritableName.getClass(altName, conf);
    assertEquals(test, SimpleSerializable.class);
    assertNotNull(serializationFactory.getSerialization(test));

    // check original name still works
    test = WritableName.getClass(SimpleSerializable.class.getName(), conf);
    assertEquals(test, SimpleSerializable.class);
    assertNotNull(serializationFactory.getSerialization(test));
  }
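Unlike setName, addName registers an additional accepted name without replacing the class's own name, and the new test confirms the lookup also works for classes handled by a pluggable Serialization rather than a Writable. A small sketch with a hypothetical legacy alias:

```java
// "com.example.legacy.Record" is a made-up alias kept around for old metadata.
WritableName.addName(Record.class, "com.example.legacy.Record");

Class<?> byAlias = WritableName.getClass("com.example.legacy.Record", conf);
Class<?> byOwnName = WritableName.getClass(Record.class.getName(), conf);
// both resolve to the same class
assertEquals(Record.class, byAlias);
assertEquals(Record.class, byOwnName);
```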
||||
@Test
|
||||
|
|
|
@@ -1168,6 +1168,10 @@ public class TestIPC {
    call(client, addr, serviceClass, conf);
    Connection connection = server.getConnections()[0];
    LOG.info("Connection is from: {}", connection);
    assertEquals(
        "Connection string representation should include both IP address and Host name", 2,
        connection.toString().split(" / ").length);
    int serviceClass2 = connection.getServiceClass();
    assertFalse(noChanged ^ serviceClass == serviceClass2);
    client.stop();

@@ -1336,7 +1340,7 @@ public class TestIPC {
  /**
   * Test the retry count while used in a retry proxy.
   */
  @Test(timeout=60000)
  @Test(timeout=100000)
  public void testRetryProxy() throws IOException {
    final Client client = new Client(LongWritable.class, conf);
@@ -378,7 +378,7 @@ public class TestRPC extends TestRpcBase {
    assertEquals(confReaders, server.getNumReaders());

    server = newServerBuilder(conf)
        .setNumHandlers(1).setnumReaders(3).setQueueSizePerHandler(200)
        .setNumHandlers(1).setNumReaders(3).setQueueSizePerHandler(200)
        .setVerbose(false).build();

    assertEquals(3, server.getNumReaders());

@@ -1849,6 +1849,11 @@ public class TestRPC extends TestRpcBase {
    // if it wasn't fatal, verify there's only one open connection.
    Connection[] conns = server.getConnections();
    assertEquals(reqName, 1, conns.length);
    String connectionInfo = conns[0].toString();
    LOG.info("Connection is from: {}", connectionInfo);
    assertEquals(
        "Connection string representation should include both IP address and Host name", 2,
        connectionInfo.split(" / ").length);
    // verify whether the connection should have been reused.
    if (isDisconnected) {
      assertNotSame(reqName, lastConn, conns[0]);
@ -1,268 +0,0 @@
|
|||
/**
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
* regarding copyright ownership. The ASF licenses this file
|
||||
* to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package org.apache.hadoop.log;
|
||||
|
||||
import com.fasterxml.jackson.databind.JsonNode;
|
||||
import com.fasterxml.jackson.databind.node.ContainerNode;
|
||||
import org.junit.Test;
|
||||
import static org.junit.Assert.*;
|
||||
import org.apache.commons.logging.Log;
|
||||
import org.apache.commons.logging.LogFactory;
|
||||
import org.apache.hadoop.util.Time;
|
||||
import org.apache.log4j.Appender;
|
||||
import org.apache.log4j.Category;
|
||||
import org.apache.log4j.Level;
|
||||
import org.apache.log4j.Logger;
|
||||
import org.apache.log4j.WriterAppender;
|
||||
import org.apache.log4j.spi.HierarchyEventListener;
|
||||
import org.apache.log4j.spi.LoggerFactory;
|
||||
import org.apache.log4j.spi.LoggerRepository;
|
||||
import org.apache.log4j.spi.ThrowableInformation;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.io.StringWriter;
|
||||
import java.io.Writer;
|
||||
import java.net.NoRouteToHostException;
|
||||
import java.util.Enumeration;
|
||||
import java.util.Vector;
|
||||
|
||||
public class TestLog4Json {
|
||||
|
||||
private static final Log LOG = LogFactory.getLog(TestLog4Json.class);
|
||||
|
||||
@Test
|
||||
public void testConstruction() throws Throwable {
|
||||
Log4Json l4j = new Log4Json();
|
||||
String outcome = l4j.toJson(new StringWriter(),
|
||||
"name", 0, "DEBUG", "thread1",
|
||||
"hello, world", null).toString();
|
||||
println("testConstruction", outcome);
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testException() throws Throwable {
|
||||
Exception e =
|
||||
new NoRouteToHostException("that box caught fire 3 years ago");
|
||||
ThrowableInformation ti = new ThrowableInformation(e);
|
||||
Log4Json l4j = new Log4Json();
|
||||
long timeStamp = Time.now();
|
||||
String outcome = l4j.toJson(new StringWriter(),
|
||||
"testException",
|
||||
timeStamp,
|
||||
"INFO",
|
||||
"quoted\"",
|
||||
"new line\n and {}",
|
||||
ti)
|
||||
.toString();
|
||||
println("testException", outcome);
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testNestedException() throws Throwable {
|
||||
Exception e =
|
||||
new NoRouteToHostException("that box caught fire 3 years ago");
|
||||
Exception ioe = new IOException("Datacenter problems", e);
|
||||
ThrowableInformation ti = new ThrowableInformation(ioe);
|
||||
Log4Json l4j = new Log4Json();
|
||||
long timeStamp = Time.now();
|
||||
String outcome = l4j.toJson(new StringWriter(),
|
||||
"testNestedException",
|
||||
timeStamp,
|
||||
"INFO",
|
||||
"quoted\"",
|
||||
"new line\n and {}",
|
||||
ti)
|
||||
.toString();
|
||||
println("testNestedException", outcome);
|
||||
ContainerNode rootNode = Log4Json.parse(outcome);
|
||||
assertEntryEquals(rootNode, Log4Json.LEVEL, "INFO");
|
||||
assertEntryEquals(rootNode, Log4Json.NAME, "testNestedException");
|
||||
assertEntryEquals(rootNode, Log4Json.TIME, timeStamp);
|
||||
assertEntryEquals(rootNode, Log4Json.EXCEPTION_CLASS,
|
||||
ioe.getClass().getName());
|
||||
JsonNode node = assertNodeContains(rootNode, Log4Json.STACK);
|
||||
assertTrue("Not an array: " + node, node.isArray());
|
||||
node = assertNodeContains(rootNode, Log4Json.DATE);
|
||||
assertTrue("Not a string: " + node, node.isTextual());
|
||||
//rather than try and make assertions about the format of the text
|
||||
//message equalling another ISO date, this test asserts that the hypen
|
||||
//and colon characters are in the string.
|
||||
String dateText = node.textValue();
|
||||
assertTrue("No '-' in " + dateText, dateText.contains("-"));
|
||||
assertTrue("No '-' in " + dateText, dateText.contains(":"));
|
||||
|
||||
}
|
||||
|
||||
|
||||
/**
|
||||
* Create a log instance and and log to it
|
||||
* @throws Throwable if it all goes wrong
|
||||
*/
|
||||
@Test
|
||||
public void testLog() throws Throwable {
|
||||
String message = "test message";
|
||||
Throwable throwable = null;
|
||||
String json = logOut(message, throwable);
|
||||
println("testLog", json);
|
||||
}
|
||||
|
||||
/**
|
||||
* Create a log instance and and log to it
|
||||
* @throws Throwable if it all goes wrong
|
||||
*/
|
||||
@Test
|
||||
public void testLogExceptions() throws Throwable {
|
||||
String message = "test message";
|
||||
Throwable inner = new IOException("Directory / not found");
|
||||
Throwable throwable = new IOException("startup failure", inner);
|
||||
String json = logOut(message, throwable);
|
||||
println("testLogExceptions", json);
|
||||
}
|
||||
|
||||
|
||||
void assertEntryEquals(ContainerNode rootNode, String key, String value) {
|
||||
JsonNode node = assertNodeContains(rootNode, key);
|
||||
assertEquals(value, node.textValue());
|
||||
}
|
||||
|
||||
private JsonNode assertNodeContains(ContainerNode rootNode, String key) {
|
||||
JsonNode node = rootNode.get(key);
|
||||
if (node == null) {
|
||||
fail("No entry of name \"" + key + "\" found in " + rootNode.toString());
|
||||
}
|
||||
return node;
|
||||
}
|
||||
|
||||
void assertEntryEquals(ContainerNode rootNode, String key, long value) {
|
||||
JsonNode node = assertNodeContains(rootNode, key);
|
||||
assertEquals(value, node.numberValue());
|
||||
}
|
||||
|
||||
/**
|
||||
* Print out what's going on. The logging APIs aren't used and the text
|
||||
* delimited for more details
|
||||
*
|
||||
* @param name name of operation
|
||||
* @param text text to print
|
||||
*/
|
||||
private void println(String name, String text) {
|
||||
System.out.println(name + ": #" + text + "#");
|
||||
}
|
||||
|
||||
private String logOut(String message, Throwable throwable) {
|
||||
StringWriter writer = new StringWriter();
|
||||
Logger logger = createLogger(writer);
|
||||
logger.info(message, throwable);
|
||||
//remove and close the appender
|
||||
logger.removeAllAppenders();
|
||||
return writer.toString();
|
||||
}
|
||||
|
||||
public Logger createLogger(Writer writer) {
|
||||
TestLoggerRepository repo = new TestLoggerRepository();
|
||||
Logger logger = repo.getLogger("test");
|
||||
Log4Json layout = new Log4Json();
|
||||
WriterAppender appender = new WriterAppender(layout, writer);
|
||||
logger.addAppender(appender);
|
||||
return logger;
|
||||
}
|
||||
|
||||
/**
|
||||
* This test logger avoids integrating with the main runtimes Logger hierarchy
|
||||
* in ways the reader does not want to know.
|
||||
*/
|
||||
private static class TestLogger extends Logger {
|
||||
private TestLogger(String name, LoggerRepository repo) {
|
||||
super(name);
|
||||
repository = repo;
|
||||
setLevel(Level.INFO);
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
public static class TestLoggerRepository implements LoggerRepository {
|
||||
@Override
|
||||
public void addHierarchyEventListener(HierarchyEventListener listener) {
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean isDisabled(int level) {
|
||||
return false;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void setThreshold(Level level) {
|
||||
}
|
||||
|
||||
@Override
|
||||
public void setThreshold(String val) {
|
||||
}
|
||||
|
||||
@Override
|
||||
public void emitNoAppenderWarning(Category cat) {
|
||||
}
|
||||
|
||||
@Override
|
||||
public Level getThreshold() {
|
||||
return Level.ALL;
|
||||
}
|
||||
|
||||
@Override
|
||||
public Logger getLogger(String name) {
|
||||
return new TestLogger(name, this);
|
||||
}
|
||||
|
||||
@Override
|
||||
public Logger getLogger(String name, LoggerFactory factory) {
|
||||
return new TestLogger(name, this);
|
||||
}
|
||||
|
||||
@Override
|
||||
public Logger getRootLogger() {
|
||||
return new TestLogger("root", this);
|
||||
}
|
||||
|
||||
@Override
|
||||
public Logger exists(String name) {
|
||||
return null;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void shutdown() {
|
||||
}
|
||||
|
||||
@Override
|
||||
public Enumeration getCurrentLoggers() {
|
||||
return new Vector().elements();
|
||||
}
|
||||
|
||||
@Override
|
||||
public Enumeration getCurrentCategories() {
|
||||
return new Vector().elements();
|
||||
}
|
||||
|
||||
@Override
|
||||
public void fireAddAppenderEvent(Category logger, Appender appender) {
|
||||
}
|
||||
|
||||
@Override
|
||||
public void resetConfiguration() {
|
||||
}
|
||||
}
|
||||
}
|
|
@ -22,9 +22,6 @@ import java.net.SocketException;
|
|||
import java.net.URI;
|
||||
import java.util.concurrent.Callable;
|
||||
|
||||
import org.apache.commons.logging.Log;
|
||||
import org.apache.commons.logging.LogFactory;
|
||||
import org.apache.commons.logging.impl.Log4JLogger;
|
||||
import org.apache.hadoop.HadoopIllegalArgumentException;
|
||||
import org.apache.hadoop.conf.Configuration;
|
||||
import org.apache.hadoop.fs.CommonConfigurationKeys;
|
||||
|
@ -70,8 +67,7 @@ public class TestLogLevel extends KerberosSecurityTestcase {
|
|||
private final String logName = TestLogLevel.class.getName();
|
||||
private String clientPrincipal;
|
||||
private String serverPrincipal;
|
||||
private final Log testlog = LogFactory.getLog(logName);
|
||||
private final Logger log = ((Log4JLogger)testlog).getLogger();
|
||||
private final Logger log = Logger.getLogger(logName);
|
||||
private final static String PRINCIPAL = "loglevel.principal";
|
||||
private final static String KEYTAB = "loglevel.keytab";
|
||||
private static final String PREFIX = "hadoop.http.authentication.";
|
||||
|
|
|
@ -18,21 +18,27 @@
|
|||
|
||||
package org.apache.hadoop.security.authentication.server;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.io.PrintWriter;
|
||||
import java.security.Principal;
|
||||
import java.util.Collection;
|
||||
import java.util.Collections;
|
||||
import java.util.Enumeration;
|
||||
import java.util.HashMap;
|
||||
import java.util.Locale;
|
||||
import java.util.Map;
|
||||
|
||||
import javax.servlet.FilterConfig;
|
||||
import javax.servlet.FilterChain;
|
||||
import javax.servlet.ServletContext;
|
||||
import javax.servlet.ServletOutputStream;
|
||||
import javax.servlet.ServletResponse;
|
||||
import javax.servlet.ServletRequest;
|
||||
import javax.servlet.http.Cookie;
|
||||
import javax.servlet.http.HttpServletRequest;
|
||||
import javax.servlet.http.HttpServletResponse;
|
||||
|
||||
import static org.assertj.core.api.Assertions.assertThat;
|
||||
import org.glassfish.grizzly.servlet.HttpServletResponseImpl;
|
||||
import org.junit.Test;
|
||||
import org.mockito.Mockito;
|
||||
|
||||
|
@ -76,8 +82,192 @@ public class TestProxyUserAuthenticationFilter {
|
|||
}
|
||||
}
|
||||
|
||||
private class HttpServletResponseForTest extends HttpServletResponseImpl {
|
||||
private class HttpServletResponseForTest implements HttpServletResponse {
|
||||
|
||||
@Override
|
||||
public void addCookie(Cookie cookie) {
|
||||
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean containsHeader(String name) {
|
||||
return false;
|
||||
}
|
||||
|
||||
@Override
|
||||
public String encodeURL(String url) {
|
||||
return null;
|
||||
}
|
||||
|
||||
@Override
|
||||
public String encodeRedirectURL(String url) {
|
||||
return null;
|
||||
}
|
||||
|
||||
@Override
|
||||
public String encodeUrl(String url) {
|
||||
return null;
|
||||
}
|
||||
|
||||
@Override
|
||||
public String encodeRedirectUrl(String url) {
|
||||
return null;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void sendError(int sc, String msg) throws IOException {
|
||||
|
||||
}
|
||||
|
||||
@Override
|
||||
public void sendError(int sc) throws IOException {
|
||||
|
||||
}
|
||||
|
||||
@Override
|
||||
public void sendRedirect(String location) throws IOException {
|
||||
|
||||
}
|
||||
|
||||
@Override
|
||||
public void setDateHeader(String name, long date) {
|
||||
|
||||
}
|
||||
|
||||
@Override
|
||||
public void addDateHeader(String name, long date) {
|
||||
|
||||
}
|
||||
|
||||
@Override
|
||||
public void setHeader(String name, String value) {
|
||||
|
||||
}
|
||||
|
||||
@Override
|
||||
public void addHeader(String name, String value) {
|
||||
|
||||
}
|
||||
|
||||
@Override
|
||||
public void setIntHeader(String name, int value) {
|
||||
|
||||
}
|
||||
|
||||
@Override
|
||||
public void addIntHeader(String name, int value) {
|
||||
|
||||
}
|
||||
|
||||
@Override
|
||||
public void setStatus(int sc) {
|
||||
|
||||
}
|
||||
|
||||
@Override
|
||||
public void setStatus(int sc, String sm) {
|
||||
|
||||
}
|
||||
|
||||
@Override
|
||||
public int getStatus() {
|
||||
return 0;
|
||||
}
|
||||
|
||||
@Override
|
||||
public String getHeader(String name) {
|
||||
return null;
|
||||
}
|
||||
|
||||
@Override
|
||||
public Collection<String> getHeaders(String name) {
|
||||
return null;
|
||||
}
|
||||
|
||||
@Override
|
||||
public Collection<String> getHeaderNames() {
|
||||
return null;
|
||||
}
|
||||
|
||||
@Override
|
||||
public String getCharacterEncoding() {
|
||||
return null;
|
||||
}
|
||||
|
||||
@Override
|
||||
public String getContentType() {
|
||||
return null;
|
||||
}
|
||||
|
||||
@Override
|
||||
public ServletOutputStream getOutputStream() throws IOException {
|
||||
return null;
|
||||
}
|
||||
|
||||
@Override
|
||||
public PrintWriter getWriter() throws IOException {
|
||||
return null;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void setCharacterEncoding(String charset) {
|
||||
|
||||
}
|
||||
|
||||
@Override
|
||||
public void setContentLength(int len) {
|
||||
|
||||
}
|
||||
|
||||
@Override
|
||||
public void setContentLengthLong(long len) {
|
||||
|
||||
}
|
||||
|
||||
@Override
|
||||
public void setContentType(String type) {
|
||||
|
||||
}
|
||||
|
||||
@Override
|
||||
public void setBufferSize(int size) {
|
||||
|
||||
}
|
||||
|
||||
@Override
|
||||
public int getBufferSize() {
|
||||
return 0;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void flushBuffer() throws IOException {
|
||||
|
||||
}
|
||||
|
||||
@Override
|
||||
public void resetBuffer() {
|
||||
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean isCommitted() {
|
||||
return false;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void reset() {
|
||||
|
||||
}
|
||||
|
||||
@Override
|
||||
public void setLocale(Locale loc) {
|
||||
|
||||
}
|
||||
|
||||
@Override
|
||||
public Locale getLocale() {
|
||||
return null;
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
|
|
|
@ -49,8 +49,6 @@ import java.util.function.Supplier;
|
|||
import java.util.regex.Pattern;
|
||||
|
||||
import org.apache.commons.lang3.RandomStringUtils;
|
||||
import org.apache.commons.logging.Log;
|
||||
import org.apache.commons.logging.impl.Log4JLogger;
|
||||
import org.apache.hadoop.fs.FileSystem;
|
||||
import org.apache.hadoop.fs.FileUtil;
|
||||
import org.apache.hadoop.fs.Path;
|
||||
|
@ -117,29 +115,11 @@ public abstract class GenericTestUtils {
|
|||
public static final String ERROR_INVALID_ARGUMENT =
|
||||
"Total wait time should be greater than check interval time";
|
||||
|
||||
/**
|
||||
* @deprecated use {@link #disableLog(org.slf4j.Logger)} instead
|
||||
*/
|
||||
@Deprecated
|
||||
@SuppressWarnings("unchecked")
|
||||
public static void disableLog(Log log) {
|
||||
// We expect that commons-logging is a wrapper around Log4j.
|
||||
disableLog((Log4JLogger) log);
|
||||
}
|
||||
|
||||
@Deprecated
|
||||
public static Logger toLog4j(org.slf4j.Logger logger) {
|
||||
return LogManager.getLogger(logger.getName());
|
||||
}
|
||||
|
||||
/**
|
||||
* @deprecated use {@link #disableLog(org.slf4j.Logger)} instead
|
||||
*/
|
||||
@Deprecated
|
||||
public static void disableLog(Log4JLogger log) {
|
||||
log.getLogger().setLevel(Level.OFF);
|
||||
}
|
||||
|
||||
/**
|
||||
* @deprecated use {@link #disableLog(org.slf4j.Logger)} instead
|
||||
*/
|
||||
|
@ -152,45 +132,6 @@ public abstract class GenericTestUtils {
|
|||
disableLog(toLog4j(logger));
|
||||
}
|
||||
|
||||
/**
|
||||
* @deprecated
|
||||
* use {@link #setLogLevel(org.slf4j.Logger, org.slf4j.event.Level)} instead
|
||||
*/
|
||||
@Deprecated
|
||||
@SuppressWarnings("unchecked")
|
||||
public static void setLogLevel(Log log, Level level) {
|
||||
// We expect that commons-logging is a wrapper around Log4j.
|
||||
setLogLevel((Log4JLogger) log, level);
|
||||
}
|
||||
|
||||
/**
|
||||
* A helper used in log4j2 migration to accept legacy
|
||||
* org.apache.commons.logging apis.
|
||||
* <p>
|
||||
* And will be removed after migration.
|
||||
*
|
||||
* @param log a log
|
||||
* @param level level to be set
|
||||
*/
|
||||
@Deprecated
|
||||
public static void setLogLevel(Log log, org.slf4j.event.Level level) {
|
||||
setLogLevel(log, Level.toLevel(level.toString()));
|
||||
}
|
||||
|
||||
/**
|
||||
* @deprecated
|
||||
* use {@link #setLogLevel(org.slf4j.Logger, org.slf4j.event.Level)} instead
|
||||
*/
|
||||
@Deprecated
|
||||
public static void setLogLevel(Log4JLogger log, Level level) {
|
||||
log.getLogger().setLevel(level);
|
||||
}
|
||||
|
||||
/**
|
||||
* @deprecated
|
||||
* use {@link #setLogLevel(org.slf4j.Logger, org.slf4j.event.Level)} instead
|
||||
*/
|
||||
@Deprecated
|
||||
public static void setLogLevel(Logger logger, Level level) {
|
||||
logger.setLevel(level);
|
||||
}
|
||||
|
@@ -535,13 +476,15 @@ public abstract class GenericTestUtils {
    private WriterAppender appender;
    private Logger logger;

    public static LogCapturer captureLogs(Log l) {
      Logger logger = ((Log4JLogger)l).getLogger();
      return new LogCapturer(logger);
    public static LogCapturer captureLogs(org.slf4j.Logger logger) {
      if (logger.getName().equals("root")) {
        return new LogCapturer(org.apache.log4j.Logger.getRootLogger());
      }
      return new LogCapturer(toLog4j(logger));
    }

    public static LogCapturer captureLogs(org.slf4j.Logger logger) {
      return new LogCapturer(toLog4j(logger));
    public static LogCapturer captureLogs(Logger logger) {
      return new LogCapturer(logger);
    }

    private LogCapturer(Logger logger) {
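With the commons-logging overload gone, tests capture output by handing LogCapturer an slf4j logger (or, via the second overload, a raw log4j logger). A minimal usage sketch; the logger name and the expected message are placeholders, and getOutput()/stopCapturing() are assumed to be the existing LogCapturer accessors:

```java
GenericTestUtils.LogCapturer logs = GenericTestUtils.LogCapturer.captureLogs(
    LoggerFactory.getLogger("org.example.ServiceUnderTest"));
try {
  // ... run the code under test, which logs through slf4j ...
} finally {
  logs.stopCapturing();
}
assertTrue(logs.getOutput().contains("expected message"));
```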
@ -140,7 +140,7 @@ public class TestGenericsUtil {
|
|||
|
||||
@Test
|
||||
public void testIsLog4jLogger() throws Exception {
|
||||
assertFalse("False if clazz is null", GenericsUtil.isLog4jLogger(null));
|
||||
assertFalse("False if clazz is null", GenericsUtil.isLog4jLogger((Class<?>) null));
|
||||
assertTrue("The implementation is Log4j",
|
||||
GenericsUtil.isLog4jLogger(TestGenericsUtil.class));
|
||||
}
|
||||
|
|
|
@ -18,10 +18,10 @@
|
|||
|
||||
package org.apache.hadoop.util;
|
||||
|
||||
import org.apache.commons.logging.LogFactory;
|
||||
import org.apache.hadoop.test.GenericTestUtils;
|
||||
import org.junit.Assert;
|
||||
import org.junit.Test;
|
||||
import org.slf4j.LoggerFactory;
|
||||
|
||||
import java.io.ByteArrayInputStream;
|
||||
import java.io.ByteArrayOutputStream;
|
||||
|
@ -43,7 +43,7 @@ public class TestJarFinder {
|
|||
public void testJar() throws Exception {
|
||||
|
||||
//picking a class that is for sure in a JAR in the classpath
|
||||
String jar = JarFinder.getJar(LogFactory.class);
|
||||
String jar = JarFinder.getJar(LoggerFactory.class);
|
||||
Assert.assertTrue(new File(jar).exists());
|
||||
}
|
||||
|
||||
|
|
|
@@ -32,9 +32,9 @@ public class TestSignalLogger {
  @Test(timeout=60000)
  public void testInstall() throws Exception {
    Assume.assumeTrue(SystemUtils.IS_OS_UNIX);
    SignalLogger.INSTANCE.register(LogAdapter.create(LOG));
    SignalLogger.INSTANCE.register(LOG);
    try {
      SignalLogger.INSTANCE.register(LogAdapter.create(LOG));
      SignalLogger.INSTANCE.register(LOG);
      Assert.fail("expected IllegalStateException from double registration");
    } catch (IllegalStateException e) {
      // fall through
@@ -20,6 +20,7 @@ package org.apache.hadoop.util;
import java.io.InputStream;
import java.io.StringReader;
import java.io.StringWriter;
import java.util.concurrent.atomic.AtomicBoolean;
import javax.xml.XMLConstants;
import javax.xml.parsers.DocumentBuilder;
import javax.xml.parsers.SAXParser;

@@ -134,10 +135,16 @@ public class TestXMLUtils extends AbstractHadoopTestBase {
  @Test
  public void testBestEffortSetAttribute() throws Exception {
    TransformerFactory factory = TransformerFactory.newInstance();
    Assert.assertFalse("unexpected attribute results in return of false",
        XMLUtils.bestEffortSetAttribute(factory, "unsupportedAttribute false", "abc"));
    Assert.assertTrue("expected attribute results in return of false",
        XMLUtils.bestEffortSetAttribute(factory, XMLConstants.ACCESS_EXTERNAL_DTD, ""));
    AtomicBoolean flag1 = new AtomicBoolean(true);
    XMLUtils.bestEffortSetAttribute(factory, flag1, "unsupportedAttribute false", "abc");
    Assert.assertFalse("unexpected attribute results in return of false?", flag1.get());
    AtomicBoolean flag2 = new AtomicBoolean(true);
    XMLUtils.bestEffortSetAttribute(factory, flag2, XMLConstants.ACCESS_EXTERNAL_DTD, "");
    Assert.assertTrue("expected attribute results in return of true?", flag2.get());
    AtomicBoolean flag3 = new AtomicBoolean(false);
    XMLUtils.bestEffortSetAttribute(factory, flag3, XMLConstants.ACCESS_EXTERNAL_DTD, "");
    Assert.assertFalse("expected attribute results in return of false if input flag is false?",
        flag3.get());
  }

  private static InputStream getResourceStream(final String filename) {
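The AtomicBoolean variant exercised above accumulates results across calls: the flag is only ever cleared, never set back to true, so a caller can apply several hardening attributes and check once at the end whether every one of them was accepted. A sketch of that usage (the second attribute name is just an illustrative choice):

```java
TransformerFactory factory = TransformerFactory.newInstance();
AtomicBoolean allAccepted = new AtomicBoolean(true);
// each call clears the flag if the factory rejects the attribute
XMLUtils.bestEffortSetAttribute(factory, allAccepted, XMLConstants.ACCESS_EXTERNAL_DTD, "");
XMLUtils.bestEffortSetAttribute(factory, allAccepted, XMLConstants.ACCESS_EXTERNAL_STYLESHEET, "");
if (!allAccepted.get()) {
  // at least one attribute was rejected by this TransformerFactory implementation
}
```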
@ -20,7 +20,7 @@ package org.apache.hadoop.crypto.key.kms.server;
|
|||
import org.apache.hadoop.classification.InterfaceAudience;
|
||||
import org.apache.hadoop.conf.Configuration;
|
||||
import org.apache.hadoop.fs.Path;
|
||||
import org.apache.log4j.PropertyConfigurator;
|
||||
|
||||
import org.slf4j.Logger;
|
||||
import org.slf4j.LoggerFactory;
|
||||
|
||||
|
@ -104,8 +104,6 @@ public class KMSConfiguration {
|
|||
|
||||
public static final boolean KEY_AUTHORIZATION_ENABLE_DEFAULT = true;
|
||||
|
||||
private static final String LOG4J_PROPERTIES = "kms-log4j.properties";
|
||||
|
||||
static {
|
||||
Configuration.addDefaultResource(KMS_DEFAULT_XML);
|
||||
Configuration.addDefaultResource(KMS_SITE_XML);
|
||||
|
@@ -163,31 +161,20 @@ public class KMSConfiguration {
    return newer;
  }

  public static void initLogging() {
    String confDir = System.getProperty(KMS_CONFIG_DIR);
    if (confDir == null) {
      throw new RuntimeException("System property '" +
          KMSConfiguration.KMS_CONFIG_DIR + "' not defined");
  /**
   * Validate whether "kms.config.dir" and "log4j.configuration" are defined in the System
   * properties. If not, abort the KMS WebServer.
   */
  public static void validateSystemProps() {
    if (System.getProperty(KMS_CONFIG_DIR) == null) {
      String errorMsg = "System property '" + KMS_CONFIG_DIR + "' not defined";
      System.err.println("Aborting KMSWebServer because " + errorMsg);
      throw new RuntimeException(errorMsg);
    }
    if (System.getProperty("log4j.configuration") == null) {
      System.setProperty("log4j.defaultInitOverride", "true");
      boolean fromClasspath = true;
      File log4jConf = new File(confDir, LOG4J_PROPERTIES).getAbsoluteFile();
      if (log4jConf.exists()) {
        PropertyConfigurator.configureAndWatch(log4jConf.getPath(), 1000);
        fromClasspath = false;
      } else {
        ClassLoader cl = Thread.currentThread().getContextClassLoader();
        URL log4jUrl = cl.getResource(LOG4J_PROPERTIES);
        if (log4jUrl != null) {
          PropertyConfigurator.configure(log4jUrl);
        }
      }
      LOG.debug("KMS log starting");
      if (fromClasspath) {
        LOG.warn("Log4j configuration file '{}' not found", LOG4J_PROPERTIES);
        LOG.warn("Logging with INFO level to standard output");
      }
      String errorMsg = "System property 'log4j.configuration' not defined";
      System.err.println("Aborting KMSWebServer because " + errorMsg);
      throw new RuntimeException(errorMsg);
    }
  }
}
@@ -185,7 +185,7 @@ public class KMSWebServer {
  }

  public static void main(String[] args) throws Exception {
    KMSConfiguration.initLogging();
    KMSConfiguration.validateSystemProps();
    StringUtils.startupShutdownMessage(KMSWebServer.class, args, LOG);
    Configuration conf = KMSConfiguration.getKMSConf();
    Configuration sslConf = SSLFactory.readSSLConfiguration(conf, SSLFactory.Mode.SERVER);
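Taken together with the KMSConfiguration change above, the effect is that the KMS no longer tries to locate kms-log4j.properties on its own (from the config directory or the classpath) at startup; it simply refuses to start unless the launcher passes -Dlog4j.configuration, which is exactly what the kms.sh change that follows adds.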
@@ -49,6 +49,8 @@ function hadoop_subcommand_kms
      "-Dkms.config.dir=${HADOOP_CONF_DIR}"
    hadoop_add_param HADOOP_OPTS "-Dkms.log.dir=" \
      "-Dkms.log.dir=${HADOOP_LOG_DIR}"
    hadoop_add_param HADOOP_OPTS "-Dlog4j.configuration=" \
      "-Dlog4j.configuration=file:${HADOOP_CONF_DIR}/kms-log4j.properties"

    if [[ "${HADOOP_DAEMON_MODE}" == "default" ]] ||
       [[ "${HADOOP_DAEMON_MODE}" == "start" ]]; then
|
|
@ -63,11 +63,6 @@
|
|||
<artifactId>mockito-core</artifactId>
|
||||
<scope>test</scope>
|
||||
</dependency>
|
||||
<dependency>
|
||||
<groupId>commons-logging</groupId>
|
||||
<artifactId>commons-logging</artifactId>
|
||||
<scope>compile</scope>
|
||||
</dependency>
|
||||
<dependency>
|
||||
<groupId>javax.servlet</groupId>
|
||||
<artifactId>javax.servlet-api</artifactId>
|
||||
|
|
|
@ -17,6 +17,7 @@
|
|||
*/
|
||||
package org.apache.hadoop.oncrpc;
|
||||
|
||||
import java.net.InetSocketAddress;
|
||||
import java.net.SocketAddress;
|
||||
import java.nio.ByteBuffer;
|
||||
import java.util.List;
|
||||
|
@ -26,6 +27,7 @@ import io.netty.buffer.Unpooled;
|
|||
import io.netty.channel.ChannelHandler;
|
||||
import io.netty.channel.ChannelHandlerContext;
|
||||
import io.netty.channel.ChannelInboundHandlerAdapter;
|
||||
import io.netty.channel.SimpleChannelInboundHandler;
|
||||
import io.netty.channel.socket.DatagramPacket;
|
||||
import io.netty.handler.codec.ByteToMessageDecoder;
|
||||
import org.apache.hadoop.classification.VisibleForTesting;
|
||||
|
@@ -172,15 +174,18 @@ public final class RpcUtil {
   */
  @ChannelHandler.Sharable
  private static final class RpcUdpResponseStage extends
      ChannelInboundHandlerAdapter {
      SimpleChannelInboundHandler<RpcResponse> {
    public RpcUdpResponseStage() {
      // do not auto release the RpcResponse message.
      super(false);
    }

    @Override
    public void channelRead(ChannelHandlerContext ctx, Object msg)
        throws Exception {
      RpcResponse r = (RpcResponse) msg;
      // TODO: check out https://github.com/netty/netty/issues/1282 for
      // correct usage
      ctx.channel().writeAndFlush(r.data());
    protected void channelRead0(ChannelHandlerContext ctx,
        RpcResponse response) throws Exception {
      ByteBuf buf = Unpooled.wrappedBuffer(response.data());
      ctx.writeAndFlush(new DatagramPacket(
          buf, (InetSocketAddress) response.recipient()));
    }
  }
}
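The apparent motivation here, inferred from the diff rather than stated by the author: on an unconnected datagram channel Netty cannot route a bare ByteBuf, so the UDP reply has to be wrapped in a DatagramPacket addressed to response.recipient(). Switching to SimpleChannelInboundHandler<RpcResponse> gives type-safe dispatch, and super(false) stops the handler from auto-releasing the RpcResponse, leaving the wrapped buffer's lifetime to the outbound write.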
@@ -117,15 +117,13 @@ final class Portmap {
        .childOption(ChannelOption.SO_REUSEADDR, true)
        .channel(NioServerSocketChannel.class)
        .childHandler(new ChannelInitializer<SocketChannel>() {
          private final IdleStateHandler idleStateHandler = new IdleStateHandler(
              0, 0, idleTimeMilliSeconds, TimeUnit.MILLISECONDS);

          @Override
          protected void initChannel(SocketChannel ch) throws Exception {
            ChannelPipeline p = ch.pipeline();

            p.addLast(RpcUtil.constructRpcFrameDecoder(),
                RpcUtil.STAGE_RPC_MESSAGE_PARSER, idleStateHandler, handler,
                RpcUtil.STAGE_RPC_MESSAGE_PARSER, new IdleStateHandler(0, 0,
                    idleTimeMilliSeconds, TimeUnit.MILLISECONDS), handler,
                RpcUtil.STAGE_RPC_TCP_RESPONSE);
          }});
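Likely rationale for this one: IdleStateHandler keeps per-connection timing state and is not a @Sharable handler, so holding a single instance in the ChannelInitializer and installing it into every accepted connection's pipeline is unsafe. Building a fresh IdleStateHandler inside initChannel gives each connection its own instance. This is an inference from the diff, not a statement from the commit.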
@ -23,8 +23,10 @@ import java.net.DatagramPacket;
|
|||
import java.net.DatagramSocket;
|
||||
import java.net.InetSocketAddress;
|
||||
import java.net.Socket;
|
||||
import java.util.Arrays;
|
||||
import java.util.Map;
|
||||
|
||||
import org.apache.hadoop.oncrpc.RpcReply;
|
||||
import org.junit.Assert;
|
||||
|
||||
import org.apache.hadoop.oncrpc.RpcCall;
|
||||
|
@ -35,6 +37,8 @@ import org.junit.AfterClass;
|
|||
import org.junit.BeforeClass;
|
||||
import org.junit.Test;
|
||||
|
||||
import static org.junit.Assert.assertEquals;
|
||||
|
||||
public class TestPortmap {
|
||||
private static Portmap pm = new Portmap();
|
||||
private static final int SHORT_TIMEOUT_MILLISECONDS = 10;
|
||||
|
@ -92,6 +96,19 @@ public class TestPortmap {
|
|||
pm.getUdpServerLoAddress());
|
||||
try {
|
||||
s.send(p);
|
||||
|
||||
// verify that portmap server responds a UDF packet back to the client
|
||||
byte[] receiveData = new byte[65535];
|
||||
DatagramPacket receivePacket = new DatagramPacket(receiveData,
|
||||
receiveData.length);
|
||||
s.setSoTimeout(2000);
|
||||
s.receive(receivePacket);
|
||||
|
||||
// verify that the registration is accepted.
|
||||
XDR xdr = new XDR(Arrays.copyOfRange(receiveData, 0,
|
||||
receivePacket.getLength()));
|
||||
RpcReply reply = RpcReply.read(xdr);
|
||||
assertEquals(reply.getState(), RpcReply.ReplyState.MSG_ACCEPTED);
|
||||
} finally {
|
||||
s.close();
|
||||
}
|
||||
|
|
|
@ -61,10 +61,6 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd">
|
|||
<artifactId>hadoop-common</artifactId>
|
||||
<scope>provided</scope>
|
||||
<exclusions>
|
||||
<exclusion>
|
||||
<groupId>commons-logging</groupId>
|
||||
<artifactId>commons-logging</artifactId>
|
||||
</exclusion>
|
||||
<exclusion>
|
||||
<groupId>log4j</groupId>
|
||||
<artifactId>log4j</artifactId>
|
||||
|
|
|
@ -224,7 +224,7 @@ public class DFSInputStream extends FSInputStream
|
|||
}
|
||||
|
||||
/**
|
||||
* Grab the open-file info from namenode
|
||||
* Grab the open-file info from namenode.
|
||||
* @param refreshLocatedBlocks whether to re-fetch locatedblocks
|
||||
*/
|
||||
void openInfo(boolean refreshLocatedBlocks) throws IOException {
|
||||
|
@ -940,7 +940,8 @@ public class DFSInputStream extends FSInputStream
|
|||
* @return Returns chosen DNAddrPair; Can be null if refetchIfRequired is
|
||||
* false.
|
||||
*/
|
||||
private DNAddrPair chooseDataNode(LocatedBlock block,
|
||||
@VisibleForTesting
|
||||
DNAddrPair chooseDataNode(LocatedBlock block,
|
||||
Collection<DatanodeInfo> ignoredNodes, boolean refetchIfRequired)
|
||||
throws IOException {
|
||||
while (true) {
|
||||
|
@@ -955,6 +956,14 @@ public class DFSInputStream extends FSInputStream
      }
    }

  /**
   * RefetchLocations should only be called when there are no active requests
   * to datanodes. In the hedged read case this means futures should be empty.
   * @param block The locatedBlock to get new datanode locations for.
   * @param ignoredNodes A list of ignored nodes. This list can be null and can be cleared.
   * @return the locatedBlock with updated datanode locations.
   * @throws IOException
   */
  private LocatedBlock refetchLocations(LocatedBlock block,
      Collection<DatanodeInfo> ignoredNodes) throws IOException {
    String errMsg = getBestNodeDNAddrPairErrorString(block.getLocations(),

@@ -999,13 +1008,24 @@ public class DFSInputStream extends FSInputStream
      throw new InterruptedIOException(
          "Interrupted while choosing DataNode for read.");
    }
    clearLocalDeadNodes(); //2nd option is to remove only nodes[blockId]
    clearCachedNodeState(ignoredNodes);
    openInfo(true);
    block = refreshLocatedBlock(block);
    failures++;
    return block;
  }

  /**
   * Clear both the dead nodes and the ignored nodes
   * @param ignoredNodes is cleared
   */
  private void clearCachedNodeState(Collection<DatanodeInfo> ignoredNodes) {
    clearLocalDeadNodes(); //2nd option is to remove only nodes[blockId]
    if (ignoredNodes != null) {
      ignoredNodes.clear();
    }
  }

  /**
   * Get the best node from which to stream the data.
   * @param block LocatedBlock, containing nodes in priority order.

@@ -1337,8 +1357,12 @@ public class DFSInputStream extends FSInputStream
        } catch (InterruptedException ie) {
          // Ignore and retry
        }
        if (refetch) {
          refetchLocations(block, ignored);
        // If refetch is true, then all nodes are in deadNodes or ignoredNodes.
        // We should loop through all futures and remove them, so we do not
        // have concurrent requests to the same node.
        // Once all futures are cleared, we can clear the ignoredNodes and retry.
        if (refetch && futures.isEmpty()) {
          block = refetchLocations(block, ignored);
        }
        // We got here if exception. Ignore this node on next go around IFF
        // we found a chosenNode to hedge read against.
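The hedged-read fix reads as follows: refetchLocations clears both the local dead-node list and the caller's ignoredNodes, so invoking it while hedged requests are still outstanding could let a new request be sent to a node that already has one in flight. Guarding the call with futures.isEmpty() defers the refresh until every outstanding future has been drained from the loop, and the refreshed LocatedBlock is now assigned back to block so later attempts use the updated locations.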
@ -293,9 +293,7 @@ public class DFSStripedOutputStream extends DFSOutputStream
|
|||
DataChecksum checksum, String[] favoredNodes)
|
||||
throws IOException {
|
||||
super(dfsClient, src, stat, flag, progress, checksum, favoredNodes, false);
|
||||
if (LOG.isDebugEnabled()) {
|
||||
LOG.debug("Creating DFSStripedOutputStream for " + src);
|
||||
}
|
||||
LOG.debug("Creating DFSStripedOutputStream for {}", src);
|
||||
|
||||
ecPolicy = stat.getErasureCodingPolicy();
|
||||
final int numParityBlocks = ecPolicy.getNumParityUnits();
|
||||
|
|
|
@ -135,16 +135,12 @@ public class HAUtilClient {
|
|||
HdfsConstants.HDFS_URI_SCHEME)
|
||||
+ "//" + specificToken.getService());
|
||||
ugi.addToken(alias, specificToken);
|
||||
if (LOG.isDebugEnabled()) {
|
||||
LOG.debug("Mapped HA service delegation token for logical URI " +
|
||||
haUri + " to namenode " + singleNNAddr);
|
||||
}
|
||||
LOG.debug("Mapped HA service delegation token for logical URI {}" +
|
||||
" to namenode {}", haUri, singleNNAddr);
|
||||
}
|
||||
} else {
|
||||
if (LOG.isDebugEnabled()) {
|
||||
LOG.debug("No HA service delegation token found for logical URI " +
|
||||
haUri);
|
||||
}
|
||||
LOG.debug("No HA service delegation token found for logical URI {}",
|
||||
haUri);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
@ -53,6 +53,7 @@ public final class DfsPathCapabilities {
|
|||
case CommonPathCapabilities.FS_SNAPSHOTS:
|
||||
case CommonPathCapabilities.FS_STORAGEPOLICY:
|
||||
case CommonPathCapabilities.FS_XATTRS:
|
||||
case CommonPathCapabilities.FS_TRUNCATE:
|
||||
return Optional.of(true);
|
||||
case CommonPathCapabilities.FS_SYMLINKS:
|
||||
return Optional.of(FileSystem.areSymlinksEnabled());
|
||||
|
|
|
@ -19,8 +19,8 @@
|
|||
package org.apache.hadoop.fs;
|
||||
|
||||
import static org.junit.Assert.assertEquals;
|
||||
import static org.junit.Assert.assertFalse;
|
||||
import static org.junit.Assert.assertNotSame;
|
||||
import static org.junit.Assert.assertNotEquals;
|
||||
|
||||
import org.junit.BeforeClass;
|
||||
import org.junit.Test;
|
||||
|
@ -77,18 +77,18 @@ public class TestXAttr {
|
|||
assertEquals(XATTR3, XATTR3);
|
||||
assertEquals(XATTR4, XATTR4);
|
||||
assertEquals(XATTR5, XATTR5);
|
||||
assertFalse(XATTR1.equals(XATTR2));
|
||||
assertFalse(XATTR2.equals(XATTR3));
|
||||
assertFalse(XATTR3.equals(XATTR4));
|
||||
assertFalse(XATTR4.equals(XATTR5));
|
||||
assertNotEquals(XATTR1, XATTR2);
|
||||
assertNotEquals(XATTR2, XATTR3);
|
||||
assertNotEquals(XATTR3, XATTR4);
|
||||
assertNotEquals(XATTR4, XATTR5);
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testXAttrHashCode() {
|
||||
assertEquals(XATTR.hashCode(), XATTR1.hashCode());
|
||||
assertFalse(XATTR1.hashCode() == XATTR2.hashCode());
|
||||
assertFalse(XATTR2.hashCode() == XATTR3.hashCode());
|
||||
assertFalse(XATTR3.hashCode() == XATTR4.hashCode());
|
||||
assertFalse(XATTR4.hashCode() == XATTR5.hashCode());
|
||||
assertNotEquals(XATTR1.hashCode(), XATTR2.hashCode());
|
||||
assertNotEquals(XATTR2.hashCode(), XATTR3.hashCode());
|
||||
assertNotEquals(XATTR3.hashCode(), XATTR4.hashCode());
|
||||
assertNotEquals(XATTR4.hashCode(), XATTR5.hashCode());
|
||||
}
|
||||
}
|
||||
|
|
|
@ -342,7 +342,7 @@ public class TestByteArrayManager {
|
|||
}
|
||||
|
||||
if ((i & 0xFF) == 0) {
|
||||
LOG.info("randomRecycler sleep, i=" + i);
|
||||
LOG.info("randomRecycler sleep, i={}", i);
|
||||
sleepMs(100);
|
||||
}
|
||||
}
|
||||
|
|
|
@ -1646,6 +1646,7 @@ public class HttpFSFileSystem extends FileSystem
|
|||
case CommonPathCapabilities.FS_SNAPSHOTS:
|
||||
case CommonPathCapabilities.FS_STORAGEPOLICY:
|
||||
case CommonPathCapabilities.FS_XATTRS:
|
||||
case CommonPathCapabilities.FS_TRUNCATE:
|
||||
return true;
|
||||
case CommonPathCapabilities.FS_SYMLINKS:
|
||||
return false;
|
||||
|
|
|
@ -22,6 +22,7 @@ import org.apache.hadoop.conf.Configuration;
|
|||
import org.apache.hadoop.fs.BlockLocation;
|
||||
import org.apache.hadoop.fs.BlockStoragePolicySpi;
|
||||
import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
|
||||
import org.apache.hadoop.fs.CommonPathCapabilities;
|
||||
import org.apache.hadoop.fs.ContentSummary;
|
||||
import org.apache.hadoop.fs.FSDataOutputStream;
|
||||
import org.apache.hadoop.fs.FileChecksum;
|
||||
|
@ -302,9 +303,17 @@ public abstract class BaseTestHttpFSWith extends HFSTestCase {
|
|||
AppendTestUtil.checkFullFile(fs, file, newLength, data, file.toString());
|
||||
|
||||
fs.close();
|
||||
assertPathCapabilityForTruncate(file);
|
||||
}
|
||||
}
|
||||
|
||||
private void assertPathCapabilityForTruncate(Path file) throws Exception {
|
||||
FileSystem fs = this.getHttpFSFileSystem();
|
||||
assertTrue("HttpFS/WebHdfs/SWebHdfs support truncate",
|
||||
fs.hasPathCapability(file, CommonPathCapabilities.FS_TRUNCATE));
|
||||
fs.close();
|
||||
}
|
||||
|
||||
private void testConcat() throws Exception {
|
||||
Configuration config = getProxiedFSConf();
|
||||
config.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 1024);
|
||||
|
|
|
@@ -124,7 +124,7 @@ Please make sure you write code that is portable.
* Don't write code that could force a non-aligned word access.
* This causes performance issues on most architectures and isn't supported at all on some.
* Generally the compiler will prevent this unless you are doing clever things with pointers e.g. abusing placement new or reinterpreting a pointer into a pointer to a wider type.
* If a type needs to be a a specific width make sure to specify it.
* If a type needs to be a specific width make sure to specify it.
* `int32_t my_32_bit_wide_int`
* Avoid using compiler dependent pragmas or attributes.
* If there is a justified and unavoidable reason for using these you must document why. See examples below.
|
|
|
@ -133,11 +133,6 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd">
|
|||
<artifactId>commons-io</artifactId>
|
||||
<scope>compile</scope>
|
||||
</dependency>
|
||||
<dependency>
|
||||
<groupId>commons-logging</groupId>
|
||||
<artifactId>commons-logging</artifactId>
|
||||
<scope>compile</scope>
|
||||
</dependency>
|
||||
<dependency>
|
||||
<groupId>commons-daemon</groupId>
|
||||
<artifactId>commons-daemon</artifactId>
|
||||
|
|
Some files were not shown because too many files have changed in this diff.