Merge branch 'trunk' into HDFS-16864
commit 1ecbf91cb0
@@ -14,6 +14,8 @@
 # limitations under the License.
 
 github:
+  ghp_path: /
+  ghp_branch: gh-pages
   enabled_merge_buttons:
     squash: true
     merge: false
@@ -0,0 +1,59 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+name: website
+
+# Controls when the action will run.
+on:
+  push:
+    branches: [ trunk ]
+
+jobs:
+  build:
+    runs-on: ubuntu-latest
+    steps:
+      - name: Checkout Hadoop trunk
+        uses: actions/checkout@v3
+        with:
+          repository: apache/hadoop
+      - name: Set up JDK 8
+        uses: actions/setup-java@v3
+        with:
+          java-version: '8'
+          distribution: 'temurin'
+      - name: Cache local Maven repository
+        uses: actions/cache@v3
+        with:
+          path: ~/.m2/repository
+          key: ${{ runner.os }}-maven-${{ hashFiles('**/pom.xml') }}
+          restore-keys: |
+            ${{ runner.os }}-maven-
+      - name: Build Hadoop maven plugins
+        run: cd hadoop-maven-plugins && mvn --batch-mode install
+      - name: Build Hadoop
+        run: mvn clean install -DskipTests -DskipShade
+      - name: Build document
+        run: mvn clean site
+      - name: Stage document
+        run: mvn site:stage -DstagingDirectory=${GITHUB_WORKSPACE}/staging/
+      - name: Deploy to GitHub Pages
+        uses: peaceiris/actions-gh-pages@v3
+        with:
+          github_token: ${{ secrets.GITHUB_TOKEN }}
+          publish_dir: ./staging/hadoop-project
+          user_name: 'github-actions[bot]'
+          user_email: 'github-actions[bot]@users.noreply.github.com'
@@ -250,7 +250,6 @@ commons-codec:commons-codec:1.11
 commons-collections:commons-collections:3.2.2
 commons-daemon:commons-daemon:1.0.13
 commons-io:commons-io:2.8.0
-commons-logging:commons-logging:1.1.3
 commons-net:commons-net:3.9.0
 de.ruedigermoeller:fst:2.50
 io.grpc:grpc-api:1.26.0

@@ -260,7 +259,6 @@ io.grpc:grpc-netty:1.26.0
 io.grpc:grpc-protobuf:1.26.0
 io.grpc:grpc-protobuf-lite:1.26.0
 io.grpc:grpc-stub:1.26.0
-io.netty:netty:3.10.6.Final
 io.netty:netty-all:4.1.77.Final
 io.netty:netty-buffer:4.1.77.Final
 io.netty:netty-codec:4.1.77.Final

@@ -363,7 +361,7 @@ org.lz4:lz4-java:1.7.1
 org.objenesis:objenesis:2.6
 org.xerial.snappy:snappy-java:1.0.5
 org.yaml:snakeyaml:1.33
-org.wildfly.openssl:wildfly-openssl:1.0.7.Final
+org.wildfly.openssl:wildfly-openssl:1.1.3.Final
 
 
 --------------------------------------------------------------------------------
@@ -180,11 +180,6 @@
       <artifactId>jersey-server</artifactId>
       <scope>compile</scope>
     </dependency>
-    <dependency>
-      <groupId>commons-logging</groupId>
-      <artifactId>commons-logging</artifactId>
-      <scope>compile</scope>
-    </dependency>
     <dependency>
       <groupId>log4j</groupId>
       <artifactId>log4j</artifactId>

@@ -200,11 +195,6 @@
       <artifactId>assertj-core</artifactId>
       <scope>test</scope>
     </dependency>
-    <dependency>
-      <groupId>org.glassfish.grizzly</groupId>
-      <artifactId>grizzly-http-servlet</artifactId>
-      <scope>test</scope>
-    </dependency>
     <dependency>
       <groupId>commons-beanutils</groupId>
       <artifactId>commons-beanutils</artifactId>
@@ -241,12 +241,15 @@ public class CryptoOutputStream extends FilterOutputStream implements
       return;
     }
     try {
-      flush();
-      if (closeOutputStream) {
-        super.close();
-        codec.close();
+      try {
+        flush();
+      } finally {
+        if (closeOutputStream) {
+          super.close();
+          codec.close();
+        }
+        freeBuffers();
       }
-      freeBuffers();
     } finally {
       closed = true;
     }
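
The hunk above nests a second try/finally inside close() so that the wrapped stream and the codec are still released when flush() throws. A minimal, self-contained sketch of that ordering, using a hypothetical wrapper class rather than Hadoop's CryptoOutputStream, looks like this:

import java.io.FilterOutputStream;
import java.io.IOException;
import java.io.OutputStream;

class GuardedCloseStream extends FilterOutputStream {
  private boolean closed;

  GuardedCloseStream(OutputStream out) {
    super(out);
  }

  @Override
  public void close() throws IOException {
    if (closed) {
      return;
    }
    try {
      try {
        flush();         // may throw
      } finally {
        super.close();   // still runs when flush() fails
      }
    } finally {
      closed = true;     // the stream is marked closed exactly once
    }
  }
}

The outer finally only flips the closed flag, so a failed flush() surfaces to the caller while the underlying stream is never leaked.
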
@@ -302,7 +302,12 @@ public abstract class CachingBlockManager extends BlockManager {
 
   private void read(BufferData data) throws IOException {
     synchronized (data) {
-      readBlock(data, false, BufferData.State.BLANK);
+      try {
+        readBlock(data, false, BufferData.State.BLANK);
+      } catch (IOException e) {
+        LOG.error("error reading block {}", data.getBlockNumber(), e);
+        throw e;
+      }
     }
   }
 

@@ -362,9 +367,6 @@ public abstract class CachingBlockManager extends BlockManager {
       buffer.flip();
       data.setReady(expectedState);
     } catch (Exception e) {
-      String message = String.format("error during readBlock(%s)", data.getBlockNumber());
-      LOG.error(message, e);
-
       if (isPrefetch && tracker != null) {
         tracker.failed();
       }

@@ -406,7 +408,8 @@ public abstract class CachingBlockManager extends BlockManager {
       try {
         blockManager.prefetch(data, taskQueuedStartTime);
       } catch (Exception e) {
-        LOG.error("error during prefetch", e);
+        LOG.info("error prefetching block {}. {}", data.getBlockNumber(), e.getMessage());
+        LOG.debug("error prefetching block {}", data.getBlockNumber(), e);
       }
       return null;
     }

@@ -493,7 +496,8 @@ public abstract class CachingBlockManager extends BlockManager {
           return;
         }
       } catch (Exception e) {
-        LOG.error("error waiting on blockFuture: {}", data, e);
+        LOG.info("error waiting on blockFuture: {}. {}", data, e.getMessage());
+        LOG.debug("error waiting on blockFuture: {}", data, e);
         data.setDone();
         return;
       }

@@ -523,8 +527,8 @@ public abstract class CachingBlockManager extends BlockManager {
       data.setDone();
     } catch (Exception e) {
       numCachingErrors.incrementAndGet();
-      String message = String.format("error adding block to cache after wait: %s", data);
-      LOG.error(message, e);
+      LOG.info("error adding block to cache after wait: {}. {}", data, e.getMessage());
+      LOG.debug("error adding block to cache after wait: {}", data, e);
       data.setDone();
     }
 
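
The CachingBlockManager hunks above replace single LOG.error calls with a paired info/debug call: a one-line summary at INFO and the full stack trace only at DEBUG. A rough sketch of the same pattern with a plain SLF4J logger, using illustrative names only, is:

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

class PrefetchWorker {
  private static final Logger LOG = LoggerFactory.getLogger(PrefetchWorker.class);

  void prefetch(int blockNumber, Runnable task) {
    try {
      task.run();
    } catch (Exception e) {
      // Expected noise (e.g. during shutdown): keep INFO terse ...
      LOG.info("error prefetching block {}. {}", blockNumber, e.getMessage());
      // ... and reserve the full stack trace for DEBUG.
      LOG.debug("error prefetching block {}", blockNumber, e);
    }
  }
}
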
@@ -15,6 +15,11 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
+
+/**
+ * Filesystem implementations that allow Hadoop to read directly from
+ * the local file system.
+ */
 @InterfaceAudience.LimitedPrivate({"HDFS", "MapReduce"})
 @InterfaceStability.Unstable
 package org.apache.hadoop.fs.local;

@@ -15,6 +15,10 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
+
+/**
+ * Support for the execution of a file system command.
+ */
 @InterfaceAudience.Private
 @InterfaceStability.Unstable
 package org.apache.hadoop.fs.shell;

@@ -15,6 +15,10 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
+
+/**
+ * Support for embedded HTTP services.
+ */
 @InterfaceAudience.LimitedPrivate({"HBase", "HDFS", "MapReduce"})
 @InterfaceStability.Unstable
 package org.apache.hadoop.http;
@@ -32,7 +32,6 @@ import java.nio.file.StandardOpenOption;
 import java.util.ArrayList;
 import java.util.List;
 
-import org.apache.commons.logging.Log;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;

@@ -246,30 +245,6 @@ public class IOUtils {
     }
   }
 
-  /**
-   * Close the Closeable objects and <b>ignore</b> any {@link Throwable} or
-   * null pointers. Must only be used for cleanup in exception handlers.
-   *
-   * @param log the log to record problems to at debug level. Can be null.
-   * @param closeables the objects to close
-   * @deprecated use {@link #cleanupWithLogger(Logger, java.io.Closeable...)}
-   * instead
-   */
-  @Deprecated
-  public static void cleanup(Log log, java.io.Closeable... closeables) {
-    for (java.io.Closeable c : closeables) {
-      if (c != null) {
-        try {
-          c.close();
-        } catch(Throwable e) {
-          if (log != null && log.isDebugEnabled()) {
-            log.debug("Exception in closing " + c, e);
-          }
-        }
-      }
-    }
-  }
-
   /**
    * Close the Closeable objects and <b>ignore</b> any {@link Throwable} or
    * null pointers. Must only be used for cleanup in exception handlers.
@@ -92,7 +92,7 @@ public class WritableName {
   ) throws IOException {
     Class<?> writableClass = NAME_TO_CLASS.get(name);
     if (writableClass != null)
-      return writableClass.asSubclass(Writable.class);
+      return writableClass;
     try {
       return conf.getClassByName(name);
     } catch (ClassNotFoundException e) {
@@ -15,6 +15,11 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
+
+/**
+ * Implementation of compression/decompression for the BZip2
+ * compression algorithm.
+ */
 @InterfaceAudience.Private
 @InterfaceStability.Unstable
 package org.apache.hadoop.io.compress.bzip2;

@@ -15,6 +15,13 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
+
+/**
+ * Implementation of compression/decompression for the LZ4
+ * compression algorithm.
+ *
+ * @see <a href="http://code.google.com/p/lz4/">LZ4</a>
+ */
 @InterfaceAudience.Private
 @InterfaceStability.Unstable
 package org.apache.hadoop.io.compress.lz4;

@@ -15,6 +15,13 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
+
+/**
+ * Implementation of compression/decompression for the Snappy
+ * compression algorithm.
+ *
+ * @see <a href="http://code.google.com/p/snappy/">Snappy</a>
+ */
 @InterfaceAudience.Private
 @InterfaceStability.Unstable
 package org.apache.hadoop.io.compress.snappy;

@@ -15,6 +15,13 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
+
+/**
+ * Implementation of compression/decompression based on the popular
+ * gzip compressed file format.
+ *
+ * @see <a href="http://www.gzip.org/">gzip</a>
+ */
 @InterfaceAudience.Private
 @InterfaceStability.Unstable
 package org.apache.hadoop.io.compress.zlib;

@@ -15,6 +15,13 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
+
+/**
+ * Implementation of compression/decompression based on the zStandard
+ * compression algorithm.
+ *
+ * @see <a href="https://github.com/facebook/zstd">zStandard</a>
+ */
 @InterfaceAudience.Private
 @InterfaceStability.Unstable
 package org.apache.hadoop.io.compress.zstd;

@@ -15,6 +15,12 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
+
+/**
+ * Various native IO-related calls not available in Java. These
+ * functions should generally be used alongside a fallback to another
+ * more portable mechanism.
+ */
 @InterfaceAudience.Private
 @InterfaceStability.Unstable
 package org.apache.hadoop.io.nativeio;
@@ -704,7 +704,7 @@ public class Client implements AutoCloseable {
      * handle that, a relogin is attempted.
      */
     private synchronized void handleSaslConnectionFailure(
-        final int currRetries, final int maxRetries, final Exception ex,
+        final int currRetries, final int maxRetries, final IOException ex,
         final Random rand, final UserGroupInformation ugi) throws IOException,
         InterruptedException {
       ugi.doAs(new PrivilegedExceptionAction<Object>() {

@@ -715,10 +715,7 @@ public class Client implements AutoCloseable {
           disposeSasl();
           if (shouldAuthenticateOverKrb()) {
             if (currRetries < maxRetries) {
-              if(LOG.isDebugEnabled()) {
-                LOG.debug("Exception encountered while connecting to "
-                    + "the server : " + ex);
-              }
+              LOG.debug("Exception encountered while connecting to the server {}", remoteId, ex);
               // try re-login
               if (UserGroupInformation.isLoginKeytabBased()) {
                 UserGroupInformation.getLoginUser().reloginFromKeytab();

@@ -736,7 +733,11 @@ public class Client implements AutoCloseable {
                   + UserGroupInformation.getLoginUser().getUserName() + " to "
                   + remoteId;
               LOG.warn(msg, ex);
-              throw (IOException) new IOException(msg).initCause(ex);
+              throw NetUtils.wrapException(remoteId.getAddress().getHostName(),
+                  remoteId.getAddress().getPort(),
+                  NetUtils.getHostname(),
+                  0,
+                  ex);
             }
           } else {
             // With RequestHedgingProxyProvider, one rpc call will send multiple

@@ -744,11 +745,9 @@ public class Client implements AutoCloseable {
             // all other requests will be interrupted. It's not a big problem,
             // and should not print a warning log.
             if (ex instanceof InterruptedIOException) {
-              LOG.debug("Exception encountered while connecting to the server",
-                  ex);
+              LOG.debug("Exception encountered while connecting to the server {}", remoteId, ex);
             } else {
-              LOG.warn("Exception encountered while connecting to the server ",
-                  ex);
+              LOG.warn("Exception encountered while connecting to the server {}", remoteId, ex);
             }
           }
           if (ex instanceof RemoteException)

@@ -1182,7 +1181,14 @@ public class Client implements AutoCloseable {
       final ResponseBuffer buf = new ResponseBuffer();
       header.writeDelimitedTo(buf);
       RpcWritable.wrap(call.rpcRequest).writeTo(buf);
-      rpcRequestQueue.put(Pair.of(call, buf));
+      // Wait for the message to be sent. We offer with timeout to
+      // prevent a race condition between checking the shouldCloseConnection
+      // and the stopping of the polling thread
+      while (!shouldCloseConnection.get()) {
+        if (rpcRequestQueue.offer(Pair.of(call, buf), 1, TimeUnit.SECONDS)) {
+          break;
+        }
+      }
     }
 
     /* Receive a response.
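
The last Client.java hunk swaps a blocking put() for an offer() loop with a timeout, so the sender re-checks a shutdown flag instead of blocking forever on a queue whose consumer may have stopped draining it. A stripped-down sketch of that idiom follows; the queue, flag, and class names here are placeholders, not Hadoop's actual fields:

import java.util.concurrent.BlockingQueue;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;

class RequestSender {
  private final BlockingQueue<byte[]> requestQueue = new LinkedBlockingQueue<>();
  private final AtomicBoolean shouldClose = new AtomicBoolean(false);

  void send(byte[] payload) throws InterruptedException {
    // Re-check the close flag on every timeout instead of blocking in put(),
    // so a connection that is being torn down cannot strand the caller.
    while (!shouldClose.get()) {
      if (requestQueue.offer(payload, 1, TimeUnit.SECONDS)) {
        break;
      }
    }
  }
}
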
@@ -900,12 +900,26 @@ public class RPC {
     /**
      * @return Default: -1.
      * @param numReaders input numReaders.
+     * @deprecated call {@link #setNumReaders(int value)} instead.
      */
+    @Deprecated
     public Builder setnumReaders(int numReaders) {
       this.numReaders = numReaders;
       return this;
     }
 
+    /**
+     * Set the number of reader threads.
+     *
+     * @return this builder.
+     * @param value input numReaders.
+     * @since HADOOP-18625.
+     */
+    public Builder setNumReaders(int value) {
+      this.numReaders = value;
+      return this;
+    }
+
     /**
      * @return Default: -1.
      * @param queueSizePerHandler
@@ -1985,11 +1985,26 @@ public abstract class Server {
     private long lastContact;
     private int dataLength;
     private Socket socket;
 
     // Cache the remote host & port info so that even if the socket is
     // disconnected, we can say where it used to connect to.
-    private String hostAddress;
-    private int remotePort;
-    private InetAddress addr;
+    /**
+     * Client Host IP address from where the socket connection is being established to the Server.
+     */
+    private final String hostAddress;
+    /**
+     * Client remote port used for the given socket connection.
+     */
+    private final int remotePort;
+    /**
+     * Address to which the socket is connected to.
+     */
+    private final InetAddress addr;
+    /**
+     * Client Host address from where the socket connection is being established to the Server.
+     */
+    private final String hostName;
 
     IpcConnectionContextProto connectionContext;
     String protocolName;

@@ -2033,8 +2048,12 @@ public abstract class Server {
       this.isOnAuxiliaryPort = isOnAuxiliaryPort;
       if (addr == null) {
         this.hostAddress = "*Unknown*";
+        this.hostName = this.hostAddress;
       } else {
+        // host IP address
         this.hostAddress = addr.getHostAddress();
+        // host name for the IP address
+        this.hostName = addr.getHostName();
       }
       this.remotePort = socket.getPort();
       this.responseQueue = new LinkedList<RpcCall>();

@@ -2050,7 +2069,7 @@ public abstract class Server {
 
     @Override
     public String toString() {
-      return getHostAddress() + ":" + remotePort;
+      return hostName + ":" + remotePort + " / " + hostAddress + ":" + remotePort;
     }
 
     boolean setShouldClose() {

@@ -2463,19 +2482,18 @@ public abstract class Server {
         return -1;
       }
 
-      if(!RpcConstants.HEADER.equals(dataLengthBuffer)) {
-        LOG.warn("Incorrect RPC Header length from {}:{} "
-            + "expected length: {} got length: {}",
-            hostAddress, remotePort, RpcConstants.HEADER, dataLengthBuffer);
+      if (!RpcConstants.HEADER.equals(dataLengthBuffer)) {
+        LOG.warn("Incorrect RPC Header length from {}:{} / {}:{}. Expected: {}. Actual: {}",
+            hostName, remotePort, hostAddress, remotePort, RpcConstants.HEADER,
+            dataLengthBuffer);
         setupBadVersionResponse(version);
         return -1;
       }
       if (version != CURRENT_VERSION) {
         //Warning is ok since this is not supposed to happen.
-        LOG.warn("Version mismatch from " +
-            hostAddress + ":" + remotePort +
-            " got version " + version +
-            " expected version " + CURRENT_VERSION);
+        LOG.warn("Version mismatch from {}:{} / {}:{}. "
+            + "Expected version: {}. Actual version: {} ", hostName,
+            remotePort, hostAddress, remotePort, CURRENT_VERSION, version);
         setupBadVersionResponse(version);
         return -1;
       }
@@ -34,10 +34,6 @@ import javax.servlet.http.HttpServletResponse;
 
 import org.apache.hadoop.classification.VisibleForTesting;
 import org.apache.hadoop.thirdparty.com.google.common.base.Charsets;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.commons.logging.impl.Jdk14Logger;
-import org.apache.commons.logging.impl.Log4JLogger;
 import org.apache.hadoop.HadoopIllegalArgumentException;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;

@@ -51,6 +47,8 @@ import org.apache.hadoop.util.GenericOptionsParser;
 import org.apache.hadoop.util.ServletUtil;
 import org.apache.hadoop.util.Tool;
 import org.apache.hadoop.util.ToolRunner;
+import org.apache.log4j.Level;
+import org.apache.log4j.Logger;
 
 /**
  * Change log level in runtime.

@@ -340,22 +338,14 @@ public class LogLevel {
       out.println(MARKER
           + "Submitted Class Name: <b>" + logName + "</b><br />");
 
-      Log log = LogFactory.getLog(logName);
+      Logger log = Logger.getLogger(logName);
       out.println(MARKER
           + "Log Class: <b>" + log.getClass().getName() +"</b><br />");
       if (level != null) {
         out.println(MARKER + "Submitted Level: <b>" + level + "</b><br />");
       }
 
-      if (log instanceof Log4JLogger) {
-        process(((Log4JLogger)log).getLogger(), level, out);
-      }
-      else if (log instanceof Jdk14Logger) {
-        process(((Jdk14Logger)log).getLogger(), level, out);
-      }
-      else {
-        out.println("Sorry, " + log.getClass() + " not supported.<br />");
-      }
+      process(log, level, out);
     }
 
     out.println(FORMS);

@@ -371,14 +361,14 @@ public class LogLevel {
         + "<input type='submit' value='Set Log Level' />"
         + "</form>";
 
-    private static void process(org.apache.log4j.Logger log, String level,
+    private static void process(Logger log, String level,
         PrintWriter out) throws IOException {
       if (level != null) {
-        if (!level.equalsIgnoreCase(org.apache.log4j.Level.toLevel(level)
+        if (!level.equalsIgnoreCase(Level.toLevel(level)
             .toString())) {
           out.println(MARKER + "Bad Level : <b>" + level + "</b><br />");
         } else {
-          log.setLevel(org.apache.log4j.Level.toLevel(level));
+          log.setLevel(Level.toLevel(level));
           out.println(MARKER + "Setting Level to " + level + " ...<br />");
         }
       }

@@ -386,21 +376,5 @@ public class LogLevel {
           + "Effective Level: <b>" + log.getEffectiveLevel() + "</b><br />");
     }
 
-    private static void process(java.util.logging.Logger log, String level,
-        PrintWriter out) throws IOException {
-      if (level != null) {
-        String levelToUpperCase = level.toUpperCase();
-        try {
-          log.setLevel(java.util.logging.Level.parse(levelToUpperCase));
-        } catch (IllegalArgumentException e) {
-          out.println(MARKER + "Bad Level : <b>" + level + "</b><br />");
-        }
-        out.println(MARKER + "Setting Level to " + level + " ...<br />");
-      }
-
-      java.util.logging.Level lev;
-      for(; (lev = log.getLevel()) == null; log = log.getParent());
-      out.println(MARKER + "Effective Level: <b>" + lev + "</b><br />");
-    }
   }
 }
@@ -15,6 +15,10 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
+
+/**
+ * Support for service-level authorization.
+ */
 @InterfaceAudience.Public
 @InterfaceStability.Evolving
 package org.apache.hadoop.security.authorize;

@@ -15,6 +15,10 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
+
+/**
+ * Filters for HTTP service security.
+ */
 @InterfaceAudience.Public
 @InterfaceStability.Evolving
 package org.apache.hadoop.security.http;
@@ -866,9 +866,9 @@ extends AbstractDelegationTokenIdentifier>
   /**
    * Add token stats to the owner to token count mapping.
    *
-   * @param id
+   * @param id token id.
    */
-  private void addTokenForOwnerStats(TokenIdent id) {
+  protected void addTokenForOwnerStats(TokenIdent id) {
     String realOwner = getTokenRealOwner(id);
     tokenOwnerStats.put(realOwner,
         tokenOwnerStats.getOrDefault(realOwner, 0L)+1);
@@ -15,6 +15,10 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
+
+/**
+ * ZooKeeper secret manager for TokenIdentifiers and DelegationKeys.
+ */
 @InterfaceAudience.LimitedPrivate({"HBase", "HDFS", "MapReduce"})
 @InterfaceStability.Evolving
 package org.apache.hadoop.security.token.delegation;

@@ -15,6 +15,10 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
+
+/**
+ * Support for delegation tokens.
+ */
 @InterfaceAudience.Public
 @InterfaceStability.Evolving
 package org.apache.hadoop.security.token;
@@ -21,7 +21,6 @@ package org.apache.hadoop.service;
 import java.util.ArrayList;
 import java.util.List;
 
-import org.apache.commons.logging.Log;
 import org.apache.hadoop.classification.InterfaceAudience.Public;
 import org.apache.hadoop.classification.InterfaceStability.Evolving;
 import org.slf4j.Logger;

@@ -75,9 +74,10 @@ public final class ServiceOperations {
    * @param log the log to warn at
    * @param service a service; may be null
    * @return any exception that was caught; null if none was.
-   * @see ServiceOperations#stopQuietly(Service)
+   * @deprecated to be removed with 3.4.0. Use {@link #stopQuietly(Logger, Service)} instead.
    */
-  public static Exception stopQuietly(Log log, Service service) {
+  @Deprecated
+  public static Exception stopQuietly(org.apache.commons.logging.Log log, Service service) {
     try {
       stop(service);
     } catch (Exception e) {
@@ -15,6 +15,10 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
+
+/**
+ * Support for services.
+ */
 @InterfaceAudience.Public
 package org.apache.hadoop.service;
 import org.apache.hadoop.classification.InterfaceAudience;
@@ -33,7 +33,7 @@ import java.util.function.Consumer;
  *
  * This class does not support null element.
  *
- * This class is not thread safe.
+ * This class is thread safe.
  *
  * @param <K> Key type for looking up the elements
  * @param <E> Element type, which must be
@@ -1,78 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.util;
-
-import org.apache.commons.logging.Log;
-import org.slf4j.Logger;
-
-class LogAdapter {
-  private Log LOG;
-  private Logger LOGGER;
-
-  private LogAdapter(Log LOG) {
-    this.LOG = LOG;
-  }
-
-  private LogAdapter(Logger LOGGER) {
-    this.LOGGER = LOGGER;
-  }
-
-  /**
-   * @deprecated use {@link #create(Logger)} instead
-   */
-  @Deprecated
-  public static LogAdapter create(Log LOG) {
-    return new LogAdapter(LOG);
-  }
-
-  public static LogAdapter create(Logger LOGGER) {
-    return new LogAdapter(LOGGER);
-  }
-
-  public void info(String msg) {
-    if (LOG != null) {
-      LOG.info(msg);
-    } else if (LOGGER != null) {
-      LOGGER.info(msg);
-    }
-  }
-
-  public void warn(String msg, Throwable t) {
-    if (LOG != null) {
-      LOG.warn(msg, t);
-    } else if (LOGGER != null) {
-      LOGGER.warn(msg, t);
-    }
-  }
-
-  public void debug(Throwable t) {
-    if (LOG != null) {
-      LOG.debug(t);
-    } else if (LOGGER != null) {
-      LOGGER.debug("", t);
-    }
-  }
-
-  public void error(String msg) {
-    if (LOG != null) {
-      LOG.error(msg);
-    } else if (LOGGER != null) {
-      LOGGER.error(msg);
-    }
-  }
-}
@@ -36,7 +36,6 @@ import java.util.List;
 import java.util.Map;
 import java.util.concurrent.ConcurrentHashMap;
 
-import org.apache.commons.logging.Log;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configurable;

@@ -228,10 +227,12 @@ public class ReflectionUtils {
    * @param log the logger that logs the stack trace
    * @param title a descriptive title for the call stacks
    * @param minInterval the minimum time from the last
+   * @deprecated to be removed with 3.4.0. Use {@link #logThreadInfo(Logger, String, long)} instead.
    */
-  public static void logThreadInfo(Log log,
-                                   String title,
-                                   long minInterval) {
+  @Deprecated
+  public static void logThreadInfo(org.apache.commons.logging.Log log,
+                                   String title,
+                                   long minInterval) {
     boolean dumpStack = false;
     if (log.isInfoEnabled()) {
       synchronized (ReflectionUtils.class) {
@@ -18,10 +18,10 @@
 
 package org.apache.hadoop.util;
 
+import org.slf4j.Logger;
 import sun.misc.Signal;
 import sun.misc.SignalHandler;
 
-import org.apache.commons.logging.Log;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;

@@ -42,11 +42,11 @@ public enum SignalLogger {
    * Our signal handler.
    */
   private static class Handler implements SignalHandler {
-    final private LogAdapter LOG;
+    final private Logger log;
     final private SignalHandler prevHandler;
 
-    Handler(String name, LogAdapter LOG) {
-      this.LOG = LOG;
+    Handler(String name, Logger log) {
+      this.log = log;
       prevHandler = Signal.handle(new Signal(name), this);
     }
 

@@ -57,7 +57,7 @@ public enum SignalLogger {
      */
     @Override
     public void handle(Signal signal) {
-      LOG.error("RECEIVED SIGNAL " + signal.getNumber() +
+      log.error("RECEIVED SIGNAL " + signal.getNumber() +
           ": SIG" + signal.getName());
       prevHandler.handle(signal);
     }

@@ -66,13 +66,9 @@ public enum SignalLogger {
   /**
    * Register some signal handlers.
    *
-   * @param LOG The log4j logfile to use in the signal handlers.
+   * @param log The log4j logfile to use in the signal handlers.
    */
-  public void register(final Log LOG) {
-    register(LogAdapter.create(LOG));
-  }
-
-  void register(final LogAdapter LOG) {
+  public void register(final Logger log) {
     if (registered) {
       throw new IllegalStateException("Can't re-install the signal handlers.");
     }

@@ -83,15 +79,15 @@ public enum SignalLogger {
     String separator = "";
     for (String signalName : SIGNALS) {
       try {
-        new Handler(signalName, LOG);
+        new Handler(signalName, log);
         bld.append(separator)
             .append(signalName);
         separator = ", ";
       } catch (Exception e) {
-        LOG.debug(e);
+        log.debug("Error: ", e);
       }
     }
     bld.append("]");
-    LOG.info(bld.toString());
+    log.info(bld.toString());
   }
 }
@@ -740,42 +740,26 @@ public class StringUtils {
    * Print a log message for starting up and shutting down
    * @param clazz the class of the server
    * @param args arguments
-   * @param LOG the target log object
+   * @param log the target log object
    */
   public static void startupShutdownMessage(Class<?> clazz, String[] args,
-                                     final org.apache.commons.logging.Log LOG) {
-    startupShutdownMessage(clazz, args, LogAdapter.create(LOG));
-  }
-
-  /**
-   * Print a log message for starting up and shutting down
-   * @param clazz the class of the server
-   * @param args arguments
-   * @param LOG the target log object
-   */
-  public static void startupShutdownMessage(Class<?> clazz, String[] args,
-                                     final org.slf4j.Logger LOG) {
-    startupShutdownMessage(clazz, args, LogAdapter.create(LOG));
-  }
-
-  static void startupShutdownMessage(Class<?> clazz, String[] args,
-                                     final LogAdapter LOG) {
+                                     final org.slf4j.Logger log) {
     final String hostname = NetUtils.getHostname();
     final String classname = clazz.getSimpleName();
-    LOG.info(createStartupShutdownMessage(classname, hostname, args));
+    log.info(createStartupShutdownMessage(classname, hostname, args));
 
     if (SystemUtils.IS_OS_UNIX) {
      try {
-        SignalLogger.INSTANCE.register(LOG);
+        SignalLogger.INSTANCE.register(log);
      } catch (Throwable t) {
-        LOG.warn("failed to register any UNIX signal loggers: ", t);
+        log.warn("failed to register any UNIX signal loggers: ", t);
      }
    }
    ShutdownHookManager.get().addShutdownHook(
      new Runnable() {
        @Override
        public void run() {
-          LOG.info(toStartupShutdownString("SHUTDOWN_MSG: ", new String[]{
+          log.info(toStartupShutdownString("SHUTDOWN_MSG: ", new String[]{
             "Shutting down " + classname + " at " + hostname}));
          LogManager.shutdown();
        }
@@ -93,6 +93,10 @@ public class VersionInfo {
     return info.getProperty("protocVersion", "Unknown");
   }
 
+  protected String _getCompilePlatform() {
+    return info.getProperty("compilePlatform", "Unknown");
+  }
+
   private static VersionInfo COMMON_VERSION_INFO = new VersionInfo("common");
   /**
    * Get the Hadoop version.

@@ -167,12 +171,21 @@ public class VersionInfo {
     return COMMON_VERSION_INFO._getProtocVersion();
   }
 
+  /**
+   * Returns the OS platform used for the build.
+   * @return the OS platform
+   */
+  public static String getCompilePlatform() {
+    return COMMON_VERSION_INFO._getCompilePlatform();
+  }
+
   public static void main(String[] args) {
     LOG.debug("version: "+ getVersion());
     System.out.println("Hadoop " + getVersion());
     System.out.println("Source code repository " + getUrl() + " -r " +
         getRevision());
     System.out.println("Compiled by " + getUser() + " on " + getDate());
+    System.out.println("Compiled on platform " + getCompilePlatform());
     System.out.println("Compiled with protoc " + getProtocVersion());
     System.out.println("From source with checksum " + getSrcChecksum());
     System.out.println("This command was run using " +
@@ -1,5 +1,4 @@
 /*
- * *
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements.  See the NOTICE file
  * distributed with this work for additional information

@@ -15,9 +14,11 @@
  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  * See the License for the specific language governing permissions and
  * limitations under the License.
- * /
  */
 
+/**
+ * Support for concurrent execution.
+ */
 @InterfaceAudience.Private
 @InterfaceStability.Unstable
 package org.apache.hadoop.util.concurrent;
@@ -24,3 +24,4 @@ date=${version-info.build.time}
 url=${version-info.scm.uri}
 srcChecksum=${version-info.source.md5}
 protocVersion=${hadoop.protobuf.version}
+compilePlatform=${os.detected.classifier}
@@ -35,6 +35,8 @@ These instructions do not cover integration with any Kerberos services,
 -everyone bringing up a production cluster should include connecting to their
 organisation's Kerberos infrastructure as a key part of the deployment.
 
+See [Security](./SecureMode.html) for details on how to secure a cluster.
+
 Prerequisites
 -------------
 
@@ -17,12 +17,14 @@
  */
 package org.apache.hadoop.crypto;
 
+import java.io.IOException;
 import java.io.OutputStream;
 
 import org.apache.hadoop.conf.Configuration;
 
 import org.junit.BeforeClass;
 import org.junit.Test;
+import static org.apache.hadoop.test.LambdaTestUtils.intercept;
 import static org.mockito.Mockito.*;
 
 /**

@@ -54,4 +56,22 @@ public class TestCryptoOutputStreamClosing {
     verify(outputStream, never()).close();
   }
 
+  @Test
+  public void testUnderlyingOutputStreamClosedWhenExceptionClosing() throws Exception {
+    OutputStream outputStream = mock(OutputStream.class);
+    CryptoOutputStream cos = spy(new CryptoOutputStream(outputStream, codec,
+        new byte[16], new byte[16], 0L, true));
+
+    // exception while flushing during close
+    doThrow(new IOException("problem flushing wrapped stream"))
+        .when(cos).flush();
+
+    intercept(IOException.class,
+        () -> cos.close());
+
+    // We expect that the close of the CryptoOutputStream closes the
+    // wrapped OutputStream even though we got an exception
+    // during CryptoOutputStream::close (in the flush method)
+    verify(outputStream).close();
+  }
 }
@@ -1321,16 +1321,16 @@ public class TestFileUtil {
       if (wildcardPath.equals(classPath)) {
         // add wildcard matches
         for (File wildcardMatch: wildcardMatches) {
-          expectedClassPaths.add(wildcardMatch.toURI().toURL()
+          expectedClassPaths.add(wildcardMatch.getCanonicalFile().toURI().toURL()
               .toExternalForm());
         }
       } else {
         File fileCp = null;
         if(!new Path(classPath).isAbsolute()) {
-          fileCp = new File(tmp, classPath);
+          fileCp = new File(tmp, classPath).getCanonicalFile();
         }
         else {
-          fileCp = new File(classPath);
+          fileCp = new File(classPath).getCanonicalFile();
         }
         if (nonExistentSubdir.equals(classPath)) {
           // expect to maintain trailing path separator if present in input, even

@@ -1385,7 +1385,8 @@ public class TestFileUtil {
     for (Path jar: jars) {
       URL url = jar.toUri().toURL();
       assertTrue("the jar should match either of the jars",
-          url.equals(jar1.toURI().toURL()) || url.equals(jar2.toURI().toURL()));
+          url.equals(jar1.getCanonicalFile().toURI().toURL()) ||
+          url.equals(jar2.getCanonicalFile().toURI().toURL()));
     }
   }
 
@@ -25,8 +25,6 @@ import static org.junit.Assert.fail;
 import java.io.IOException;
 import java.net.URI;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.FSDataOutputStream;

@@ -39,7 +37,8 @@ import org.apache.hadoop.security.UserGroupInformation;
 import org.junit.After;
 import org.junit.Before;
 import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  *

@@ -51,8 +50,8 @@ import org.junit.Test;
  */
 
 public class TestViewFileSystemLocalFileSystem extends ViewFileSystemBaseTest {
-  private static final Log LOG =
-      LogFactory.getLog(TestViewFileSystemLocalFileSystem.class);
+  private static final Logger LOG =
+      LoggerFactory.getLogger(TestViewFileSystemLocalFileSystem.class);
 
   @Override
   @Before
@ -21,8 +21,6 @@ import java.io.IOException;
import java.net.URI;
import java.net.URISyntaxException;

-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;

@ -35,6 +33,8 @@ import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;

/**
 *

@ -43,8 +43,8 @@ import org.junit.Test;
 */
public class TestViewFileSystemOverloadSchemeLocalFileSystem {
private static final String FILE = "file";
-private static final Log LOG =
-LogFactory.getLog(TestViewFileSystemOverloadSchemeLocalFileSystem.class);
+private static final Logger LOG =
+LoggerFactory.getLogger(TestViewFileSystemOverloadSchemeLocalFileSystem.class);
private FileSystem fsTarget;
private Configuration conf;
private Path targetTestRoot;
@ -17,8 +17,6 @@
 */
package org.apache.hadoop.http;

-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.minikdc.MiniKdc;

@ -53,8 +51,6 @@ import static org.junit.Assert.assertTrue;
 */
public class TestHttpServerWithSpnego {

-static final Log LOG = LogFactory.getLog(TestHttpServerWithSpnego.class);
-
private static final String SECRET_STR = "secret";
private static final String HTTP_USER = "HTTP";
private static final String PREFIX = "hadoop.http.authentication.";
@ -26,6 +26,9 @@ import org.apache.hadoop.io.SequenceFile.CompressionType;
import org.apache.hadoop.io.SequenceFile.Metadata;
import org.apache.hadoop.io.compress.CompressionCodec;
import org.apache.hadoop.io.compress.DefaultCodec;
+import org.apache.hadoop.io.serializer.Deserializer;
+import org.apache.hadoop.io.serializer.Serialization;
+import org.apache.hadoop.io.serializer.Serializer;
import org.apache.hadoop.io.serializer.avro.AvroReflectSerialization;
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.hadoop.util.ReflectionUtils;

@ -756,6 +759,122 @@ public class TestSequenceFile {
}
}

+@Test
+public void testSerializationUsingWritableNameAlias() throws IOException {
+Configuration config = new Configuration();
+config.set(CommonConfigurationKeys.IO_SERIALIZATIONS_KEY, SimpleSerializer.class.getName());
+Path path = new Path(System.getProperty("test.build.data", "."),
+"SerializationUsingWritableNameAlias");
+
+// write with the original serializable class
+SequenceFile.Writer writer = SequenceFile.createWriter(
+config,
+SequenceFile.Writer.file(path),
+SequenceFile.Writer.keyClass(SimpleSerializable.class),
+SequenceFile.Writer.valueClass(SimpleSerializable.class));
+
+int max = 10;
+try {
+SimpleSerializable val = new SimpleSerializable();
+val.setId(-1);
+for (int i = 0; i < max; i++) {
+SimpleSerializable key = new SimpleSerializable();
+key.setId(i);
+writer.append(key, val);
+}
+} finally {
+writer.close();
+}
+
+// override name so it gets forced to the new serializable
+WritableName.setName(AnotherSimpleSerializable.class, SimpleSerializable.class.getName());
+
+// read and expect our new serializable, and all the correct values read
+SequenceFile.Reader reader = new SequenceFile.Reader(
+config,
+SequenceFile.Reader.file(path));
+
+AnotherSimpleSerializable key = new AnotherSimpleSerializable();
+int count = 0;
+while (true) {
+key = (AnotherSimpleSerializable) reader.next(key);
+if (key == null) {
+// make sure we exhausted all the ints we wrote
+assertEquals(count, max);
+break;
+}
+assertEquals(count++, key.getId());
+}
+}
+
+public static class SimpleSerializable implements Serializable {
+
+private int id;
+
+public int getId() {
+return id;
+}
+
+public void setId(int id) {
+this.id = id;
+}
+}
+
+public static class AnotherSimpleSerializable extends SimpleSerializable {
+}
+
+public static class SimpleSerializer implements Serialization<SimpleSerializable> {
+
+@Override
+public boolean accept(Class<?> c) {
+return SimpleSerializable.class.isAssignableFrom(c);
+}
+
+@Override
+public Serializer<SimpleSerializable> getSerializer(Class<SimpleSerializable> c) {
+return new Serializer<SimpleSerializable>() {
+private DataOutputStream out;
+@Override
+public void open(OutputStream out) throws IOException {
+this.out = new DataOutputStream(out);
+}
+
+@Override
+public void serialize(SimpleSerializable simpleSerializable) throws IOException {
+out.writeInt(simpleSerializable.getId());
+}
+
+@Override
+public void close() throws IOException {
+out.close();
+}
+};
+}
+
+@Override
+public Deserializer<SimpleSerializable> getDeserializer(Class<SimpleSerializable> c) {
+return new Deserializer<SimpleSerializable>() {
+private DataInputStream dis;
+@Override
+public void open(InputStream in) throws IOException {
+dis = new DataInputStream(in);
+}
+
+@Override
+public SimpleSerializable deserialize(SimpleSerializable simpleSerializable)
+throws IOException {
+simpleSerializable.setId(dis.readInt());
+return simpleSerializable;
+}
+
+@Override
+public void close() throws IOException {
+dis.close();
+}
+};
+}
+}
+
/** For debugging and testing. */
public static void main(String[] args) throws Exception {
int count = 1024 * 1024;
@ -24,8 +24,14 @@ import java.io.IOException;
import java.util.Random;

import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.CommonConfigurationKeys;
+import org.apache.hadoop.io.serializer.Deserializer;
+import org.apache.hadoop.io.serializer.Serialization;
+import org.apache.hadoop.io.serializer.SerializationFactory;
+import org.apache.hadoop.io.serializer.Serializer;
import org.junit.Test;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertTrue;

/** Unit tests for WritableName. */

@ -63,6 +69,28 @@ public class TestWritableName {
}
}

+private static class SimpleSerializable {
+
+}
+
+private static class SimpleSerializer implements Serialization<SimpleSerializable> {
+
+@Override
+public boolean accept(Class<?> c) {
+return c.equals(SimpleSerializable.class);
+}
+
+@Override
+public Serializer<SimpleSerializable> getSerializer(Class<SimpleSerializable> c) {
+return null;
+}
+
+@Override
+public Deserializer<SimpleSerializable> getDeserializer(Class<SimpleSerializable> c) {
+return null;
+}
+}
+
private static final String testName = "mystring";

@Test

@ -95,7 +123,27 @@ public class TestWritableName {
// check original name still works
test = WritableName.getClass(testName, conf);
assertTrue(test.equals(SimpleWritable.class));
+}
+
+@Test
+public void testAddNameSerializable() throws Exception {
+Configuration conf = new Configuration();
+conf.set(CommonConfigurationKeys.IO_SERIALIZATIONS_KEY, SimpleSerializer.class.getName());
+SerializationFactory serializationFactory =
+new SerializationFactory(conf);
+
+String altName = testName + ".alt";
+
+WritableName.addName(SimpleSerializable.class, altName);
+
+Class<?> test = WritableName.getClass(altName, conf);
+assertEquals(test, SimpleSerializable.class);
+assertNotNull(serializationFactory.getSerialization(test));
+
+// check original name still works
+test = WritableName.getClass(SimpleSerializable.class.getName(), conf);
+assertEquals(test, SimpleSerializable.class);
+assertNotNull(serializationFactory.getSerialization(test));
}

@Test
@ -1168,6 +1168,10 @@ public class TestIPC {

call(client, addr, serviceClass, conf);
Connection connection = server.getConnections()[0];
+LOG.info("Connection is from: {}", connection);
+assertEquals(
+"Connection string representation should include both IP address and Host name", 2,
+connection.toString().split(" / ").length);
int serviceClass2 = connection.getServiceClass();
assertFalse(noChanged ^ serviceClass == serviceClass2);
client.stop();

@ -1336,7 +1340,7 @@ public class TestIPC {
/**
 * Test the retry count while used in a retry proxy.
 */
-@Test(timeout=60000)
+@Test(timeout=100000)
public void testRetryProxy() throws IOException {
final Client client = new Client(LongWritable.class, conf);
@ -378,7 +378,7 @@ public class TestRPC extends TestRpcBase {
assertEquals(confReaders, server.getNumReaders());

server = newServerBuilder(conf)
-.setNumHandlers(1).setnumReaders(3).setQueueSizePerHandler(200)
+.setNumHandlers(1).setNumReaders(3).setQueueSizePerHandler(200)
.setVerbose(false).build();

assertEquals(3, server.getNumReaders());

@ -1849,6 +1849,11 @@ public class TestRPC extends TestRpcBase {
// if it wasn't fatal, verify there's only one open connection.
Connection[] conns = server.getConnections();
assertEquals(reqName, 1, conns.length);
+String connectionInfo = conns[0].toString();
+LOG.info("Connection is from: {}", connectionInfo);
+assertEquals(
+"Connection string representation should include both IP address and Host name", 2,
+connectionInfo.split(" / ").length);
// verify whether the connection should have been reused.
if (isDisconnected) {
assertNotSame(reqName, lastConn, conns[0]);
@ -22,8 +22,6 @@ import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.node.ContainerNode;
import org.junit.Test;
import static org.junit.Assert.*;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.util.Time;
import org.apache.log4j.Appender;
import org.apache.log4j.Category;

@ -44,8 +42,6 @@ import java.util.Vector;

public class TestLog4Json {

-private static final Log LOG = LogFactory.getLog(TestLog4Json.class);
-
@Test
public void testConstruction() throws Throwable {
Log4Json l4j = new Log4Json();
@ -22,9 +22,6 @@ import java.net.SocketException;
import java.net.URI;
import java.util.concurrent.Callable;

-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.commons.logging.impl.Log4JLogger;
import org.apache.hadoop.HadoopIllegalArgumentException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeys;

@ -70,8 +67,7 @@ public class TestLogLevel extends KerberosSecurityTestcase {
private final String logName = TestLogLevel.class.getName();
private String clientPrincipal;
private String serverPrincipal;
-private final Log testlog = LogFactory.getLog(logName);
-private final Logger log = ((Log4JLogger)testlog).getLogger();
+private final Logger log = Logger.getLogger(logName);
private final static String PRINCIPAL = "loglevel.principal";
private final static String KEYTAB = "loglevel.keytab";
private static final String PREFIX = "hadoop.http.authentication.";
@ -18,21 +18,27 @@

package org.apache.hadoop.security.authentication.server;

+import java.io.IOException;
+import java.io.PrintWriter;
import java.security.Principal;
+import java.util.Collection;
import java.util.Collections;
import java.util.Enumeration;
import java.util.HashMap;
+import java.util.Locale;
import java.util.Map;

import javax.servlet.FilterConfig;
import javax.servlet.FilterChain;
import javax.servlet.ServletContext;
+import javax.servlet.ServletOutputStream;
import javax.servlet.ServletResponse;
import javax.servlet.ServletRequest;
+import javax.servlet.http.Cookie;
import javax.servlet.http.HttpServletRequest;
+import javax.servlet.http.HttpServletResponse;

import static org.assertj.core.api.Assertions.assertThat;
-import org.glassfish.grizzly.servlet.HttpServletResponseImpl;
import org.junit.Test;
import org.mockito.Mockito;

@ -76,8 +82,192 @@ public class TestProxyUserAuthenticationFilter {
}
}

-private class HttpServletResponseForTest extends HttpServletResponseImpl {
+private class HttpServletResponseForTest implements HttpServletResponse {
+
+@Override
+public void addCookie(Cookie cookie) {
+}
+
+@Override
+public boolean containsHeader(String name) {
+return false;
+}
+
+@Override
+public String encodeURL(String url) {
+return null;
+}
+
+@Override
+public String encodeRedirectURL(String url) {
+return null;
+}
+
+@Override
+public String encodeUrl(String url) {
+return null;
+}
+
+@Override
+public String encodeRedirectUrl(String url) {
+return null;
+}
+
+@Override
+public void sendError(int sc, String msg) throws IOException {
+}
+
+@Override
+public void sendError(int sc) throws IOException {
+}
+
+@Override
+public void sendRedirect(String location) throws IOException {
+}
+
+@Override
+public void setDateHeader(String name, long date) {
+}
+
+@Override
+public void addDateHeader(String name, long date) {
+}
+
+@Override
+public void setHeader(String name, String value) {
+}
+
+@Override
+public void addHeader(String name, String value) {
+}
+
+@Override
+public void setIntHeader(String name, int value) {
+}
+
+@Override
+public void addIntHeader(String name, int value) {
+}
+
+@Override
+public void setStatus(int sc) {
+}
+
+@Override
+public void setStatus(int sc, String sm) {
+}
+
+@Override
+public int getStatus() {
+return 0;
+}
+
+@Override
+public String getHeader(String name) {
+return null;
+}
+
+@Override
+public Collection<String> getHeaders(String name) {
+return null;
+}
+
+@Override
+public Collection<String> getHeaderNames() {
+return null;
+}
+
+@Override
+public String getCharacterEncoding() {
+return null;
+}
+
+@Override
+public String getContentType() {
+return null;
+}
+
+@Override
+public ServletOutputStream getOutputStream() throws IOException {
+return null;
+}
+
+@Override
+public PrintWriter getWriter() throws IOException {
+return null;
+}
+
+@Override
+public void setCharacterEncoding(String charset) {
+}
+
+@Override
+public void setContentLength(int len) {
+}
+
+@Override
+public void setContentLengthLong(long len) {
+}
+
+@Override
+public void setContentType(String type) {
+}
+
+@Override
+public void setBufferSize(int size) {
+}
+
+@Override
+public int getBufferSize() {
+return 0;
+}
+
+@Override
+public void flushBuffer() throws IOException {
+}
+
+@Override
+public void resetBuffer() {
+}
+
+@Override
+public boolean isCommitted() {
+return false;
+}
+
+@Override
+public void reset() {
+}
+
+@Override
+public void setLocale(Locale loc) {
+}
+
+@Override
+public Locale getLocale() {
+return null;
+}
}

@ -49,8 +49,6 @@ import java.util.function.Supplier;
import java.util.regex.Pattern;

import org.apache.commons.lang3.RandomStringUtils;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.impl.Log4JLogger;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.fs.Path;

@ -117,29 +115,11 @@ public abstract class GenericTestUtils {
public static final String ERROR_INVALID_ARGUMENT =
"Total wait time should be greater than check interval time";

-/**
- * @deprecated use {@link #disableLog(org.slf4j.Logger)} instead
- */
-@Deprecated
-@SuppressWarnings("unchecked")
-public static void disableLog(Log log) {
-// We expect that commons-logging is a wrapper around Log4j.
-disableLog((Log4JLogger) log);
-}
-
@Deprecated
public static Logger toLog4j(org.slf4j.Logger logger) {
return LogManager.getLogger(logger.getName());
}

-/**
- * @deprecated use {@link #disableLog(org.slf4j.Logger)} instead
- */
-@Deprecated
-public static void disableLog(Log4JLogger log) {
-log.getLogger().setLevel(Level.OFF);
-}
-
/**
 * @deprecated use {@link #disableLog(org.slf4j.Logger)} instead
 */

@ -152,45 +132,6 @@ public abstract class GenericTestUtils {
disableLog(toLog4j(logger));
}

-/**
- * @deprecated
- * use {@link #setLogLevel(org.slf4j.Logger, org.slf4j.event.Level)} instead
- */
-@Deprecated
-@SuppressWarnings("unchecked")
-public static void setLogLevel(Log log, Level level) {
-// We expect that commons-logging is a wrapper around Log4j.
-setLogLevel((Log4JLogger) log, level);
-}
-
-/**
- * A helper used in log4j2 migration to accept legacy
- * org.apache.commons.logging apis.
- * <p>
- * And will be removed after migration.
- *
- * @param log a log
- * @param level level to be set
- */
-@Deprecated
-public static void setLogLevel(Log log, org.slf4j.event.Level level) {
-setLogLevel(log, Level.toLevel(level.toString()));
-}
-
-/**
- * @deprecated
- * use {@link #setLogLevel(org.slf4j.Logger, org.slf4j.event.Level)} instead
- */
-@Deprecated
-public static void setLogLevel(Log4JLogger log, Level level) {
-log.getLogger().setLevel(level);
-}
-
-/**
- * @deprecated
- * use {@link #setLogLevel(org.slf4j.Logger, org.slf4j.event.Level)} instead
- */
-@Deprecated
public static void setLogLevel(Logger logger, Level level) {
logger.setLevel(level);
}

@ -535,13 +476,15 @@ public abstract class GenericTestUtils {
private WriterAppender appender;
private Logger logger;

-public static LogCapturer captureLogs(Log l) {
-Logger logger = ((Log4JLogger)l).getLogger();
-return new LogCapturer(logger);
+public static LogCapturer captureLogs(org.slf4j.Logger logger) {
+if (logger.getName().equals("root")) {
+return new LogCapturer(org.apache.log4j.Logger.getRootLogger());
+}
+return new LogCapturer(toLog4j(logger));
}

-public static LogCapturer captureLogs(org.slf4j.Logger logger) {
-return new LogCapturer(toLog4j(logger));
+public static LogCapturer captureLogs(Logger logger) {
+return new LogCapturer(logger);
}

private LogCapturer(Logger logger) {
@ -18,10 +18,10 @@

package org.apache.hadoop.util;

-import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.test.GenericTestUtils;
import org.junit.Assert;
import org.junit.Test;
+import org.slf4j.LoggerFactory;

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;

@ -43,7 +43,7 @@ public class TestJarFinder {
public void testJar() throws Exception {

//picking a class that is for sure in a JAR in the classpath
-String jar = JarFinder.getJar(LogFactory.class);
+String jar = JarFinder.getJar(LoggerFactory.class);
Assert.assertTrue(new File(jar).exists());
}
@ -32,9 +32,9 @@ public class TestSignalLogger {
@Test(timeout=60000)
public void testInstall() throws Exception {
Assume.assumeTrue(SystemUtils.IS_OS_UNIX);
-SignalLogger.INSTANCE.register(LogAdapter.create(LOG));
+SignalLogger.INSTANCE.register(LOG);
try {
-SignalLogger.INSTANCE.register(LogAdapter.create(LOG));
+SignalLogger.INSTANCE.register(LOG);
Assert.fail("expected IllegalStateException from double registration");
} catch (IllegalStateException e) {
// fall through
@ -63,11 +63,6 @@
<artifactId>mockito-core</artifactId>
<scope>test</scope>
</dependency>
-<dependency>
-<groupId>commons-logging</groupId>
-<artifactId>commons-logging</artifactId>
-<scope>compile</scope>
-</dependency>
<dependency>
<groupId>javax.servlet</groupId>
<artifactId>javax.servlet-api</artifactId>
@ -17,6 +17,7 @@
 */
package org.apache.hadoop.oncrpc;

+import java.net.InetSocketAddress;
import java.net.SocketAddress;
import java.nio.ByteBuffer;
import java.util.List;

@ -26,6 +27,7 @@ import io.netty.buffer.Unpooled;
import io.netty.channel.ChannelHandler;
import io.netty.channel.ChannelHandlerContext;
import io.netty.channel.ChannelInboundHandlerAdapter;
+import io.netty.channel.SimpleChannelInboundHandler;
import io.netty.channel.socket.DatagramPacket;
import io.netty.handler.codec.ByteToMessageDecoder;
import org.apache.hadoop.classification.VisibleForTesting;

@ -172,15 +174,18 @@ public final class RpcUtil {
 */
@ChannelHandler.Sharable
private static final class RpcUdpResponseStage extends
-ChannelInboundHandlerAdapter {
+SimpleChannelInboundHandler<RpcResponse> {
+public RpcUdpResponseStage() {
+// do not auto release the RpcResponse message.
+super(false);
+}

@Override
-public void channelRead(ChannelHandlerContext ctx, Object msg)
-throws Exception {
-RpcResponse r = (RpcResponse) msg;
-// TODO: check out https://github.com/netty/netty/issues/1282 for
-// correct usage
-ctx.channel().writeAndFlush(r.data());
+protected void channelRead0(ChannelHandlerContext ctx,
+RpcResponse response) throws Exception {
+ByteBuf buf = Unpooled.wrappedBuffer(response.data());
+ctx.writeAndFlush(new DatagramPacket(
+buf, (InetSocketAddress) response.recipient()));
}
}
}
@ -117,15 +117,13 @@ final class Portmap {
.childOption(ChannelOption.SO_REUSEADDR, true)
.channel(NioServerSocketChannel.class)
.childHandler(new ChannelInitializer<SocketChannel>() {
-private final IdleStateHandler idleStateHandler = new IdleStateHandler(
-0, 0, idleTimeMilliSeconds, TimeUnit.MILLISECONDS);

@Override
protected void initChannel(SocketChannel ch) throws Exception {
ChannelPipeline p = ch.pipeline();

p.addLast(RpcUtil.constructRpcFrameDecoder(),
-RpcUtil.STAGE_RPC_MESSAGE_PARSER, idleStateHandler, handler,
+RpcUtil.STAGE_RPC_MESSAGE_PARSER, new IdleStateHandler(0, 0,
+idleTimeMilliSeconds, TimeUnit.MILLISECONDS), handler,
RpcUtil.STAGE_RPC_TCP_RESPONSE);
}});
@ -23,8 +23,10 @@ import java.net.DatagramPacket;
import java.net.DatagramSocket;
import java.net.InetSocketAddress;
import java.net.Socket;
+import java.util.Arrays;
import java.util.Map;

+import org.apache.hadoop.oncrpc.RpcReply;
import org.junit.Assert;

import org.apache.hadoop.oncrpc.RpcCall;

@ -35,6 +37,8 @@ import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.Test;

+import static org.junit.Assert.assertEquals;

public class TestPortmap {
private static Portmap pm = new Portmap();
private static final int SHORT_TIMEOUT_MILLISECONDS = 10;

@ -92,6 +96,19 @@ public class TestPortmap {
pm.getUdpServerLoAddress());
try {
s.send(p);
+
+// verify that portmap server responds a UDF packet back to the client
+byte[] receiveData = new byte[65535];
+DatagramPacket receivePacket = new DatagramPacket(receiveData,
+receiveData.length);
+s.setSoTimeout(2000);
+s.receive(receivePacket);
+
+// verify that the registration is accepted.
+XDR xdr = new XDR(Arrays.copyOfRange(receiveData, 0,
+receivePacket.getLength()));
+RpcReply reply = RpcReply.read(xdr);
+assertEquals(reply.getState(), RpcReply.ReplyState.MSG_ACCEPTED);
} finally {
s.close();
}
@ -61,10 +61,6 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd">
<artifactId>hadoop-common</artifactId>
<scope>provided</scope>
<exclusions>
-<exclusion>
-<groupId>commons-logging</groupId>
-<artifactId>commons-logging</artifactId>
-</exclusion>
<exclusion>
<groupId>log4j</groupId>
<artifactId>log4j</artifactId>
@ -133,11 +133,6 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd">
<artifactId>commons-io</artifactId>
<scope>compile</scope>
</dependency>
-<dependency>
-<groupId>commons-logging</groupId>
-<artifactId>commons-logging</artifactId>
-<scope>compile</scope>
-</dependency>
<dependency>
<groupId>commons-daemon</groupId>
<artifactId>commons-daemon</artifactId>
@ -49,10 +49,6 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd">
<artifactId>hadoop-common</artifactId>
<scope>provided</scope>
<exclusions>
-<exclusion>
-<groupId>commons-logging</groupId>
-<artifactId>commons-logging</artifactId>
-</exclusion>
<exclusion>
<groupId>log4j</groupId>
<artifactId>log4j</artifactId>
@ -30,6 +30,7 @@ import org.apache.hadoop.metrics2.annotation.Metrics;
import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
import org.apache.hadoop.metrics2.lib.MetricsRegistry;
import org.apache.hadoop.metrics2.lib.MutableGaugeInt;
+import org.apache.hadoop.metrics2.lib.MutableGaugeLong;
import org.apache.hadoop.metrics2.lib.MutableRate;

import org.apache.hadoop.classification.VisibleForTesting;

@ -136,6 +137,19 @@ public class StateStoreMetrics implements StateStoreMBean {
counter.set(size);
}

+/**
+ * set the count of the location cache access information.
+ * @param name Name of the record.
+ * @param count count of the record.
+ */
+public void setLocationCache(String name, long count) {
+MutableGaugeLong counter = (MutableGaugeLong) registry.get(name);
+if (counter == null) {
+counter = registry.newGauge(name, name, count);
+}
+counter.set(count);
+}
+
@VisibleForTesting
public void reset() {
reads.resetMinMax();
@ -42,6 +42,7 @@ import java.util.concurrent.Callable;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.ExecutionException;
+import java.util.concurrent.atomic.LongAdder;
import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReadWriteLock;
import java.util.concurrent.locks.ReentrantReadWriteLock;

@ -51,6 +52,7 @@ import java.util.ArrayList;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.hdfs.server.federation.metrics.StateStoreMetrics;
import org.apache.hadoop.hdfs.server.federation.resolver.order.DestinationOrder;
import org.apache.hadoop.hdfs.server.federation.router.Router;
import org.apache.hadoop.hdfs.server.federation.router.RouterRpcServer;

@ -97,6 +99,8 @@ public class MountTableResolver
private final TreeMap<String, MountTable> tree = new TreeMap<>();
/** Path -> Remote location. */
private final Cache<String, PathLocation> locationCache;
+private final LongAdder locCacheMiss = new LongAdder();
+private final LongAdder locCacheAccess = new LongAdder();

/** Default nameservice when no mount matches the math. */
private String defaultNameService = "";

@ -408,6 +412,9 @@ public class MountTableResolver
mountTable.getMountTableEntries(request);
List<MountTable> records = response.getEntries();
refreshEntries(records);
+StateStoreMetrics metrics = this.getMountTableStore().getDriver().getMetrics();
+metrics.setLocationCache("locationCacheMissed", this.getLocCacheMiss().sum());
+metrics.setLocationCache("locationCacheAccessed", this.getLocCacheAccess().sum());
} catch (IOException e) {
LOG.error("Cannot fetch mount table entries from State Store", e);
return false;

@ -441,9 +448,12 @@ public class MountTableResolver
if (this.locationCache == null) {
res = lookupLocation(processTrashPath(path));
} else {
-Callable<? extends PathLocation> meh = (Callable<PathLocation>) () ->
-lookupLocation(processTrashPath(path));
+Callable<? extends PathLocation> meh = (Callable<PathLocation>) () -> {
+this.getLocCacheMiss().increment();
+return lookupLocation(processTrashPath(path));
+};
res = this.locationCache.get(processTrashPath(path), meh);
+this.getLocCacheAccess().increment();
}
if (isTrashPath(path)) {
List<RemoteLocation> remoteLocations = new ArrayList<>();

@ -699,4 +709,12 @@ public class MountTableResolver
public void setDisabled(boolean disable) {
this.disabled = disable;
}
+
+public LongAdder getLocCacheMiss() {
+return locCacheMiss;
+}
+
+public LongAdder getLocCacheAccess() {
+return locCacheAccess;
+}
}
@ -26,6 +26,7 @@ import java.io.IOException;
import java.net.InetAddress;
import java.net.InetSocketAddress;
import java.net.URI;
+import java.security.PrivilegedExceptionAction;
import java.util.Map;
import java.util.concurrent.TimeUnit;

@ -45,6 +46,7 @@ import org.apache.hadoop.hdfs.tools.DFSHAAdmin;
import org.apache.hadoop.hdfs.tools.NNHAServiceTarget;
import org.apache.hadoop.hdfs.web.URLConnectionFactory;
import org.apache.hadoop.net.NetUtils;
+import org.apache.hadoop.security.SecurityUtil;
import org.codehaus.jettison.json.JSONArray;
import org.codehaus.jettison.json.JSONException;
import org.codehaus.jettison.json.JSONObject;

@ -236,7 +238,15 @@ public class NamenodeHeartbeatService extends PeriodicService {

@Override
public void periodicInvoke() {
-updateState();
+try {
+// Run using the login user credentials
+SecurityUtil.doAsLoginUser((PrivilegedExceptionAction<Void>) () -> {
+updateState();
+return null;
+});
+} catch (IOException e) {
+LOG.error("Cannot update namenode state", e);
+}
}

/**
@ -239,6 +239,18 @@ public class RBFConfigKeys extends CommonConfigurationKeysPublic {
public static final long
FEDERATION_STORE_ROUTER_EXPIRATION_DELETION_MS_DEFAULT = -1;

+// HDFS Router-based federation State Store ZK DRIVER
+public static final String FEDERATION_STORE_ZK_DRIVER_PREFIX =
+RBFConfigKeys.FEDERATION_STORE_PREFIX + "driver.zk.";
+public static final String FEDERATION_STORE_ZK_PARENT_PATH =
+FEDERATION_STORE_ZK_DRIVER_PREFIX + "parent-path";
+public static final String FEDERATION_STORE_ZK_PARENT_PATH_DEFAULT =
+"/hdfs-federation";
+public static final String FEDERATION_STORE_ZK_ASYNC_MAX_THREADS =
+FEDERATION_STORE_ZK_DRIVER_PREFIX + "async.max.threads";
+public static final int FEDERATION_STORE_ZK_ASYNC_MAX_THREADS_DEFAULT =
+-1;
+
// HDFS Router safe mode
public static final String DFS_ROUTER_SAFEMODE_ENABLE =
FEDERATION_ROUTER_PREFIX + "safemode.enable";
@ -333,7 +333,7 @@ public class RouterRpcServer extends AbstractService implements ClientProtocol,
.setBindAddress(confRpcAddress.getHostName())
.setPort(confRpcAddress.getPort())
.setNumHandlers(handlerCount)
-.setnumReaders(readerCount)
+.setNumReaders(readerCount)
.setQueueSizePerHandler(handlerQueueSize)
.setVerbose(false)
.setAlignmentContext(routerStateIdContext)
@ -73,7 +73,7 @@ public abstract class RecordStore<R extends BaseRecord> {
 *
 * @return State Store driver.
 */
-protected StateStoreDriver getDriver() {
+public StateStoreDriver getDriver() {
return this.driver;
}
@ -25,7 +25,16 @@ import static org.apache.hadoop.util.Time.monotonicNow;
|
||||||
import java.io.IOException;
|
import java.io.IOException;
|
||||||
import java.util.ArrayList;
|
import java.util.ArrayList;
|
||||||
import java.util.List;
|
import java.util.List;
|
||||||
|
import java.util.concurrent.Callable;
|
||||||
|
import java.util.concurrent.Future;
|
||||||
|
import java.util.concurrent.LinkedBlockingQueue;
|
||||||
|
import java.util.concurrent.ThreadFactory;
|
||||||
|
import java.util.concurrent.ThreadPoolExecutor;
|
||||||
|
import java.util.concurrent.TimeUnit;
|
||||||
|
import java.util.concurrent.atomic.AtomicBoolean;
|
||||||
|
|
||||||
|
import org.apache.hadoop.classification.VisibleForTesting;
|
||||||
|
import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder;
|
||||||
import org.apache.curator.framework.CuratorFramework;
|
import org.apache.curator.framework.CuratorFramework;
|
||||||
import org.apache.curator.framework.imps.CuratorFrameworkState;
|
import org.apache.curator.framework.imps.CuratorFrameworkState;
|
||||||
import org.apache.hadoop.conf.Configuration;
|
import org.apache.hadoop.conf.Configuration;
|
||||||
|
@ -57,14 +66,9 @@ public class StateStoreZooKeeperImpl extends StateStoreSerializableImpl {
|
||||||
private static final Logger LOG =
|
private static final Logger LOG =
|
||||||
LoggerFactory.getLogger(StateStoreZooKeeperImpl.class);
|
LoggerFactory.getLogger(StateStoreZooKeeperImpl.class);
|
||||||
|
|
||||||
|
/** Service to get/update zk state. */
|
||||||
/** Configuration keys. */
|
private ThreadPoolExecutor executorService;
|
||||||
public static final String FEDERATION_STORE_ZK_DRIVER_PREFIX =
|
private boolean enableConcurrent;
|
||||||
RBFConfigKeys.FEDERATION_STORE_PREFIX + "driver.zk.";
|
|
||||||
public static final String FEDERATION_STORE_ZK_PARENT_PATH =
|
|
||||||
FEDERATION_STORE_ZK_DRIVER_PREFIX + "parent-path";
|
|
||||||
public static final String FEDERATION_STORE_ZK_PARENT_PATH_DEFAULT =
|
|
||||||
"/hdfs-federation";
|
|
||||||
|
|
||||||
|
|
||||||
/** Directory to store the state store data. */
|
/** Directory to store the state store data. */
|
||||||
|
@ -82,8 +86,22 @@ public class StateStoreZooKeeperImpl extends StateStoreSerializableImpl {
|
||||||
|
|
||||||
Configuration conf = getConf();
|
Configuration conf = getConf();
|
||||||
baseZNode = conf.get(
|
baseZNode = conf.get(
|
||||||
FEDERATION_STORE_ZK_PARENT_PATH,
|
RBFConfigKeys.FEDERATION_STORE_ZK_PARENT_PATH,
|
||||||
FEDERATION_STORE_ZK_PARENT_PATH_DEFAULT);
|
RBFConfigKeys.FEDERATION_STORE_ZK_PARENT_PATH_DEFAULT);
|
||||||
|
int numThreads = conf.getInt(
|
||||||
|
RBFConfigKeys.FEDERATION_STORE_ZK_ASYNC_MAX_THREADS,
|
||||||
|
RBFConfigKeys.FEDERATION_STORE_ZK_ASYNC_MAX_THREADS_DEFAULT);
|
||||||
|
enableConcurrent = numThreads > 0;
|
||||||
|
if (enableConcurrent) {
|
||||||
|
ThreadFactory threadFactory = new ThreadFactoryBuilder()
|
||||||
|
.setNameFormat("StateStore ZK Client-%d")
|
||||||
|
.build();
|
||||||
|
this.executorService = new ThreadPoolExecutor(numThreads, numThreads,
|
||||||
|
0L, TimeUnit.MILLISECONDS, new LinkedBlockingQueue<>(), threadFactory);
|
||||||
|
LOG.info("Init StateStoreZookeeperImpl by async mode with {} threads.", numThreads);
|
||||||
|
} else {
|
||||||
|
LOG.info("Init StateStoreZookeeperImpl by sync mode.");
|
||||||
|
}
|
||||||
try {
|
try {
|
||||||
this.zkManager = new ZKCuratorManager(conf);
|
this.zkManager = new ZKCuratorManager(conf);
|
||||||
this.zkManager.start();
|
this.zkManager.start();
|
||||||
|
@ -109,8 +127,16 @@ public class StateStoreZooKeeperImpl extends StateStoreSerializableImpl {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@VisibleForTesting
|
||||||
|
public void setEnableConcurrent(boolean enableConcurrent) {
|
||||||
|
this.enableConcurrent = enableConcurrent;
|
||||||
|
}
|
||||||
|
|
||||||
@Override
|
@Override
|
||||||
public void close() throws Exception {
|
public void close() throws Exception {
|
||||||
|
if (executorService != null) {
|
||||||
|
executorService.shutdown();
|
||||||
|
}
|
||||||
if (zkManager != null) {
|
if (zkManager != null) {
|
||||||
zkManager.close();
|
zkManager.close();
|
||||||
}
|
}
|
||||||
|
@ -136,34 +162,21 @@ public class StateStoreZooKeeperImpl extends StateStoreSerializableImpl {
|
||||||
List<T> ret = new ArrayList<>();
|
List<T> ret = new ArrayList<>();
|
||||||
String znode = getZNodeForClass(clazz);
|
String znode = getZNodeForClass(clazz);
|
||||||
try {
|
try {
|
||||||
List<String> children = zkManager.getChildren(znode);
|
List<Callable<T>> callables = new ArrayList<>();
|
||||||
for (String child : children) {
|
zkManager.getChildren(znode).forEach(c -> callables.add(() -> getRecord(clazz, znode, c)));
|
||||||
try {
|
if (enableConcurrent) {
|
||||||
String path = getNodePath(znode, child);
|
List<Future<T>> futures = executorService.invokeAll(callables);
|
||||||
Stat stat = new Stat();
|
for (Future<T> future : futures) {
|
||||||
String data = zkManager.getStringData(path, stat);
|
if (future.get() != null) {
|
||||||
boolean corrupted = false;
|
ret.add(future.get());
|
||||||
if (data == null || data.equals("")) {
|
|
||||||
// All records should have data, otherwise this is corrupted
|
|
||||||
corrupted = true;
|
|
||||||
} else {
|
|
||||||
try {
|
|
||||||
T record = createRecord(data, stat, clazz);
|
|
||||||
ret.add(record);
|
|
||||||
} catch (IOException e) {
|
|
||||||
LOG.error("Cannot create record type \"{}\" from \"{}\": {}",
|
|
||||||
clazz.getSimpleName(), data, e.getMessage());
|
|
||||||
corrupted = true;
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
}
|
||||||
if (corrupted) {
|
} else {
|
||||||
LOG.error("Cannot get data for {} at {}, cleaning corrupted data",
|
for (Callable<T> callable : callables) {
|
||||||
child, path);
|
T record = callable.call();
|
||||||
zkManager.delete(path);
|
if (record != null) {
|
||||||
|
ret.add(record);
|
||||||
}
|
}
|
||||||
} catch (Exception e) {
|
|
||||||
LOG.error("Cannot get data for {}: {}", child, e.getMessage());
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
} catch (Exception e) {
|
} catch (Exception e) {
|
||||||
|
@@ -178,6 +191,44 @@ public class StateStoreZooKeeperImpl extends StateStoreSerializableImpl {
     return new QueryResult<T>(ret, getTime());
   }
 
+  /**
+   * Get one data record in the StateStore or delete it if it's corrupted.
+   *
+   * @param clazz Record class to evaluate.
+   * @param znode The ZNode for the class.
+   * @param child The child for znode to get.
+   * @return The record to get.
+   */
+  private <T extends BaseRecord> T getRecord(Class<T> clazz, String znode, String child) {
+    T record = null;
+    try {
+      String path = getNodePath(znode, child);
+      Stat stat = new Stat();
+      String data = zkManager.getStringData(path, stat);
+      boolean corrupted = false;
+      if (data == null || data.equals("")) {
+        // All records should have data, otherwise this is corrupted
+        corrupted = true;
+      } else {
+        try {
+          record = createRecord(data, stat, clazz);
+        } catch (IOException e) {
+          LOG.error("Cannot create record type \"{}\" from \"{}\": {}",
+              clazz.getSimpleName(), data, e.getMessage());
+          corrupted = true;
+        }
+      }
+
+      if (corrupted) {
+        LOG.error("Cannot get data for {} at {}, cleaning corrupted data", child, path);
+        zkManager.delete(path);
+      }
+    } catch (Exception e) {
+      LOG.error("Cannot get data for {}: {}", child, e.getMessage());
+    }
+    return record;
+  }
+
   @Override
   public <T extends BaseRecord> boolean putAll(
       List<T> records, boolean update, boolean error) throws IOException {
@@ -192,22 +243,40 @@ public class StateStoreZooKeeperImpl extends StateStoreSerializableImpl {
     String znode = getZNodeForClass(recordClass);
 
     long start = monotonicNow();
-    boolean status = true;
-    for (T record : records) {
-      String primaryKey = getPrimaryKey(record);
-      String recordZNode = getNodePath(znode, primaryKey);
-      byte[] data = serialize(record);
-      if (!writeNode(recordZNode, data, update, error)){
-        status = false;
+    final AtomicBoolean status = new AtomicBoolean(true);
+    List<Callable<Void>> callables = new ArrayList<>();
+    records.forEach(record ->
+        callables.add(
+            () -> {
+              String primaryKey = getPrimaryKey(record);
+              String recordZNode = getNodePath(znode, primaryKey);
+              byte[] data = serialize(record);
+              if (!writeNode(recordZNode, data, update, error)) {
+                status.set(false);
+              }
+              return null;
+            }
+        )
+    );
+    try {
+      if (enableConcurrent) {
+        executorService.invokeAll(callables);
+      } else {
+        for(Callable<Void> callable : callables) {
+          callable.call();
+        }
       }
+    } catch (Exception e) {
+      LOG.error("Write record failed : {}", e.getMessage(), e);
+      throw new IOException(e);
     }
     long end = monotonicNow();
-    if (status) {
+    if (status.get()) {
       getMetrics().addWrite(end - start);
     } else {
       getMetrics().addFailure(end - start);
     }
-    return status;
+    return status.get();
   }
 
   @Override
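Editor's note: the getAll and putAll changes above share one pattern — build a list of Callables, then either hand them to the driver's ExecutorService (async mode) or run them inline (sync mode). The following standalone sketch illustrates that pattern with plain JDK types only; fetchRecord and the fixed thread pool are illustrative stand-ins, not Hadoop APIs.

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;

public class ConcurrentFetchSketch {

  // Hypothetical per-child fetch; stands in for getRecord(clazz, znode, child).
  private static String fetchRecord(String child) {
    return child.isEmpty() ? null : "record-" + child;
  }

  public static void main(String[] args) throws Exception {
    List<String> children = Arrays.asList("a", "b", "", "c");
    boolean enableConcurrent = true;   // in the driver this is derived from the thread-pool config
    ExecutorService executorService = Executors.newFixedThreadPool(4);

    List<Callable<String>> callables = new ArrayList<>();
    children.forEach(c -> callables.add(() -> fetchRecord(c)));

    List<String> ret = new ArrayList<>();
    if (enableConcurrent) {
      // Async mode: fan out all fetches and collect the non-null results.
      for (Future<String> future : executorService.invokeAll(callables)) {
        if (future.get() != null) {
          ret.add(future.get());
        }
      }
    } else {
      // Sync mode: the same callables, executed inline one by one.
      for (Callable<String> callable : callables) {
        String record = callable.call();
        if (record != null) {
          ret.add(record);
        }
      }
    }
    executorService.shutdown();
    System.out.println(ret);
  }
}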
@@ -377,6 +377,26 @@
     </description>
   </property>
 
+  <property>
+    <name>dfs.federation.router.store.driver.zk.parent-path</name>
+    <value>/hdfs-federation</value>
+    <description>
+      The parent path of zookeeper for StateStoreZooKeeperImpl.
+    </description>
+  </property>
+
+  <property>
+    <name>dfs.federation.router.store.driver.zk.async.max.threads</name>
+    <value>-1</value>
+    <description>
+      Max threads number of StateStoreZooKeeperImpl in async mode.
+      The only class currently being supported:
+      org.apache.hadoop.hdfs.server.federation.store.driver.impl.StateStoreZooKeeperImpl.
+      Default value is -1, which means StateStoreZooKeeperImpl is working in sync mode.
+      Use positive integer value to enable async mode.
+    </description>
+  </property>
+
   <property>
     <name>dfs.federation.router.cache.ttl</name>
     <value>1m</value>
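Editor's note: the two new keys are read like any other Hadoop configuration entry. A minimal sketch, assuming only the stock org.apache.hadoop.conf.Configuration getters; the class name is illustrative:

import org.apache.hadoop.conf.Configuration;

public class ZkDriverConfigSketch {
  public static void main(String[] args) {
    Configuration conf = new Configuration(false);
    // Defaults below mirror the values documented in the new properties.
    String parentPath = conf.get(
        "dfs.federation.router.store.driver.zk.parent-path", "/hdfs-federation");
    int asyncThreads = conf.getInt(
        "dfs.federation.router.store.driver.zk.async.max.threads", -1);
    boolean asyncMode = asyncThreads > 0;   // -1 (the default) keeps the driver in sync mode
    System.out.println(parentPath + " async=" + asyncMode);
  }
}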
@@ -390,6 +390,7 @@
       <td></td>
       <td></td>
       <td></td>
+      <td></td>
     </tr>
     {/DeadNodes}
   </table>
@@ -729,4 +729,41 @@ public class TestMountTableResolver {
     assertEquals("2->/testInvalidateCache/foo", mountTable
         .getDestinationForPath("/testInvalidateCache/foo").toString());
   }
+
+  /**
+   * Test location cache hit when get destination for path.
+   */
+  @Test
+  public void testLocationCacheHitrate() throws Exception {
+    List<MountTable> entries = new ArrayList<>();
+
+    // Add entry and test location cache
+    Map<String, String> map1 = getMountTableEntry("1", "/testlocationcache");
+    MountTable entry1 = MountTable.newInstance("/testlocationcache", map1);
+    entries.add(entry1);
+
+    Map<String, String> map2 = getMountTableEntry("2",
+        "/anothertestlocationcache");
+    MountTable entry2 = MountTable.newInstance("/anothertestlocationcache",
+        map2);
+    entries.add(entry2);
+
+    mountTable.refreshEntries(entries);
+    mountTable.getLocCacheAccess().reset();
+    mountTable.getLocCacheMiss().reset();
+    assertEquals("1->/testlocationcache",
+        mountTable.getDestinationForPath("/testlocationcache").toString());
+    assertEquals("2->/anothertestlocationcache",
+        mountTable.getDestinationForPath("/anothertestlocationcache")
+            .toString());
+
+    assertEquals(2, mountTable.getLocCacheMiss().intValue());
+    assertEquals("1->/testlocationcache",
+        mountTable.getDestinationForPath("/testlocationcache").toString());
+    assertEquals(3, mountTable.getLocCacheAccess().intValue());
+
+    // Cleanup before exit
+    mountTable.removeEntry("/testlocationcache");
+    mountTable.removeEntry("/anothertestlocationcache");
+  }
 }
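Editor's note: the new test asserts on access/miss counters around the mount-table location cache. A small self-contained sketch of that counting scheme, using plain JDK types and hypothetical names (not the resolver's actual implementation):

import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.atomic.LongAdder;

public class LocationCacheCounters {
  private final Map<String, String> cache = new HashMap<>();
  private final LongAdder access = new LongAdder();
  private final LongAdder miss = new LongAdder();

  String resolve(String path) {
    access.increment();                        // every lookup counts as an access
    return cache.computeIfAbsent(path, p -> {
      miss.increment();                        // only uncached paths count as misses
      return "resolved:" + p;
    });
  }

  public static void main(String[] args) {
    LocationCacheCounters c = new LocationCacheCounters();
    c.resolve("/a");
    c.resolve("/b");
    c.resolve("/a");
    // 3 accesses, 2 misses -> the third lookup was served from the cache.
    System.out.println(c.access.sum() + " accesses, " + c.miss.sum() + " misses");
  }
}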
@@ -26,6 +26,7 @@ import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_NAMENODE_RP
 import static org.apache.hadoop.hdfs.server.federation.FederationTestUtils.NAMENODES;
 import static org.apache.hadoop.hdfs.server.federation.FederationTestUtils.NAMESERVICES;
 import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertNotNull;
 import static org.junit.Assert.assertNull;
 
@@ -36,6 +37,7 @@ import java.util.Iterator;
 import java.util.List;
 
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.contract.router.SecurityConfUtil;
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.server.federation.MockResolver;
@@ -44,6 +46,7 @@ import org.apache.hadoop.hdfs.server.federation.MiniRouterDFSCluster.NamenodeCon
 import org.apache.hadoop.hdfs.server.federation.resolver.ActiveNamenodeResolver;
 import org.apache.hadoop.hdfs.server.federation.resolver.FederationNamenodeContext;
 import org.apache.hadoop.net.MockDomainNameResolver;
+import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.service.Service.STATE;
 import org.junit.AfterClass;
 import org.junit.BeforeClass;
@@ -318,4 +321,33 @@ public class TestRouterNamenodeHeartbeat {
 
     return conf;
   }
+
+  @Test
+  public void testNamenodeHeartbeatWithSecurity() throws Exception {
+    Configuration conf = SecurityConfUtil.initSecurity();
+    MiniRouterDFSCluster testCluster = null;
+    try {
+      testCluster = new MiniRouterDFSCluster(true, 1, conf);
+      // Start Namenodes and routers
+      testCluster.startCluster(conf);
+      testCluster.startRouters();
+
+      // Register Namenodes to generate a NamenodeStatusReport
+      testCluster.registerNamenodes();
+      testCluster.waitNamenodeRegistration();
+
+      for (MiniRouterDFSCluster.RouterContext routerContext : testCluster.getRouters()) {
+        ActiveNamenodeResolver resolver = routerContext.getRouter().getNamenodeResolver();
+        // Validate that NamenodeStatusReport has been registered
+        assertNotNull(resolver.getNamespaces());
+        assertFalse(resolver.getNamespaces().isEmpty());
+      }
+    } finally {
+      if (testCluster != null) {
+        testCluster.shutdown();
+      }
+      UserGroupInformation.reset();
+      SecurityConfUtil.destroy();
+    }
+  }
 }
@@ -2054,7 +2054,7 @@ public class TestRouterRpc {
   @Test
   public void testMkdirsWithCallerContext() throws IOException {
     GenericTestUtils.LogCapturer auditlog =
-        GenericTestUtils.LogCapturer.captureLogs(FSNamesystem.auditLog);
+        GenericTestUtils.LogCapturer.captureLogs(FSNamesystem.AUDIT_LOG);
 
     // Current callerContext is null
     assertNull(CallerContext.getCurrent());
@@ -2092,7 +2092,7 @@ public class TestRouterRpc {
   @Test
   public void testAddClientIpPortToCallerContext() throws IOException {
     GenericTestUtils.LogCapturer auditLog =
-        GenericTestUtils.LogCapturer.captureLogs(FSNamesystem.auditLog);
+        GenericTestUtils.LogCapturer.captureLogs(FSNamesystem.AUDIT_LOG);
 
     // 1. ClientIp and ClientPort are not set on the client.
     // Set client context.
@@ -2127,7 +2127,7 @@ public class TestRouterRpc {
   @Test
   public void testAddClientIdAndCallIdToCallerContext() throws IOException {
     GenericTestUtils.LogCapturer auditLog =
-        GenericTestUtils.LogCapturer.captureLogs(FSNamesystem.auditLog);
+        GenericTestUtils.LogCapturer.captureLogs(FSNamesystem.AUDIT_LOG);
 
     // 1. ClientId and ClientCallId are not set on the client.
     // Set client context.
@@ -440,7 +440,7 @@ public class TestRouterRpcMultiDestination extends TestRouterRpc {
   @Test
   public void testCallerContextWithMultiDestinations() throws IOException {
     GenericTestUtils.LogCapturer auditLog =
-        GenericTestUtils.LogCapturer.captureLogs(FSNamesystem.auditLog);
+        GenericTestUtils.LogCapturer.captureLogs(FSNamesystem.AUDIT_LOG);
 
     // set client context
     CallerContext.setCurrent(
@@ -119,7 +119,7 @@ public class TestStateStoreDriverBase {
   }
 
   @SuppressWarnings("unchecked")
-  private <T extends BaseRecord> T generateFakeRecord(Class<T> recordClass)
+  protected <T extends BaseRecord> T generateFakeRecord(Class<T> recordClass)
       throws IllegalArgumentException, IllegalAccessException, IOException {
 
     if (recordClass == MembershipState.class) {
@@ -18,12 +18,13 @@
 package org.apache.hadoop.hdfs.server.federation.store.driver;
 
 import static org.apache.hadoop.hdfs.server.federation.store.FederationStateStoreTestUtils.getStateStoreConfiguration;
-import static org.apache.hadoop.hdfs.server.federation.store.driver.impl.StateStoreZooKeeperImpl.FEDERATION_STORE_ZK_PARENT_PATH;
-import static org.apache.hadoop.hdfs.server.federation.store.driver.impl.StateStoreZooKeeperImpl.FEDERATION_STORE_ZK_PARENT_PATH_DEFAULT;
 import static org.junit.Assert.assertNotNull;
 import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertTrue;
 
 import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
 import java.util.concurrent.TimeUnit;
 
 import org.apache.curator.framework.CuratorFramework;
@@ -40,6 +41,7 @@ import org.apache.hadoop.hdfs.server.federation.store.records.DisabledNameservic
 import org.apache.hadoop.hdfs.server.federation.store.records.MembershipState;
 import org.apache.hadoop.hdfs.server.federation.store.records.MountTable;
 import org.apache.hadoop.hdfs.server.federation.store.records.RouterState;
+import org.apache.hadoop.util.Time;
 import org.apache.zookeeper.CreateMode;
 import org.junit.AfterClass;
 import org.junit.Before;
@@ -73,9 +75,10 @@ public class TestStateStoreZK extends TestStateStoreDriverBase {
     // Disable auto-repair of connection
     conf.setLong(RBFConfigKeys.FEDERATION_STORE_CONNECTION_TEST_MS,
         TimeUnit.HOURS.toMillis(1));
+    conf.setInt(RBFConfigKeys.FEDERATION_STORE_ZK_ASYNC_MAX_THREADS, 10);
 
-    baseZNode = conf.get(FEDERATION_STORE_ZK_PARENT_PATH,
-        FEDERATION_STORE_ZK_PARENT_PATH_DEFAULT);
+    baseZNode = conf.get(RBFConfigKeys.FEDERATION_STORE_ZK_PARENT_PATH,
+        RBFConfigKeys.FEDERATION_STORE_ZK_PARENT_PATH_DEFAULT);
     getStateStore(conf);
   }
 
@@ -91,6 +94,8 @@ public class TestStateStoreZK extends TestStateStoreDriverBase {
   @Before
   public void startup() throws IOException {
     removeAll(getStateStoreDriver());
+    StateStoreZooKeeperImpl stateStoreZooKeeper = (StateStoreZooKeeperImpl) getStateStoreDriver();
+    stateStoreZooKeeper.setEnableConcurrent(false);
   }
 
   private <T extends BaseRecord> String generateFakeZNode(
@@ -126,33 +131,79 @@ public class TestStateStoreZK extends TestStateStoreDriverBase {
     assertNull(curatorFramework.checkExists().forPath(znode));
   }
 
+  @Test
+  public void testAsyncPerformance() throws Exception {
+    StateStoreZooKeeperImpl stateStoreDriver = (StateStoreZooKeeperImpl) getStateStoreDriver();
+    List<MountTable> insertList = new ArrayList<>();
+    for (int i = 0; i < 1000; i++) {
+      MountTable newRecord = generateFakeRecord(MountTable.class);
+      insertList.add(newRecord);
+    }
+    // Insert Multiple on sync mode
+    long startSync = Time.now();
+    stateStoreDriver.putAll(insertList, true, false);
+    long endSync = Time.now();
+    stateStoreDriver.removeAll(MembershipState.class);
+
+    stateStoreDriver.setEnableConcurrent(true);
+    // Insert Multiple on async mode
+    long startAsync = Time.now();
+    stateStoreDriver.putAll(insertList, true, false);
+    long endAsync = Time.now();
+    assertTrue((endSync - startSync) > (endAsync - startAsync));
+  }
+
   @Test
   public void testGetNullRecord() throws Exception {
-    testGetNullRecord(getStateStoreDriver());
+    StateStoreZooKeeperImpl stateStoreDriver = (StateStoreZooKeeperImpl) getStateStoreDriver();
+    testGetNullRecord(stateStoreDriver);
+
+    // test async mode
+    stateStoreDriver.setEnableConcurrent(true);
+    testGetNullRecord(stateStoreDriver);
   }
 
   @Test
   public void testInsert()
       throws IllegalArgumentException, IllegalAccessException, IOException {
-    testInsert(getStateStoreDriver());
+    StateStoreZooKeeperImpl stateStoreDriver = (StateStoreZooKeeperImpl) getStateStoreDriver();
+    testInsert(stateStoreDriver);
+    // test async mode
+    stateStoreDriver.setEnableConcurrent(true);
+    testInsert(stateStoreDriver);
   }
 
   @Test
   public void testUpdate()
       throws IllegalArgumentException, ReflectiveOperationException,
       IOException, SecurityException {
-    testPut(getStateStoreDriver());
+    StateStoreZooKeeperImpl stateStoreDriver = (StateStoreZooKeeperImpl) getStateStoreDriver();
+    testPut(stateStoreDriver);
+
+    // test async mode
+    stateStoreDriver.setEnableConcurrent(true);
+    testPut(stateStoreDriver);
   }
 
   @Test
   public void testDelete()
       throws IllegalArgumentException, IllegalAccessException, IOException {
-    testRemove(getStateStoreDriver());
+    StateStoreZooKeeperImpl stateStoreDriver = (StateStoreZooKeeperImpl) getStateStoreDriver();
+    testRemove(stateStoreDriver);
+
+    // test async mode
+    stateStoreDriver.setEnableConcurrent(true);
+    testRemove(stateStoreDriver);
   }
 
   @Test
   public void testFetchErrors()
       throws IllegalArgumentException, IllegalAccessException, IOException {
-    testFetchErrors(getStateStoreDriver());
+    StateStoreZooKeeperImpl stateStoreDriver = (StateStoreZooKeeperImpl) getStateStoreDriver();
+    testFetchErrors(stateStoreDriver);
+
+    // test async mode
+    stateStoreDriver.setEnableConcurrent(true);
+    testFetchErrors(stateStoreDriver);
   }
 }
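Editor's note: the updated TestStateStoreZK cases all follow the same shape — run the shared assertion body once in sync mode, then flip the driver to concurrent mode and run it again. A generic sketch of that test pattern; the Driver interface here is hypothetical, not the Hadoop type:

import java.util.function.Consumer;

public class DualModeTestSketch {

  interface Driver {
    void setEnableConcurrent(boolean enable);
  }

  // Run the same assertion body once in sync mode and once in async mode
  // against the same driver instance, as the updated tests do.
  static void runInBothModes(Driver driver, Consumer<Driver> body) {
    driver.setEnableConcurrent(false);
    body.accept(driver);
    driver.setEnableConcurrent(true);
    body.accept(driver);
  }

  public static void main(String[] args) {
    Driver fake = enable -> System.out.println("concurrent=" + enable);
    runInBothModes(fake, d -> System.out.println("  ...assertions here..."));
  }
}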
@@ -117,11 +117,6 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd">
       <artifactId>commons-io</artifactId>
       <scope>compile</scope>
     </dependency>
-    <dependency>
-      <groupId>commons-logging</groupId>
-      <artifactId>commons-logging</artifactId>
-      <scope>compile</scope>
-    </dependency>
     <dependency>
       <groupId>commons-daemon</groupId>
       <artifactId>commons-daemon</artifactId>
@@ -152,6 +147,10 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd">
           <groupId>xml-apis</groupId>
           <artifactId>xml-apis</artifactId>
         </exclusion>
+        <exclusion>
+          <groupId>xerces</groupId>
+          <artifactId>xercesImpl</artifactId>
+        </exclusion>
       </exclusions>
     </dependency>
 
@@ -175,11 +174,6 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd">
       <artifactId>slf4j-log4j12</artifactId>
       <scope>provided</scope>
     </dependency>
-    <dependency>
-      <groupId>io.netty</groupId>
-      <artifactId>netty</artifactId>
-      <scope>compile</scope>
-    </dependency>
     <dependency>
       <groupId>io.netty</groupId>
       <artifactId>netty-all</artifactId>
@@ -395,12 +395,12 @@ public class BlockManager implements BlockStatsMXBean {
    * The maximum number of outgoing replication streams a given node should have
    * at one time considering all but the highest priority replications needed.
    */
-  int maxReplicationStreams;
+  private volatile int maxReplicationStreams;
   /**
    * The maximum number of outgoing replication streams a given node should have
    * at one time.
    */
-  int replicationStreamsHardLimit;
+  private volatile int replicationStreamsHardLimit;
   /** Minimum copies needed or else write is disallowed */
   public final short minReplication;
   /** Default number of replicas */
@@ -409,7 +409,7 @@ public class BlockManager implements BlockStatsMXBean {
   final int maxCorruptFilesReturned;
 
   final float blocksInvalidateWorkPct;
-  private int blocksReplWorkMultiplier;
+  private volatile int blocksReplWorkMultiplier;
 
   // whether or not to issue block encryption keys.
   final boolean encryptDataTransfer;
@@ -1017,12 +1017,19 @@ public class BlockManager implements BlockStatsMXBean {
    *
    * @param newVal - Must be a positive non-zero integer.
    */
-  public void setMaxReplicationStreams(int newVal) {
-    ensurePositiveInt(newVal,
-        DFSConfigKeys.DFS_NAMENODE_REPLICATION_MAX_STREAMS_KEY);
+  @VisibleForTesting
+  public void setMaxReplicationStreams(int newVal, boolean ensurePositiveInt) {
+    if (ensurePositiveInt) {
+      ensurePositiveInt(newVal,
+          DFSConfigKeys.DFS_NAMENODE_REPLICATION_MAX_STREAMS_KEY);
+    }
     maxReplicationStreams = newVal;
   }
 
+  public void setMaxReplicationStreams(int newVal) {
+    setMaxReplicationStreams(newVal, true);
+  }
+
   /** Returns the current setting for maxReplicationStreamsHardLimit, set by
    * {@code DFSConfigKeys.DFS_NAMENODE_REPLICATION_STREAMS_HARD_LIMIT_KEY}.
    *
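Editor's note: marking the replication limits volatile is what lets a reconfiguration thread publish new values that worker threads observe without locking; the updates are simple assignments, so volatile visibility is enough. A minimal sketch of that rationale, not the BlockManager code itself:

public class ReconfigurableLimits {
  // volatile: a reconfiguration thread writes a new value and worker threads
  // see it without extra locking (single assignments, no compound updates).
  private volatile int maxReplicationStreams = 2;

  void setMaxReplicationStreams(int newVal, boolean ensurePositive) {
    if (ensurePositive && newVal <= 0) {
      throw new IllegalArgumentException("must be > 0: " + newVal);
    }
    maxReplicationStreams = newVal;
  }

  int getMaxReplicationStreams() {
    return maxReplicationStreams;
  }

  public static void main(String[] args) throws InterruptedException {
    ReconfigurableLimits limits = new ReconfigurableLimits();
    Thread reader = new Thread(() -> {
      while (limits.getMaxReplicationStreams() < 10) {
        // spin until the reconfiguration becomes visible
      }
      System.out.println("observed " + limits.getMaxReplicationStreams());
    });
    reader.start();
    limits.setMaxReplicationStreams(10, true);
    reader.join();
  }
}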
@@ -59,7 +59,7 @@ class PendingReconstructionBlocks {
   // It might take anywhere between 5 to 10 minutes before
   // a request is timed out.
   //
-  private long timeout =
+  private volatile long timeout =
       DFS_NAMENODE_RECONSTRUCTION_PENDING_TIMEOUT_SEC_DEFAULT * 1000;
   private final static long DEFAULT_RECHECK_INTERVAL = 5 * 60 * 1000;
 
@@ -31,8 +31,6 @@ import javax.management.MBeanServer;
 import javax.management.MalformedObjectNameException;
 import javax.management.ObjectName;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.impl.Log4JLogger;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import org.apache.hadoop.metrics2.util.MBeans;
@@ -58,13 +56,12 @@ public class MetricsLoggerTask implements Runnable {
     }
   }
 
-  private Log metricsLog;
+  private org.apache.log4j.Logger metricsLog;
   private String nodeName;
   private short maxLogLineLength;
 
-  public MetricsLoggerTask(Log metricsLog, String nodeName,
-      short maxLogLineLength) {
-    this.metricsLog = metricsLog;
+  public MetricsLoggerTask(String metricsLog, String nodeName, short maxLogLineLength) {
+    this.metricsLog = org.apache.log4j.Logger.getLogger(metricsLog);
     this.nodeName = nodeName;
     this.maxLogLineLength = maxLogLineLength;
   }
@@ -118,13 +115,8 @@ public class MetricsLoggerTask implements Runnable {
         .substring(0, maxLogLineLength) + "...");
   }
 
-  private static boolean hasAppenders(Log logger) {
-    if (!(logger instanceof Log4JLogger)) {
-      // Don't bother trying to determine the presence of appenders.
-      return true;
-    }
-    Log4JLogger log4JLogger = ((Log4JLogger) logger);
-    return log4JLogger.getLogger().getAllAppenders().hasMoreElements();
+  private static boolean hasAppenders(org.apache.log4j.Logger logger) {
+    return logger.getAllAppenders().hasMoreElements();
   }
 
   /**
@@ -150,13 +142,8 @@ public class MetricsLoggerTask implements Runnable {
    * Make the metrics logger async and add all pre-existing appenders to the
    * async appender.
    */
-  public static void makeMetricsLoggerAsync(Log metricsLog) {
-    if (!(metricsLog instanceof Log4JLogger)) {
-      LOG.warn("Metrics logging will not be async since "
-          + "the logger is not log4j");
-      return;
-    }
-    org.apache.log4j.Logger logger = ((Log4JLogger) metricsLog).getLogger();
+  public static void makeMetricsLoggerAsync(String metricsLog) {
+    org.apache.log4j.Logger logger = org.apache.log4j.Logger.getLogger(metricsLog);
     logger.setAdditivity(false); // Don't pollute actual logs with metrics dump
 
     @SuppressWarnings("unchecked")
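Editor's note: the refactor passes a logger name instead of a commons-logging Log and resolves the log4j logger internally. A hedged, standalone sketch of the by-name lookup, the appender check, and async wrapping; it assumes log4j 1.x is on the classpath (as the patched class does) and is not the patched method itself:

import java.util.Collections;
import java.util.List;
import org.apache.log4j.Appender;
import org.apache.log4j.AsyncAppender;
import org.apache.log4j.Logger;

public class MetricsLogAsyncSketch {
  public static void main(String[] args) {
    // Resolve the logger by name, which is what the new constructor does internally.
    Logger metricsLog = Logger.getLogger("DataNodeMetricsLog");

    // Same usability check the patch keeps: is anything attached to this logger?
    boolean hasAppenders = metricsLog.getAllAppenders().hasMoreElements();

    // Make the logger async: move existing appenders behind an AsyncAppender so a
    // slow appender cannot block the thread emitting the metrics dump.
    metricsLog.setAdditivity(false);
    @SuppressWarnings("unchecked")
    List<Appender> appenders = Collections.list(metricsLog.getAllAppenders());
    AsyncAppender asyncAppender = new AsyncAppender();
    for (Appender appender : appenders) {
      metricsLog.removeAppender(appender);
      asyncAppender.addAppender(appender);
    }
    metricsLog.addAppender(asyncAppender);
    System.out.println("appenders present before wrapping: " + hasAppenders);
  }
}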
@@ -679,15 +679,20 @@ class BPOfferService {
       actor.reRegister();
       return false;
     }
-    writeLock();
+    boolean isActiveActor;
+    InetSocketAddress nnSocketAddress;
+    readLock();
     try {
-      if (actor == bpServiceToActive) {
-        return processCommandFromActive(cmd, actor);
-      } else {
-        return processCommandFromStandby(cmd, actor);
-      }
+      isActiveActor = (actor == bpServiceToActive);
+      nnSocketAddress = actor.getNNSocketAddress();
     } finally {
-      writeUnlock();
+      readUnlock();
+    }
+
+    if (isActiveActor) {
+      return processCommandFromActive(cmd, nnSocketAddress);
+    } else {
+      return processCommandFromStandby(cmd, nnSocketAddress);
     }
   }
 
@@ -715,7 +720,7 @@ class BPOfferService {
    * @throws IOException
    */
   private boolean processCommandFromActive(DatanodeCommand cmd,
-      BPServiceActor actor) throws IOException {
+      InetSocketAddress nnSocketAddress) throws IOException {
     final BlockCommand bcmd =
         cmd instanceof BlockCommand? (BlockCommand)cmd: null;
     final BlockIdCommand blockIdCmd =
@@ -768,7 +773,7 @@ class BPOfferService {
       dn.finalizeUpgradeForPool(bp);
       break;
     case DatanodeProtocol.DNA_RECOVERBLOCK:
-      String who = "NameNode at " + actor.getNNSocketAddress();
+      String who = "NameNode at " + nnSocketAddress;
       dn.getBlockRecoveryWorker().recoverBlocks(who,
           ((BlockRecoveryCommand)cmd).getRecoveringBlocks());
       break;
@@ -810,11 +815,11 @@ class BPOfferService {
    * DNA_REGISTER which should be handled earlier itself.
    */
   private boolean processCommandFromStandby(DatanodeCommand cmd,
-      BPServiceActor actor) throws IOException {
+      InetSocketAddress nnSocketAddress) throws IOException {
     switch(cmd.getAction()) {
     case DatanodeProtocol.DNA_ACCESSKEYUPDATE:
       LOG.info("DatanodeCommand action from standby NN {}: DNA_ACCESSKEYUPDATE",
-          actor.getNNSocketAddress());
+          nnSocketAddress);
       if (dn.isBlockTokenEnabled) {
         dn.blockPoolTokenSecretManager.addKeys(
             getBlockPoolId(),
@@ -831,11 +836,11 @@ class BPOfferService {
     case DatanodeProtocol.DNA_UNCACHE:
     case DatanodeProtocol.DNA_ERASURE_CODING_RECONSTRUCTION:
       LOG.warn("Got a command from standby NN {} - ignoring command: {}",
-          actor.getNNSocketAddress(), cmd.getAction());
+          nnSocketAddress, cmd.getAction());
       break;
     default:
       LOG.warn("Unknown DatanodeCommand action: {} from standby NN {}",
-          cmd.getAction(), actor.getNNSocketAddress());
+          cmd.getAction(), nnSocketAddress);
     }
     return true;
   }
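Editor's note: the locking change snapshots the two facts it needs (is this the active actor, and its NN address) under a read lock, then runs the possibly slow command processing outside the lock. A standalone sketch of that read-then-act pattern with plain JDK types; the names are illustrative:

import java.util.concurrent.locks.ReentrantReadWriteLock;

public class ReadThenActSketch {
  private final ReentrantReadWriteLock lock = new ReentrantReadWriteLock();
  private volatile String activeActor = "nn1";

  // Snapshot shared state under a read lock, release it, then do the work.
  boolean processCommand(String actor, Runnable activeWork, Runnable standbyWork) {
    boolean isActive;
    lock.readLock().lock();
    try {
      isActive = actor.equals(activeActor);
    } finally {
      lock.readLock().unlock();
    }
    if (isActive) {
      activeWork.run();
    } else {
      standbyWork.run();
    }
    return true;
  }

  public static void main(String[] args) {
    ReadThenActSketch s = new ReadThenActSketch();
    s.processCommand("nn1",
        () -> System.out.println("active path"),
        () -> System.out.println("standby path"));
  }
}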
@@ -202,10 +202,13 @@ class BPServiceActor implements Runnable {
   Map<String, String> getActorInfoMap() {
     final Map<String, String> info = new HashMap<String, String>();
     info.put("NamenodeAddress", getNameNodeAddress());
+    info.put("NamenodeHaState", state != null ? state.toString() : "Unknown");
     info.put("BlockPoolID", bpos.getBlockPoolId());
     info.put("ActorState", getRunningState());
     info.put("LastHeartbeat",
         String.valueOf(getScheduler().getLastHearbeatTime()));
+    info.put("LastHeartbeatResponseTime",
+        String.valueOf(getScheduler().getLastHeartbeatResponseTime()));
     info.put("LastBlockReport",
         String.valueOf(getScheduler().getLastBlockReportTime()));
     info.put("maxBlockReportSize", String.valueOf(getMaxBlockReportSize()));
@@ -579,6 +582,8 @@ class BPServiceActor implements Runnable {
         slowPeers,
         slowDisks);
 
+    scheduler.updateLastHeartbeatResponseTime(monotonicNow());
+
     if (outliersReportDue) {
       // If the report was due and successfully sent, schedule the next one.
       scheduler.scheduleNextOutlierReport();
@@ -697,6 +702,8 @@ class BPServiceActor implements Runnable {
         // Every so often, send heartbeat or block-report
         //
         final boolean sendHeartbeat = scheduler.isHeartbeatDue(startTime);
+        LOG.debug("BP offer service run start time: {}, sendHeartbeat: {}", startTime,
+            sendHeartbeat);
         HeartbeatResponse resp = null;
         if (sendHeartbeat) {
           //
@@ -709,6 +716,8 @@ class BPServiceActor implements Runnable {
           boolean requestBlockReportLease = (fullBlockReportLeaseId == 0) &&
                   scheduler.isBlockReportDue(startTime);
           if (!dn.areHeartbeatsDisabledForTests()) {
+            LOG.debug("Before sending heartbeat to namenode {}, the state of the namenode known"
+                + " to datanode so far is {}", this.getNameNodeAddress(), state);
             resp = sendHeartBeat(requestBlockReportLease);
             assert resp != null;
             if (resp.getFullBlockReportLeaseId() != 0) {
@@ -733,7 +742,12 @@ class BPServiceActor implements Runnable {
           // that we should actually process.
           bpos.updateActorStatesFromHeartbeat(
               this, resp.getNameNodeHaState());
-          state = resp.getNameNodeHaState().getState();
+          HAServiceState stateFromResp = resp.getNameNodeHaState().getState();
+          if (state != stateFromResp) {
+            LOG.info("After receiving heartbeat response, updating state of namenode {} to {}",
+                this.getNameNodeAddress(), stateFromResp);
+          }
+          state = stateFromResp;
 
           if (state == HAServiceState.ACTIVE) {
             handleRollingUpgradeStatus(resp);
@@ -794,6 +808,7 @@ class BPServiceActor implements Runnable {
           long sleepTime = Math.min(1000, dnConf.heartBeatInterval);
           Thread.sleep(sleepTime);
         } catch (InterruptedException ie) {
+          LOG.info("BPServiceActor {} is interrupted", this);
           Thread.currentThread().interrupt();
         }
       }
@@ -995,6 +1010,8 @@ class BPServiceActor implements Runnable {
     while (!duplicateQueue.isEmpty()) {
       BPServiceActorAction actionItem = duplicateQueue.remove();
       try {
+        LOG.debug("BPServiceActor ( {} ) processing queued messages. Action item: {}", this,
+            actionItem);
        actionItem.reportTo(bpNamenode, bpRegistration);
       } catch (BPServiceActorActionException baae) {
         LOG.warn(baae.getMessage() + nnAddr , baae);
@@ -1189,6 +1206,9 @@ class BPServiceActor implements Runnable {
     @VisibleForTesting
     volatile long lastHeartbeatTime = monotonicNow();
 
+    @VisibleForTesting
+    private volatile long lastHeartbeatResponseTime = -1;
+
     @VisibleForTesting
     boolean resetBlockReportTime = true;
 
@@ -1237,6 +1257,10 @@ class BPServiceActor implements Runnable {
       lastHeartbeatTime = heartbeatTime;
     }
 
+    void updateLastHeartbeatResponseTime(long heartbeatTime) {
+      this.lastHeartbeatResponseTime = heartbeatTime;
+    }
+
     void updateLastBlockReportTime(long blockReportTime) {
       lastBlockReportTime = blockReportTime;
     }
@@ -1249,6 +1273,10 @@ class BPServiceActor implements Runnable {
       return (monotonicNow() - lastHeartbeatTime)/1000;
     }
 
+    private long getLastHeartbeatResponseTime() {
+      return (monotonicNow() - lastHeartbeatResponseTime) / 1000;
+    }
+
     long getLastBlockReportTime() {
       return (monotonicNow() - lastBlockReportTime)/1000;
     }
@@ -1471,7 +1499,7 @@ class BPServiceActor implements Runnable {
         dn.getMetrics().addNumProcessedCommands(processCommandsMs);
       }
       if (processCommandsMs > dnConf.getProcessCommandsThresholdMs()) {
-        LOG.info("Took {} ms to process {} commands from NN",
+        LOG.warn("Took {} ms to process {} commands from NN",
             processCommandsMs, cmds.length);
       }
     }
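Editor's note: LastHeartbeatResponseTime is recorded with the monotonic clock and reported as seconds since the last response. A tiny sketch of the same bookkeeping with plain JDK calls; System.nanoTime stands in for Hadoop's Time.monotonicNow:

public class HeartbeatClockSketch {
  private volatile long lastHeartbeatResponseTime = -1;

  // Monotonic clock, so wall-clock adjustments do not skew the elapsed time.
  private static long monotonicNow() {
    return System.nanoTime() / 1_000_000;
  }

  void onHeartbeatResponse() {
    lastHeartbeatResponseTime = monotonicNow();
  }

  long secondsSinceLastResponse() {
    return (monotonicNow() - lastHeartbeatResponseTime) / 1000;
  }

  public static void main(String[] args) throws InterruptedException {
    HeartbeatClockSketch s = new HeartbeatClockSketch();
    s.onHeartbeatResponse();
    Thread.sleep(20);
    System.out.println(s.secondsSinceLastResponse() + "s since last heartbeat response");
  }
}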
@@ -35,7 +35,6 @@ import java.util.Queue;
 import java.util.concurrent.atomic.AtomicLong;
 import java.util.zip.Checksum;
 
-import org.apache.commons.logging.Log;
 import org.apache.hadoop.fs.ChecksumException;
 import org.apache.hadoop.fs.FSOutputSummer;
 import org.apache.hadoop.fs.StorageType;
@@ -73,7 +72,7 @@ import org.slf4j.Logger;
  **/
 class BlockReceiver implements Closeable {
   public static final Logger LOG = DataNode.LOG;
-  static final Log ClientTraceLog = DataNode.ClientTraceLog;
+  static final Logger CLIENT_TRACE_LOG = DataNode.CLIENT_TRACE_LOG;
 
   @VisibleForTesting
   static long CACHE_DROP_LAG_BYTES = 8 * 1024 * 1024;
@@ -1402,7 +1401,7 @@ class BlockReceiver implements Closeable {
     public void run() {
       datanode.metrics.incrDataNodePacketResponderCount();
       boolean lastPacketInBlock = false;
-      final long startTime = ClientTraceLog.isInfoEnabled() ? System.nanoTime() : 0;
+      final long startTime = CLIENT_TRACE_LOG.isInfoEnabled() ? System.nanoTime() : 0;
       while (isRunning() && !lastPacketInBlock) {
         long totalAckTimeNanos = 0;
         boolean isInterrupted = false;
@@ -1557,7 +1556,7 @@ class BlockReceiver implements Closeable {
         // Hold a volume reference to finalize block.
         try (ReplicaHandler handler = BlockReceiver.this.claimReplicaHandler()) {
           BlockReceiver.this.close();
-          endTime = ClientTraceLog.isInfoEnabled() ? System.nanoTime() : 0;
+          endTime = CLIENT_TRACE_LOG.isInfoEnabled() ? System.nanoTime() : 0;
           block.setNumBytes(replicaInfo.getNumBytes());
           datanode.data.finalizeBlock(block, dirSyncOnFinalize);
         }
@@ -1568,11 +1567,11 @@ class BlockReceiver implements Closeable {
 
       datanode.closeBlock(block, null, replicaInfo.getStorageUuid(),
          replicaInfo.isOnTransientStorage());
-      if (ClientTraceLog.isInfoEnabled() && isClient) {
+      if (CLIENT_TRACE_LOG.isInfoEnabled() && isClient) {
         long offset = 0;
         DatanodeRegistration dnR = datanode.getDNRegistrationForBP(block
             .getBlockPoolId());
-        ClientTraceLog.info(String.format(DN_CLIENTTRACE_FORMAT, inAddr,
+        CLIENT_TRACE_LOG.info(String.format(DN_CLIENTTRACE_FORMAT, inAddr,
             myAddr, replicaInfo.getVolume(), block.getNumBytes(),
             "HDFS_WRITE", clientname, offset, dnR.getDatanodeUuid(),
             block, endTime - startTime));
@@ -32,7 +32,6 @@ import java.nio.channels.FileChannel;
 import java.util.Arrays;
 import java.util.concurrent.TimeUnit;
 
-import org.apache.commons.logging.Log;
 import org.apache.hadoop.fs.ChecksumException;
 import org.apache.hadoop.fs.FsTracer;
 import org.apache.hadoop.hdfs.DFSUtilClient;
@@ -103,7 +102,7 @@ import org.slf4j.Logger;
  */
 class BlockSender implements java.io.Closeable {
   static final Logger LOG = DataNode.LOG;
-  static final Log ClientTraceLog = DataNode.ClientTraceLog;
+  static final Logger CLIENT_TRACE_LOG = DataNode.CLIENT_TRACE_LOG;
   private static final boolean is32Bit =
       System.getProperty("sun.arch.data.model").equals("32");
   /**
@@ -784,7 +783,7 @@ class BlockSender implements java.io.Closeable {
     // Trigger readahead of beginning of file if configured.
     manageOsCache();
 
-    final long startTime = ClientTraceLog.isDebugEnabled() ? System.nanoTime() : 0;
+    final long startTime = CLIENT_TRACE_LOG.isDebugEnabled() ? System.nanoTime() : 0;
     try {
       int maxChunksPerPacket;
       int pktBufSize = PacketHeader.PKT_MAX_HEADER_LEN;
@@ -831,9 +830,9 @@ class BlockSender implements java.io.Closeable {
         sentEntireByteRange = true;
       }
     } finally {
-      if ((clientTraceFmt != null) && ClientTraceLog.isDebugEnabled()) {
+      if ((clientTraceFmt != null) && CLIENT_TRACE_LOG.isDebugEnabled()) {
         final long endTime = System.nanoTime();
-        ClientTraceLog.debug(String.format(clientTraceFmt, totalRead,
+        CLIENT_TRACE_LOG.debug(String.format(clientTraceFmt, totalRead,
            initialOffset, endTime - startTime));
       }
       close();
@@ -140,8 +140,6 @@ import javax.annotation.Nullable;
 import javax.management.ObjectName;
 import javax.net.SocketFactory;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.HadoopIllegalArgumentException;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
@@ -319,8 +317,8 @@ public class DataNode extends ReconfigurableBase
       ", blockid: %s" + // block id
       ", duration(ns): %s"; // duration time
 
-  static final Log ClientTraceLog =
-      LogFactory.getLog(DataNode.class.getName() + ".clienttrace");
+  static final Logger CLIENT_TRACE_LOG =
+      LoggerFactory.getLogger(DataNode.class.getName() + ".clienttrace");
 
   private static final String USAGE =
       "Usage: hdfs datanode [-regular | -rollback | -rollingupgrade rollback" +
@@ -360,7 +358,7 @@ public class DataNode extends ReconfigurableBase
           FS_GETSPACEUSED_JITTER_KEY,
           FS_GETSPACEUSED_CLASSNAME));
 
-  public static final Log METRICS_LOG = LogFactory.getLog("DataNodeMetricsLog");
+  public static final String METRICS_LOG_NAME = "DataNodeMetricsLog";
 
   private static final String DATANODE_HTRACE_PREFIX = "datanode.htrace.";
   private final FileIoProvider fileIoProvider;
@@ -3621,8 +3619,12 @@ public class DataNode extends ReconfigurableBase
    */
   @Override // DataNodeMXBean
   public String getBPServiceActorInfo() {
-    final ArrayList<Map<String, String>> infoArray =
-        new ArrayList<Map<String, String>>();
+    return JSON.toString(getBPServiceActorInfoMap());
+  }
+
+  @VisibleForTesting
+  public List<Map<String, String>> getBPServiceActorInfoMap() {
+    final List<Map<String, String>> infoArray = new ArrayList<>();
     for (BPOfferService bpos : blockPoolManager.getAllNamenodeThreads()) {
       if (bpos != null) {
         for (BPServiceActor actor : bpos.getBPServiceActors()) {
@@ -3630,7 +3632,7 @@ public class DataNode extends ReconfigurableBase
         }
       }
     }
-    return JSON.toString(infoArray);
+    return infoArray;
   }
 
   /**
@@ -3825,6 +3827,29 @@ public class DataNode extends ReconfigurableBase
    * @return true - if the data node is fully started
    */
   public boolean isDatanodeFullyStarted() {
+    return isDatanodeFullyStarted(false);
+  }
+
+  /**
+   * A datanode is considered to be fully started if all the BP threads are
+   * alive and all the block pools are initialized. If checkConnectionToActiveNamenode is true,
+   * the datanode is considered to be fully started if it is also heartbeating to
+   * active namenode in addition to the above-mentioned conditions.
+   *
+   * @param checkConnectionToActiveNamenode if true, performs additional check of whether datanode
+   *     is heartbeating to active namenode.
+   * @return true if the datanode is fully started and also conditionally connected to active
+   *     namenode, false otherwise.
+   */
+  public boolean isDatanodeFullyStarted(boolean checkConnectionToActiveNamenode) {
+    if (checkConnectionToActiveNamenode) {
+      for (BPOfferService bp : blockPoolManager.getAllNamenodeThreads()) {
+        if (!bp.isInitialized() || !bp.isAlive() || bp.getActiveNN() == null) {
+          return false;
+        }
+      }
+      return true;
+    }
     for (BPOfferService bp : blockPoolManager.getAllNamenodeThreads()) {
       if (!bp.isInitialized() || !bp.isAlive()) {
         return false;
@@ -4033,12 +4058,12 @@ public class DataNode extends ReconfigurableBase
       return;
     }
 
-    MetricsLoggerTask.makeMetricsLoggerAsync(METRICS_LOG);
+    MetricsLoggerTask.makeMetricsLoggerAsync(METRICS_LOG_NAME);
 
     // Schedule the periodic logging.
     metricsLoggerTimer = new ScheduledThreadPoolExecutor(1);
     metricsLoggerTimer.setExecuteExistingDelayedTasksAfterShutdownPolicy(false);
-    metricsLoggerTimer.scheduleWithFixedDelay(new MetricsLoggerTask(METRICS_LOG,
+    metricsLoggerTimer.scheduleWithFixedDelay(new MetricsLoggerTask(METRICS_LOG_NAME,
         "DataNode", (short) 0), metricsLoggerPeriodSec, metricsLoggerPeriodSec,
         TimeUnit.SECONDS);
   }
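Editor's note: callers that need a datanode which is not just started but also heartbeating to an active namenode can poll the new overload. A hedged usage sketch with a plain polling helper; the BooleanSupplier stands in for dn.isDatanodeFullyStarted(true), and in Hadoop tests this role is usually played by a test utility rather than a hand-rolled loop:

import java.util.function.BooleanSupplier;

public class WaitForStartupSketch {

  // Generic poll-until-true helper with a timeout.
  static void waitFor(BooleanSupplier condition, long timeoutMs) throws InterruptedException {
    long deadline = System.currentTimeMillis() + timeoutMs;
    while (!condition.getAsBoolean()) {
      if (System.currentTimeMillis() > deadline) {
        throw new IllegalStateException("timed out waiting for condition");
      }
      Thread.sleep(100);
    }
  }

  public static void main(String[] args) throws InterruptedException {
    // Hypothetical stand-in for dn.isDatanodeFullyStarted(true): also requires a
    // registered active namenode, not just initialized and alive block pools.
    BooleanSupplier fullyStartedAndConnected = () -> true;
    waitFor(fullyStartedAndConnected, 30_000);
    System.out.println("datanode fully started and heartbeating to an active NN");
  }
}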
@@ -18,8 +18,6 @@
 
 package org.apache.hadoop.hdfs.server.datanode;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.server.common.AutoCloseDataSetLock;
@@ -29,11 +27,14 @@ import java.util.HashMap;
 import java.util.Stack;
 import java.util.concurrent.locks.ReentrantReadWriteLock;
 
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
 /**
  * Class for maintain a set of lock for fsDataSetImpl.
  */
 public class DataSetLockManager implements DataNodeLockManager<AutoCloseDataSetLock> {
-  public static final Log LOG = LogFactory.getLog(DataSetLockManager.class);
+  public static final Logger LOG = LoggerFactory.getLogger(DataSetLockManager.class);
   private final HashMap<String, TrackLog> threadCountMap = new HashMap<>();
   private final LockMap lockMap = new LockMap();
   private boolean isFair = true;
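Editor's note: several files in this commit make the same move from commons-logging to slf4j; the replacement is a LoggerFactory lookup plus parameterized messages. A minimal sketch, with an illustrative class name:

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class Slf4jMigrationSketch {
  // The commit's recurring change: commons-logging Log/LogFactory replaced by slf4j.
  private static final Logger LOG = LoggerFactory.getLogger(Slf4jMigrationSketch.class);

  public static void main(String[] args) {
    // Parameterized messages avoid string concatenation when the level is disabled.
    LOG.info("lock manager initialised, fair={}", true);
  }
}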
@@ -21,7 +21,6 @@ import org.apache.hadoop.classification.VisibleForTesting;
 import org.apache.hadoop.util.Preconditions;
 import org.apache.hadoop.thirdparty.protobuf.ByteString;
 import javax.crypto.SecretKey;
-import org.apache.commons.logging.Log;
 import org.apache.hadoop.fs.FsTracer;
 import org.apache.hadoop.fs.StorageType;
 import org.apache.hadoop.hdfs.DFSUtilClient;
@@ -105,7 +104,7 @@ import static org.apache.hadoop.util.Time.monotonicNow;
  */
 class DataXceiver extends Receiver implements Runnable {
   public static final Logger LOG = DataNode.LOG;
-  static final Log ClientTraceLog = DataNode.ClientTraceLog;
+  static final Logger CLIENT_TRACE_LOG = DataNode.CLIENT_TRACE_LOG;
 
   private Peer peer;
   private final String remoteAddress; // address of remote side
@@ -426,10 +425,10 @@ class DataXceiver extends Receiver implements Runnable {
             registeredSlotId);
         datanode.shortCircuitRegistry.unregisterSlot(registeredSlotId);
       }
-      if (ClientTraceLog.isInfoEnabled()) {
+      if (CLIENT_TRACE_LOG.isInfoEnabled()) {
         DatanodeRegistration dnR = datanode.getDNRegistrationForBP(blk
             .getBlockPoolId());
-        BlockSender.ClientTraceLog.info(String.format(
+        BlockSender.CLIENT_TRACE_LOG.info(String.format(
             "src: 127.0.0.1, dest: 127.0.0.1, op: REQUEST_SHORT_CIRCUIT_FDS," +
             " blockid: %s, srvID: %s, success: %b",
             blk.getBlockId(), dnR.getDatanodeUuid(), success));
@@ -466,8 +465,8 @@ class DataXceiver extends Receiver implements Runnable {
       bld.build().writeDelimitedTo(socketOut);
       success = true;
     } finally {
-      if (ClientTraceLog.isInfoEnabled()) {
-        BlockSender.ClientTraceLog.info(String.format(
+      if (CLIENT_TRACE_LOG.isInfoEnabled()) {
+        BlockSender.CLIENT_TRACE_LOG.info(String.format(
             "src: 127.0.0.1, dest: 127.0.0.1, op: RELEASE_SHORT_CIRCUIT_FDS," +
             " shmId: %016x%016x, slotIdx: %d, srvID: %s, success: %b",
             slotId.getShmId().getHi(), slotId.getShmId().getLo(),
@@ -526,9 +525,9 @@ class DataXceiver extends Receiver implements Runnable {
       sendShmSuccessResponse(sock, shmInfo);
       success = true;
     } finally {
-      if (ClientTraceLog.isInfoEnabled()) {
+      if (CLIENT_TRACE_LOG.isInfoEnabled()) {
         if (success) {
-          BlockSender.ClientTraceLog.info(String.format(
+          BlockSender.CLIENT_TRACE_LOG.info(String.format(
               "cliID: %s, src: 127.0.0.1, dest: 127.0.0.1, " +
               "op: REQUEST_SHORT_CIRCUIT_SHM," +
               " shmId: %016x%016x, srvID: %s, success: true",
@@ -536,7 +535,7 @@ class DataXceiver extends Receiver implements Runnable {
               shmInfo.getShmId().getLo(),
               datanode.getDatanodeUuid()));
         } else {
-          BlockSender.ClientTraceLog.info(String.format(
+          BlockSender.CLIENT_TRACE_LOG.info(String.format(
               "cliID: %s, src: 127.0.0.1, dest: 127.0.0.1, " +
               "op: REQUEST_SHORT_CIRCUIT_SHM, " +
               "shmId: n/a, srvID: %s, success: false",
@@ -587,13 +586,10 @@ class DataXceiver extends Receiver implements Runnable {
     BlockSender blockSender = null;
     DatanodeRegistration dnR =
         datanode.getDNRegistrationForBP(block.getBlockPoolId());
-    final String clientTraceFmt =
-        clientName.length() > 0 && ClientTraceLog.isInfoEnabled()
-        ? String.format(DN_CLIENTTRACE_FORMAT, localAddress, remoteAddress,
-            "", "%d", "HDFS_READ", clientName, "%d",
-            dnR.getDatanodeUuid(), block, "%d")
-        : dnR + " Served block " + block + " to " +
-            remoteAddress;
+    final String clientTraceFmt = clientName.length() > 0 && CLIENT_TRACE_LOG.isInfoEnabled() ?
+        String.format(DN_CLIENTTRACE_FORMAT, localAddress, remoteAddress, "", "%d", "HDFS_READ",
+            clientName, "%d", dnR.getDatanodeUuid(), block, "%d") :
+        dnR + " Served block " + block + " to " + remoteAddress;
 
     try {
       try {

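The DataXceiver hunks keep the isInfoEnabled() guard even though the logger is now SLF4J, because the client-trace message is built eagerly with String.format. A short sketch of that idiom with an assumed logger name standing in for DataNode.CLIENT_TRACE_LOG:

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class ClientTraceSketch {
  // Assumed logger; stands in for DataNode.CLIENT_TRACE_LOG.
  static final Logger CLIENT_TRACE_LOG = LoggerFactory.getLogger("ExampleClientTrace");

  static void trace(long blockId, String datanodeUuid, boolean success) {
    // String.format runs before the logger sees the message, so the guard
    // avoids building the string at all when INFO is disabled.
    if (CLIENT_TRACE_LOG.isInfoEnabled()) {
      CLIENT_TRACE_LOG.info(String.format(
          "src: 127.0.0.1, dest: 127.0.0.1, op: REQUEST_SHORT_CIRCUIT_FDS,"
              + " blockid: %s, srvID: %s, success: %b",
          blockId, datanodeUuid, success));
    }
  }
}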
@@ -20,6 +20,7 @@ package org.apache.hadoop.hdfs.server.datanode;
 
 import java.io.IOException;
 
+import org.apache.commons.lang3.builder.ToStringBuilder;
 import org.apache.hadoop.hdfs.protocolPB.DatanodeProtocolClientSideTranslatorPB;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
 import org.apache.hadoop.ipc.RemoteException;
@@ -84,4 +85,12 @@ public class ErrorReportAction implements BPServiceActorAction {
     }
     return true;
   }
+
+  @Override
+  public String toString() {
+    return new ToStringBuilder(this)
+        .append("errorCode", errorCode)
+        .append("errorMessage", errorMessage)
+        .toString();
+  }
 }

@@ -20,6 +20,7 @@ package org.apache.hadoop.hdfs.server.datanode;
 
 import java.io.IOException;
 
+import org.apache.commons.lang3.builder.ToStringBuilder;
 import org.apache.hadoop.fs.StorageType;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo.DatanodeInfoBuilder;
@@ -111,4 +112,13 @@ public class ReportBadBlockAction implements BPServiceActorAction {
     }
     return true;
   }
+
+  @Override
+  public String toString() {
+    return new ToStringBuilder(this)
+        .append("block", block)
+        .append("storageUuid", storageUuid)
+        .append("storageType", storageType)
+        .toString();
+  }
 }

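Both ErrorReportAction and ReportBadBlockAction gain a toString() built with commons-lang3 ToStringBuilder. A self-contained sketch of the same pattern on a hypothetical value class (field names mirror ErrorReportAction only for illustration):

import org.apache.commons.lang3.builder.ToStringBuilder;

public class ExampleAction {
  private final int errorCode;
  private final String errorMessage;

  public ExampleAction(int errorCode, String errorMessage) {
    this.errorCode = errorCode;
    this.errorMessage = errorMessage;
  }

  @Override
  public String toString() {
    // Renders the appended fields with the default ToStringStyle, e.g.
    // ExampleAction@1a2b3c[errorCode=5,errorMessage=disk failure]
    return new ToStringBuilder(this)
        .append("errorCode", errorCode)
        .append("errorMessage", errorMessage)
        .toString();
  }
}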
@@ -1557,7 +1557,8 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
   @Override // FsDatasetSpi
   public Replica recoverClose(ExtendedBlock b, long newGS,
       long expectedBlockLen) throws IOException {
-    LOG.info("Recover failed close " + b);
+    LOG.info("Recover failed close {}, new GS:{}, expectedBlockLen:{}",
+        b, newGS, expectedBlockLen);
     while (true) {
       try {
         try (AutoCloseableLock lock = lockManager.writeLock(LockLevel.VOLUME,

@@ -84,9 +84,9 @@ public class EditLogFileOutputStream extends EditLogOutputStream {
     doubleBuf = new EditsDoubleBuffer(size);
     RandomAccessFile rp;
     if (shouldSyncWritesAndSkipFsync) {
-      rp = new RandomAccessFile(name, "rw");
+      rp = new RandomAccessFile(name, "rwd");
     } else {
-      rp = new RandomAccessFile(name, "rws");
+      rp = new RandomAccessFile(name, "rw");
     }
     try {
       fp = new FileOutputStream(rp.getFD()); // open for append

@@ -185,9 +185,6 @@ import javax.management.NotCompliantMBeanException;
 import javax.management.ObjectName;
 import javax.management.StandardMBean;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.commons.logging.impl.Log4JLogger;
 import org.apache.hadoop.HadoopIllegalArgumentException;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
@@ -405,7 +402,7 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
   private final String contextFieldSeparator;
 
   boolean isAuditEnabled() {
-    return (!isDefaultAuditLogger || auditLog.isInfoEnabled())
+    return (!isDefaultAuditLogger || AUDIT_LOG.isInfoEnabled())
         && !auditLoggers.isEmpty();
   }
 
@@ -491,8 +488,7 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
    * perm=<permissions (optional)>
    * </code>
    */
-  public static final Log auditLog = LogFactory.getLog(
-      FSNamesystem.class.getName() + ".audit");
+  public static final Logger AUDIT_LOG = Logger.getLogger(FSNamesystem.class.getName() + ".audit");
 
   private final int maxCorruptFileBlocksReturn;
   private final boolean isPermissionEnabled;
@@ -5943,6 +5939,8 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
     }
     // Ensure we record the new generation stamp
     getEditLog().logSync();
+    LOG.info("bumpBlockGenerationStamp({}, client={}) success",
+        locatedBlock.getBlock(), clientName);
     return locatedBlock;
   }
 
@@ -8783,8 +8781,8 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
       FileStatus status, CallerContext callerContext, UserGroupInformation ugi,
       DelegationTokenSecretManager dtSecretManager) {
 
-      if (auditLog.isDebugEnabled() ||
-          (auditLog.isInfoEnabled() && !debugCmdSet.contains(cmd))) {
+      if (AUDIT_LOG.isDebugEnabled() ||
+          (AUDIT_LOG.isInfoEnabled() && !debugCmdSet.contains(cmd))) {
         final StringBuilder sb = STRING_BUILDER.get();
         src = escapeJava(src);
         dst = escapeJava(dst);
@@ -8853,16 +8851,12 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
     }
 
     public void logAuditMessage(String message) {
-      auditLog.info(message);
+      AUDIT_LOG.info(message);
     }
   }
 
   private static void enableAsyncAuditLog(Configuration conf) {
-    if (!(auditLog instanceof Log4JLogger)) {
-      LOG.warn("Log4j is required to enable async auditlog");
-      return;
-    }
-    Logger logger = ((Log4JLogger)auditLog).getLogger();
+    Logger logger = AUDIT_LOG;
     @SuppressWarnings("unchecked")
     List<Appender> appenders = Collections.list(logger.getAllAppenders());
     // failsafe against trying to async it more than once

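With the audit logger now obtained directly from log4j, enableAsyncAuditLog no longer needs the Log4JLogger instanceof check. A hedged sketch of how the appenders of a named log4j 1.x logger are typically wrapped in an AsyncAppender; this illustrates the idea only and is not the exact remaining body of the method:

import java.util.Collections;
import java.util.List;

import org.apache.log4j.Appender;
import org.apache.log4j.AsyncAppender;
import org.apache.log4j.Logger;

public class AsyncAuditLogSketch {
  // Assumed logger name; FSNamesystem uses "<class name>.audit".
  private static final Logger AUDIT_LOG = Logger.getLogger("example.audit");

  static void enableAsync() {
    Logger logger = AUDIT_LOG;
    @SuppressWarnings("unchecked")
    List<Appender> appenders = Collections.list(logger.getAllAppenders());
    // Failsafe against wrapping the appenders more than once.
    if (!appenders.isEmpty() && appenders.get(0) instanceof AsyncAppender) {
      return;
    }
    AsyncAppender asyncAppender = new AsyncAppender();
    for (Appender appender : appenders) {
      // Move each existing appender behind the async wrapper so audit events
      // are handed off to a background thread instead of blocking the caller.
      asyncAppender.addAppender(appender);
      logger.removeAppender(appender);
    }
    logger.addAppender(asyncAppender);
  }
}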
@@ -17,9 +17,6 @@
  */
 package org.apache.hadoop.hdfs.server.namenode;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.commons.logging.impl.Log4JLogger;
 import org.apache.hadoop.HadoopIllegalArgumentException;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.conf.Configured;
@@ -125,15 +122,10 @@ public class FsImageValidation {
   }
 
   static void setLogLevel(Class<?> clazz, Level level) {
-    final Log log = LogFactory.getLog(clazz);
-    if (log instanceof Log4JLogger) {
-      final org.apache.log4j.Logger logger = ((Log4JLogger) log).getLogger();
-      logger.setLevel(level);
-      LOG.info("setLogLevel {} to {}, getEffectiveLevel() = {}",
-          clazz.getName(), level, logger.getEffectiveLevel());
-    } else {
-      LOG.warn("Failed setLogLevel {} to {}", clazz.getName(), level);
-    }
+    final org.apache.log4j.Logger logger = org.apache.log4j.Logger.getLogger(clazz);
+    logger.setLevel(level);
+    LOG.info("setLogLevel {} to {}, getEffectiveLevel() = {}", clazz.getName(), level,
+        logger.getEffectiveLevel());
   }
 
   static String toCommaSeparatedNumber(long n) {

@@ -25,8 +25,6 @@ import org.apache.hadoop.thirdparty.com.google.common.base.Joiner;
 import org.apache.hadoop.util.Preconditions;
 
 import java.util.Set;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.HadoopIllegalArgumentException;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
@@ -427,8 +425,7 @@ public class NameNode extends ReconfigurableBase implements
 
   private static final String NAMENODE_HTRACE_PREFIX = "namenode.htrace.";
 
-  public static final Log MetricsLog =
-      LogFactory.getLog("NameNodeMetricsLog");
+  public static final String METRICS_LOG_NAME = "NameNodeMetricsLog";
 
   protected FSNamesystem namesystem;
   protected final NamenodeRole role;
@@ -949,13 +946,13 @@ public class NameNode extends ReconfigurableBase implements
       return;
     }
 
-    MetricsLoggerTask.makeMetricsLoggerAsync(MetricsLog);
+    MetricsLoggerTask.makeMetricsLoggerAsync(METRICS_LOG_NAME);
 
     // Schedule the periodic logging.
     metricsLoggerTimer = new ScheduledThreadPoolExecutor(1);
     metricsLoggerTimer.setExecuteExistingDelayedTasksAfterShutdownPolicy(
         false);
-    metricsLoggerTimer.scheduleWithFixedDelay(new MetricsLoggerTask(MetricsLog,
+    metricsLoggerTimer.scheduleWithFixedDelay(new MetricsLoggerTask(METRICS_LOG_NAME,
         "NameNode", (short) 128),
         metricsLoggerPeriodSec,
         metricsLoggerPeriodSec,

@@ -2721,10 +2721,10 @@
   <description>
     Specifies whether to flush edit log file channel. When set, expensive
     FileChannel#force calls are skipped and synchronous disk writes are
-    enabled instead by opening the edit log file with RandomAccessFile("rws")
+    enabled instead by opening the edit log file with RandomAccessFile("rwd")
     flags. This can significantly improve the performance of edit log writes
     on the Windows platform.
-    Note that the behavior of the "rws" flags is platform and hardware specific
+    Note that the behavior of the "rwd" flags is platform and hardware specific
     and might not provide the same level of guarantees as FileChannel#force.
     For example, the write will skip the disk-cache on SAS and SCSI devices
     while it might not on SATA devices. This is an expert level setting,

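The edit-log change above swaps the RandomAccessFile open mode from "rws" to "rwd" when the flush-skipping option described in this property is enabled. A small sketch of what the modes mean at the JDK level; the method and path are placeholders, not the actual EditLogFileOutputStream code:

import java.io.IOException;
import java.io.RandomAccessFile;

public class EditLogOpenModes {
  static RandomAccessFile open(String name, boolean syncWritesAndSkipFsync)
      throws IOException {
    if (syncWritesAndSkipFsync) {
      // "rwd": file content is written synchronously to the device, but file
      // metadata (length, timestamps) is not forced, which is cheaper than "rws".
      return new RandomAccessFile(name, "rwd");
    }
    // "rw": writes may stay in OS caches until FileChannel#force is called.
    return new RandomAccessFile(name, "rw");
  }
}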
@@ -81,9 +81,11 @@
   <thead>
     <tr>
       <th>Namenode Address</th>
+      <th>Namenode HA State</th>
       <th>Block Pool ID</th>
       <th>Actor State</th>
-      <th>Last Heartbeat</th>
+      <th>Last Heartbeat Sent</th>
+      <th>Last Heartbeat Response</th>
       <th>Last Block Report</th>
       <th>Last Block Report Size (Max Size)</th>
     </tr>
@@ -91,9 +93,11 @@
   {#dn.BPServiceActorInfo}
   <tr>
     <td>{NamenodeAddress}</td>
+    <td>{NamenodeHaState}</td>
     <td>{BlockPoolID}</td>
     <td>{ActorState}</td>
     <td>{LastHeartbeat}s</td>
+    <td>{LastHeartbeatResponseTime}s</td>
     <td>{#helper_relative_time value="{LastBlockReport}"/}</td>
     <td>{maxBlockReportSize|fmt_bytes} ({maxDataLength|fmt_bytes})</td>
   </tr>

@@ -361,6 +361,7 @@
   <td></td>
   <td></td>
   <td></td>
+  <td></td>
 </tr>
 {/DeadNodes}
 </table>

@@ -36,11 +36,14 @@ import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.web.WebHdfsConstants;
 import org.apache.hadoop.hdfs.web.WebHdfsFileSystem;
 import org.apache.hadoop.hdfs.web.WebHdfsTestUtil;
+import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.ipc.RemoteException;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.junit.AfterClass;
 import org.junit.BeforeClass;
 import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.slf4j.event.Level;
 
 /**
@@ -52,6 +55,9 @@ abstract public class TestSymlinkHdfs extends SymlinkBaseTest {
     GenericTestUtils.setLogLevel(NameNode.stateChangeLog, Level.TRACE);
   }
 
+  private static final Logger LOG = LoggerFactory.getLogger(
+      TestSymlinkHdfs.class);
+
   protected static MiniDFSCluster cluster;
   protected static WebHdfsFileSystem webhdfs;
   protected static DistributedFileSystem dfs;
@@ -99,6 +105,7 @@ abstract public class TestSymlinkHdfs extends SymlinkBaseTest {
     if (cluster != null) {
       cluster.shutdown();
     }
+    IOUtils.cleanupWithLogger(LOG, webhdfs);
   }
 
   @Test(timeout=10000)

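The test teardown above now closes the WebHDFS client through IOUtils.cleanupWithLogger, which logs close failures instead of propagating them. A tiny sketch of that helper; the class, logger, and resources here are placeholders:

import java.io.Closeable;

import org.apache.hadoop.io.IOUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class QuietCloseSketch {
  private static final Logger LOG = LoggerFactory.getLogger(QuietCloseSketch.class);

  static void closeQuietly(Closeable... resources) {
    // Each resource is closed in turn; any exception from close() is logged
    // rather than rethrown, so teardown keeps going past a failing resource.
    IOUtils.cleanupWithLogger(LOG, resources);
  }
}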
@@ -2529,6 +2529,24 @@ public class MiniDFSCluster implements AutoCloseable {
     return restartDataNode(dnprop, false);
   }
 
+  /**
+   * Wait for the datanode to be fully functional i.e. all the BP service threads are alive,
+   * all block pools initiated and also connected to active namenode.
+   *
+   * @param dn Datanode instance.
+   * @param timeout Timeout in millis until when we should wait for datanode to be fully
+   *                operational.
+   * @throws InterruptedException If the thread wait is interrupted.
+   * @throws TimeoutException If times out while awaiting the fully operational capability of
+   *                          datanode.
+   */
+  public void waitDatanodeConnectedToActive(DataNode dn, int timeout)
+      throws InterruptedException, TimeoutException {
+    GenericTestUtils.waitFor(() -> dn.isDatanodeFullyStarted(true),
+        100, timeout, "Datanode is not connected to active namenode even after "
+            + timeout + " ms of waiting");
+  }
+
   public void waitDatanodeFullyStarted(DataNode dn, int timeout)
       throws TimeoutException, InterruptedException {
     GenericTestUtils.waitFor(dn::isDatanodeFullyStarted, 100, timeout,

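The new waitDatanodeConnectedToActive helper builds on GenericTestUtils.waitFor to poll until a restarted datanode reports being fully started and connected to the active namenode. A hypothetical test usage, assuming a running MiniDFSCluster with at least one datanode:

import java.io.IOException;
import java.util.concurrent.TimeoutException;

import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.server.datanode.DataNode;

public class WaitForDatanodeSketch {
  static void restartAndWait(MiniDFSCluster cluster)
      throws IOException, InterruptedException, TimeoutException {
    // Restart the first datanode, then block (up to 60 seconds) until its
    // BP service actors have registered with the active namenode.
    cluster.restartDataNode(0);
    DataNode dn = cluster.getDataNodes().get(0);
    cluster.waitDatanodeConnectedToActive(dn, 60_000);
  }
}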
@@ -21,8 +21,6 @@ import java.net.URI;
 import java.util.Arrays;
 import java.util.Collection;
 import java.util.EnumSet;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.fs.CreateFlag;
 import org.apache.hadoop.fs.FSDataOutputStream;
@@ -41,6 +39,8 @@ import org.junit.Before;
 import org.junit.Test;
 import org.junit.runner.RunWith;
 import org.junit.runners.Parameterized;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import static org.apache.hadoop.hdfs.DFSConfigKeys.*;
 import static org.junit.Assert.*;
@@ -52,7 +52,7 @@ import static org.junit.Assert.*;
  */
 @RunWith(Parameterized.class)
 public class TestBlockTokenWrappingQOP extends SaslDataTransferTestCase {
-  public static final Log LOG = LogFactory.getLog(TestPermission.class);
+  public static final Logger LOG = LoggerFactory.getLogger(TestPermission.class);
 
   private HdfsConfiguration conf;
   private MiniDFSCluster cluster;

@@ -190,7 +190,7 @@ public class TestDFSRename {
     Path path = new Path("/test");
     dfs.mkdirs(path);
     GenericTestUtils.LogCapturer auditLog =
-        GenericTestUtils.LogCapturer.captureLogs(FSNamesystem.auditLog);
+        GenericTestUtils.LogCapturer.captureLogs(FSNamesystem.AUDIT_LOG);
     dfs.rename(path, new Path("/dir1"),
         new Rename[] {Rename.OVERWRITE, Rename.TO_TRASH});
     String auditOut = auditLog.getOutput();

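The rename test captures the renamed FSNamesystem.AUDIT_LOG through GenericTestUtils.LogCapturer. A sketch of the capture pattern against an assumed SLF4J logger; the logger name and helper method are illustrative only:

import org.apache.hadoop.test.GenericTestUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class LogCaptureSketch {
  // Assumed logger standing in for the namenode audit logger in the test above.
  private static final Logger AUDIT = LoggerFactory.getLogger("example.audit");

  static boolean auditContains(Runnable operation, String expected) {
    GenericTestUtils.LogCapturer capturer =
        GenericTestUtils.LogCapturer.captureLogs(AUDIT);
    try {
      operation.run();
      // Everything logged to AUDIT while capturing is available as one string.
      return capturer.getOutput().contains(expected);
    } finally {
      capturer.stopCapturing();
    }
  }
}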
@@ -32,7 +32,7 @@ public class TestFileLengthOnClusterRestart {
    * Tests the fileLength when we sync the file and restart the cluster and
    * Datanodes not report to Namenode yet.
    */
-  @Test(timeout = 60000)
+  @Test(timeout = 120000)
   public void testFileLengthWithHSyncAndClusterRestartWithOutDNsRegister()
       throws Exception {
     final Configuration conf = new HdfsConfiguration();