Merge trunk into branch
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/HDFS-3077@1396918 13f79535-47bb-0310-9956-ffa450edef68
Commit: ff8f84ccf6
@@ -335,7 +335,7 @@ checkTests () {
    echo "The patch appears to be a documentation patch that doesn't require tests."
    JIRA_COMMENT="$JIRA_COMMENT

    +0 tests included. The patch appears to be a documentation patch that doesn't require tests."
    {color:green}+0 tests included{color}. The patch appears to be a documentation patch that doesn't require tests."
      return 0
    fi
  fi

@@ -681,12 +681,46 @@ runTests () {

  failed_tests=""
  modules=$(findModules)
  for module in $modules;
  do
  #
  # If we are building hadoop-hdfs-project, we must build the native component
  # of hadoop-common-project first. In order to accomplish this, we move the
  # hadoop-hdfs subprojects to the end of the list so that common will come
  # first.
  #
  # Of course, we may not be building hadoop-common at all-- in this case, we
  # explicitly insert a mvn compile -Pnative of common, to ensure that the
  # native libraries show up where we need them.
  #
  building_common=0
  for module in $modules; do
    if [[ $module == hadoop-hdfs-project* ]]; then
      hdfs_modules="$hdfs_modules $module"
    elif [[ $module == hadoop-common-project* ]]; then
      ordered_modules="$ordered_modules $module"
      building_common=1
    else
      ordered_modules="$ordered_modules $module"
    fi
  done
  if [ -n $hdfs_modules ]; then
    ordered_modules="$ordered_modules $hdfs_modules"
    if [[ $building_common -eq 0 ]]; then
      echo " Building hadoop-common with -Pnative in order to provide \
libhadoop.so to the hadoop-hdfs unit tests."
      echo " $MVN compile -Pnative -D${PROJECT_NAME}PatchProcess"
      if ! $MVN compile -Pnative -D${PROJECT_NAME}PatchProcess; then
        JIRA_COMMENT="$JIRA_COMMENT
        {color:red}-1 core tests{color}. Failed to build the native portion \
of hadoop-common prior to running the unit tests in $ordered_modules"
        return 1
      fi
    fi
  fi
  for module in $ordered_modules; do
    cd $module
    echo " Running tests in $module"
    echo " $MVN clean install -fn -Pnative -D${PROJECT_NAME}PatchProcess"
    $MVN clean install -fn -Pnative -D${PROJECT_NAME}PatchProcess
    $MVN clean install -fn -Pnative -Drequire.test.libhadoop -D${PROJECT_NAME}PatchProcess
    module_failed_tests=`find . -name 'TEST*.xml' | xargs $GREP -l -E "<failure|<error" | sed -e "s|.*target/surefire-reports/TEST-| |g" | sed -e "s|\.xml||g"`
    # With -fn mvn always exits with a 0 exit code. Because of this we need to
    # find the errors instead of using the exit code. We assume that if the build

@@ -914,6 +948,7 @@ if [[ $RESULT != 0 ]] ; then
fi
buildWithPatch
checkAuthor
(( RESULT = RESULT + $? ))

if [[ $JENKINS == "true" ]] ; then
  cleanUpXml
@@ -19,6 +19,8 @@ import org.ietf.jgss.GSSContext;
import org.ietf.jgss.GSSManager;
import org.ietf.jgss.GSSName;
import org.ietf.jgss.Oid;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import javax.security.auth.Subject;
import javax.security.auth.login.AppConfigurationEntry;

@@ -44,6 +46,9 @@ import java.util.Map;
 * sequence.
 */
public class KerberosAuthenticator implements Authenticator {

  private static Logger LOG = LoggerFactory.getLogger(
      KerberosAuthenticator.class);

  /**
   * HTTP header used by the SPNEGO server endpoint during an authentication sequence.

@@ -152,9 +157,18 @@ public class KerberosAuthenticator implements Authenticator {
    }
    conn.setRequestMethod(AUTH_HTTP_METHOD);
    conn.connect();
    if (isNegotiate()) {

    if (conn.getResponseCode() == HttpURLConnection.HTTP_OK) {
      LOG.debug("JDK performed authentication on our behalf.");
      // If the JDK already did the SPNEGO back-and-forth for
      // us, just pull out the token.
      AuthenticatedURL.extractToken(conn, token);
      return;
    } else if (isNegotiate()) {
      LOG.debug("Performing our own SPNEGO sequence.");
      doSpnegoSequence(token);
    } else {
      LOG.debug("Using fallback authenticator sequence.");
      getFallBackAuthenticator().authenticate(url, token);
    }
  }

@@ -168,7 +182,11 @@ public class KerberosAuthenticator implements Authenticator {
   * @return the fallback {@link Authenticator}.
   */
  protected Authenticator getFallBackAuthenticator() {
    return new PseudoAuthenticator();
    Authenticator auth = new PseudoAuthenticator();
    if (connConfigurator != null) {
      auth.setConnectionConfigurator(connConfigurator);
    }
    return auth;
  }

  /*

@@ -197,11 +215,16 @@ public class KerberosAuthenticator implements Authenticator {
    AccessControlContext context = AccessController.getContext();
    Subject subject = Subject.getSubject(context);
    if (subject == null) {
      LOG.debug("No subject in context, logging in");
      subject = new Subject();
      LoginContext login = new LoginContext("", subject,
          null, new KerberosConfiguration());
      login.login();
    }

    if (LOG.isDebugEnabled()) {
      LOG.debug("Using subject: " + subject);
    }
    Subject.doAs(subject, new PrivilegedExceptionAction<Void>() {

      @Override
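The authenticate() hunk above reorders the SPNEGO handling: if the JDK's built-in HTTP/SPNEGO support already completed the handshake (the first response is 200 OK), the client just extracts the resulting token; only a 401 Negotiate challenge makes it drive its own SPNEGO sequence, and anything else falls through to the pseudo authenticator, which now also inherits the connection configurator. The following is a minimal standalone sketch of that decision flow; the class name and helper methods are illustrative stand-ins, not the Hadoop API.

import java.io.IOException;
import java.net.HttpURLConnection;
import java.net.URL;

public class SpnegoFlowSketch {

  public void authenticate(URL url) throws IOException {
    HttpURLConnection conn = (HttpURLConnection) url.openConnection();
    conn.setRequestMethod("OPTIONS");
    conn.connect();

    if (conn.getResponseCode() == HttpURLConnection.HTTP_OK) {
      // The JDK already performed the SPNEGO back-and-forth for us;
      // just read the resulting authentication token.
      extractToken(conn);
    } else if (isNegotiate(conn)) {
      // Server answered 401 with "WWW-Authenticate: Negotiate":
      // drive the SPNEGO round trips ourselves.
      runOwnSpnegoSequence(conn);
    } else {
      // No Kerberos in play: hand off to a simpler fallback authenticator.
      fallBack(url);
    }
  }

  private boolean isNegotiate(HttpURLConnection conn) throws IOException {
    String header = conn.getHeaderField("WWW-Authenticate");
    return conn.getResponseCode() == HttpURLConnection.HTTP_UNAUTHORIZED
        && header != null && header.trim().startsWith("Negotiate");
  }

  private void extractToken(HttpURLConnection conn) { /* read the auth cookie */ }

  private void runOwnSpnegoSequence(HttpURLConnection conn) { /* GSS-API exchange */ }

  private void fallBack(URL url) { /* e.g. pseudo/simple authentication */ }
}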
@@ -7,6 +7,8 @@ Trunk (Unreleased)
    HADOOP-8124. Remove the deprecated FSDataOutputStream constructor,
    FSDataOutputStream.sync() and Syncable.sync(). (szetszwo)

    HADOOP-8886. Remove KFS support. (eli)

  NEW FEATURES

    HADOOP-8469. Make NetworkTopology class pluggable. (Junping Du via

@@ -117,6 +119,9 @@ Trunk (Unreleased)
    HADOOP-8840. Fix the test-patch colorizer to cover all sorts of +1 lines.
    (Harsh J via bobby)

    HADOOP-8864. Addendum to HADOOP-8840: Add a coloring case for +0 results
    too. (harsh)

  BUG FIXES

    HADOOP-8177. MBeans shouldn't try to register when it fails to create MBeanName.

@@ -244,6 +249,16 @@ Trunk (Unreleased)
    required context item is not configured
    (Brahma Reddy Battula via harsh)

    HADOOP-3957. Change MutableQuantiles to use a shared thread for rolling
    over metrics. (Andrew Wang via todd)

    HADOOP-8386. hadoop script doesn't work if 'cd' prints to stdout
    (default behavior in some bash setups (esp. Ubuntu))
    (Chiristopher Berner and Andy Isaacson via harsh)

    HADOOP-8839. test-patch's -1 on @author tag presence doesn't cause
    a -1 to the overall result (harsh)

  OPTIMIZATIONS

    HADOOP-7761. Improve the performance of raw comparisons. (todd)

@@ -272,8 +287,24 @@ Release 2.0.3-alpha - Unreleased

    HADOOP-8736. Add Builder for building RPC server. (Brandon Li via Suresh)

    HADOOP-8851. Use -XX:+HeapDumpOnOutOfMemoryError JVM option in the forked
    tests. (Ivan A. Veselovsky via atm)

    HADOOP-8783. Improve RPC.Server's digest auth (daryn)

    HADOOP-8889. Upgrade to Surefire 2.12.3 (todd)

    HADOOP-8804. Improve Web UIs when the wildcard address is used.
    (Senthil Kumar via eli)

    HADOOP-8894. GenericTestUtils.waitFor should dump thread stacks on timeout
    (todd)

  OPTIMIZATIONS

    HADOOP-8866. SampleQuantiles#query is O(N^2) instead of O(N). (Andrew Wang
    via atm)

  BUG FIXES

    HADOOP-8795. BASH tab completion doesn't look in PATH, assumes path to

@@ -288,6 +319,14 @@ Release 2.0.3-alpha - Unreleased
    HADOOP-8791. Fix rm command documentation to indicte it deletes
    files and not directories. (Jing Zhao via suresh)

    HADOOP-8616. ViewFS configuration requires a trailing slash. (Sandy Ryza
    via atm)

    HADOOP-8756. Fix SEGV when libsnappy is in java.library.path but
    not LD_LIBRARY_PATH. (Colin Patrick McCabe via eli)

    HADOOP-8881. FileBasedKeyStoresFactory initialization logging should be debug not info. (tucu)

Release 2.0.2-alpha - 2012-09-07

  INCOMPATIBLE CHANGES

@@ -298,6 +337,8 @@ Release 2.0.2-alpha - 2012-09-07
    HADOOP-8689. Make trash a server side configuration option. (eli)

    HADOOP-8710. Remove ability for users to easily run the trash emptire. (eli)

    HADOOP-8794. Rename YARN_HOME to HADOOP_YARN_HOME. (vinodkv via acmurthy)

  NEW FEATURES

@@ -543,8 +584,6 @@ Release 2.0.2-alpha - 2012-09-07
    HADOOP-8031. Configuration class fails to find embedded .jar resources;
    should use URL.openStream() (genman via tucu)

    HADOOP-8738. junit JAR is showing up in the distro (tucu)

    HADOOP-8737. cmake: always use JAVA_HOME to find libjvm.so, jni.h, jni_md.h.
    (Colin Patrick McCabe via eli)

@@ -574,6 +613,8 @@ Release 2.0.2-alpha - 2012-09-07

    HADOOP-8781. hadoop-config.sh should add JAVA_LIBRARY_PATH to LD_LIBRARY_PATH. (tucu)

    HADOOP-8855. SSL-based image transfer does not work when Kerberos is disabled. (todd via eli)

  BREAKDOWN OF HDFS-3042 SUBTASKS

    HADOOP-8220. ZKFailoverController doesn't handle failure to become active

@@ -976,6 +1017,18 @@ Release 2.0.0-alpha - 05-23-2012
    HADOOP-8655. Fix TextInputFormat for large deliminators. (Gelesh via
    bobby)

Release 0.23.5 - UNRELEASED

  INCOMPATIBLE CHANGES

  NEW FEATURES

  IMPROVEMENTS

  OPTIMIZATIONS

  BUG FIXES

Release 0.23.4 - UNRELEASED

  INCOMPATIBLE CHANGES

@@ -990,7 +1043,10 @@ Release 0.23.4 - UNRELEASED

  BUG FIXES

Release 0.23.3 - UNRELEASED
    HADOOP-8843. Old trash directories are never deleted on upgrade
    from 1.x (jlowe)

Release 0.23.3

  INCOMPATIBLE CHANGES
@@ -175,18 +175,6 @@
       <Bug pattern="ES_COMPARING_STRINGS_WITH_EQ" />
     </Match>

     <Match>
       <Class name="org.apache.hadoop.fs.kfs.KFSOutputStream" />
       <Field name="path" />
       <Bug pattern="URF_UNREAD_FIELD" />
     </Match>

     <Match>
       <Class name="org.apache.hadoop.fs.kfs.KosmosFileSystem" />
       <Method name="initialize" />
       <Bug pattern="DM_EXIT" />
     </Match>

     <Match>
       <Class name="org.apache.hadoop.io.Closeable" />
       <Bug pattern="NM_SAME_SIMPLE_NAME_AS_INTERFACE" />

@@ -194,11 +194,6 @@
      <artifactId>avro</artifactId>
      <scope>compile</scope>
    </dependency>
    <dependency>
      <groupId>net.sf.kosmosfs</groupId>
      <artifactId>kfs</artifactId>
      <scope>compile</scope>
    </dependency>
    <dependency>
      <groupId>org.apache.ant</groupId>
      <artifactId>ant</artifactId>

@@ -123,6 +123,7 @@ add_dual_library(hadoop
    ${D}/security/JniBasedUnixGroupsMapping.c
    ${D}/security/JniBasedUnixGroupsNetgroupMapping.c
    ${D}/security/getGroup.c
    ${D}/util/NativeCodeLoader.c
    ${D}/util/NativeCrc32.c
    ${D}/util/bulk_crc32.c
)

@@ -2,7 +2,6 @@
#define CONFIG_H

#cmakedefine HADOOP_ZLIB_LIBRARY "@HADOOP_ZLIB_LIBRARY@"
#cmakedefine HADOOP_RUNAS_HOME "@HADOOP_RUNAS_HOME@"
#cmakedefine HADOOP_SNAPPY_LIBRARY "@HADOOP_SNAPPY_LIBRARY@"
#cmakedefine HAVE_SYNC_FILE_RANGE
#cmakedefine HAVE_POSIX_FADVISE

@@ -19,7 +19,7 @@

bin=`which $0`
bin=`dirname ${bin}`
bin=`cd "$bin"; pwd`
bin=`cd "$bin" > /dev/null; pwd`

DEFAULT_LIBEXEC_DIR="$bin"/../libexec
HADOOP_LIBEXEC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR}

@@ -269,21 +269,21 @@ fi
CLASSPATH=${CLASSPATH}:$HADOOP_HDFS_HOME/$HDFS_DIR'/*'

# put yarn in classpath if present
if [ "$YARN_HOME" = "" ]; then
if [ "$HADOOP_YARN_HOME" = "" ]; then
  if [ -d "${HADOOP_PREFIX}/$YARN_DIR" ]; then
    export YARN_HOME=$HADOOP_PREFIX
    export HADOOP_YARN_HOME=$HADOOP_PREFIX
  fi
fi

if [ -d "$YARN_HOME/$YARN_DIR/webapps" ]; then
  CLASSPATH=${CLASSPATH}:$YARN_HOME/$YARN_DIR
if [ -d "$HADOOP_YARN_HOME/$YARN_DIR/webapps" ]; then
  CLASSPATH=${CLASSPATH}:$HADOOP_YARN_HOME/$YARN_DIR
fi

if [ -d "$YARN_HOME/$YARN_LIB_JARS_DIR" ]; then
  CLASSPATH=${CLASSPATH}:$YARN_HOME/$YARN_LIB_JARS_DIR'/*'
if [ -d "$HADOOP_YARN_HOME/$YARN_LIB_JARS_DIR" ]; then
  CLASSPATH=${CLASSPATH}:$HADOOP_YARN_HOME/$YARN_LIB_JARS_DIR'/*'
fi

CLASSPATH=${CLASSPATH}:$YARN_HOME/$YARN_DIR'/*'
CLASSPATH=${CLASSPATH}:$HADOOP_YARN_HOME/$YARN_DIR'/*'

# put mapred in classpath if present AND different from YARN
if [ "$HADOOP_MAPRED_HOME" = "" ]; then

@@ -292,7 +292,7 @@ if [ "$HADOOP_MAPRED_HOME" = "" ]; then
  fi
fi

if [ "$HADOOP_MAPRED_HOME/$MAPRED_DIR" != "$YARN_HOME/$YARN_DIR" ] ; then
if [ "$HADOOP_MAPRED_HOME/$MAPRED_DIR" != "$HADOOP_YARN_HOME/$YARN_DIR" ] ; then
  if [ -d "$HADOOP_MAPRED_HOME/$MAPRED_DIR/webapps" ]; then
    CLASSPATH=${CLASSPATH}:$HADOOP_MAPRED_HOME/$MAPRED_DIR
  fi

@@ -33,6 +33,6 @@ if [ -f "${HADOOP_HDFS_HOME}"/sbin/start-dfs.sh ]; then
fi

# start yarn daemons if yarn is present
if [ -f "${YARN_HOME}"/sbin/start-yarn.sh ]; then
  "${YARN_HOME}"/sbin/start-yarn.sh --config $HADOOP_CONF_DIR
if [ -f "${HADOOP_YARN_HOME}"/sbin/start-yarn.sh ]; then
  "${HADOOP_YARN_HOME}"/sbin/start-yarn.sh --config $HADOOP_CONF_DIR
fi

@@ -2,7 +2,7 @@
# See javadoc of package-info.java for org.apache.hadoop.metrics2 for details

*.sink.file.class=org.apache.hadoop.metrics2.sink.FileSink
# default sampling period
# default sampling period, in seconds
*.period=10

# The namenode-metrics.out will contain metrics from all context
@@ -61,6 +61,9 @@ public class TrashPolicyDefault extends TrashPolicy {
      new FsPermission(FsAction.ALL, FsAction.NONE, FsAction.NONE);

  private static final DateFormat CHECKPOINT = new SimpleDateFormat("yyMMddHHmmss");
  /** Format of checkpoint directories used prior to Hadoop 0.23. */
  private static final DateFormat OLD_CHECKPOINT =
      new SimpleDateFormat("yyMMddHHmm");
  private static final int MSECS_PER_MINUTE = 60*1000;

  private Path current;

@@ -69,8 +72,9 @@ public class TrashPolicyDefault extends TrashPolicy {

  public TrashPolicyDefault() { }

  private TrashPolicyDefault(Path home, Configuration conf) throws IOException {
    initialize(conf, home.getFileSystem(conf), home);
  private TrashPolicyDefault(FileSystem fs, Path home, Configuration conf)
      throws IOException {
    initialize(conf, fs, home);
  }

  @Override

@@ -202,9 +206,7 @@ public class TrashPolicyDefault extends TrashPolicy {

      long time;
      try {
        synchronized (CHECKPOINT) {
          time = CHECKPOINT.parse(name).getTime();
        }
        time = getTimeFromCheckpoint(name);
      } catch (ParseException e) {
        LOG.warn("Unexpected item in trash: "+dir+". Ignoring.");
        continue;

@@ -278,7 +280,8 @@ public class TrashPolicyDefault extends TrashPolicy {
      if (!home.isDirectory())
        continue;
      try {
        TrashPolicyDefault trash = new TrashPolicyDefault(home.getPath(), conf);
        TrashPolicyDefault trash = new TrashPolicyDefault(
            fs, home.getPath(), conf);
        trash.deleteCheckpoint();
        trash.createCheckpoint();
      } catch (IOException e) {

@@ -304,4 +307,22 @@ public class TrashPolicyDefault extends TrashPolicy {
      return (time / interval) * interval;
    }
  }

  private long getTimeFromCheckpoint(String name) throws ParseException {
    long time;

    try {
      synchronized (CHECKPOINT) {
        time = CHECKPOINT.parse(name).getTime();
      }
    } catch (ParseException pe) {
      // Check for old-style checkpoint directories left over
      // after an upgrade from Hadoop 1.x
      synchronized (OLD_CHECKPOINT) {
        time = OLD_CHECKPOINT.parse(name).getTime();
      }
    }

    return time;
  }
}
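The new getTimeFromCheckpoint() helper above first parses a checkpoint directory name with the current yyMMddHHmmss format and, on failure, retries with the pre-0.23 yyMMddHHmm format, so trash checkpoints left over from a 1.x upgrade are still recognized and eventually expired (HADOOP-8843). A small standalone sketch of the same two-format parsing, runnable outside Hadoop (class and sample names are illustrative only):

import java.text.DateFormat;
import java.text.ParseException;
import java.text.SimpleDateFormat;

public class CheckpointTimeSketch {

  // Current checkpoint directory format (seconds resolution).
  private static final DateFormat CHECKPOINT = new SimpleDateFormat("yyMMddHHmmss");
  // Format used before Hadoop 0.23 (minutes resolution), kept for 1.x upgrades.
  private static final DateFormat OLD_CHECKPOINT = new SimpleDateFormat("yyMMddHHmm");

  static long getTimeFromCheckpoint(String name) throws ParseException {
    try {
      // SimpleDateFormat is not thread-safe, hence the synchronization.
      synchronized (CHECKPOINT) {
        return CHECKPOINT.parse(name).getTime();
      }
    } catch (ParseException pe) {
      // Old-style checkpoint directory left over after an upgrade from Hadoop 1.x.
      synchronized (OLD_CHECKPOINT) {
        return OLD_CHECKPOINT.parse(name).getTime();
      }
    }
  }

  public static void main(String[] args) throws ParseException {
    System.out.println(getTimeFromCheckpoint("121008123045")); // current format
    System.out.println(getTimeFromCheckpoint("1210081230"));   // pre-0.23 format
  }
}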
@ -1,59 +0,0 @@
|
|||
/**
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0
|
||||
* (the "License"); you may not use this file except in compliance with
|
||||
* the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
* implied. See the License for the specific language governing
|
||||
* permissions and limitations under the License.
|
||||
*
|
||||
*
|
||||
* We need to provide the ability to the code in fs/kfs without really
|
||||
* having a KFS deployment. In particular, the glue code that wraps
|
||||
* around calls to KfsAccess object. This is accomplished by defining a
|
||||
* filesystem implementation interface:
|
||||
* -- for testing purposes, a dummy implementation of this interface
|
||||
* will suffice; as long as the dummy implementation is close enough
|
||||
* to doing what KFS does, we are good.
|
||||
* -- for deployment purposes with KFS, this interface is
|
||||
* implemented by the KfsImpl object.
|
||||
*/
|
||||
|
||||
package org.apache.hadoop.fs.kfs;
|
||||
|
||||
import java.io.*;
|
||||
|
||||
import org.apache.hadoop.fs.FSDataInputStream;
|
||||
import org.apache.hadoop.fs.FSDataOutputStream;
|
||||
import org.apache.hadoop.fs.FileStatus;
|
||||
import org.apache.hadoop.fs.Path;
|
||||
import org.apache.hadoop.util.Progressable;
|
||||
|
||||
interface IFSImpl {
|
||||
public boolean exists(String path) throws IOException;
|
||||
public boolean isDirectory(String path) throws IOException;
|
||||
public boolean isFile(String path) throws IOException;
|
||||
public String[] readdir(String path) throws IOException;
|
||||
public FileStatus[] readdirplus(Path path) throws IOException;
|
||||
|
||||
public int mkdirs(String path) throws IOException;
|
||||
public int rename(String source, String dest) throws IOException;
|
||||
|
||||
public int rmdir(String path) throws IOException;
|
||||
public int remove(String path) throws IOException;
|
||||
public long filesize(String path) throws IOException;
|
||||
public short getReplication(String path) throws IOException;
|
||||
public short setReplication(String path, short replication) throws IOException;
|
||||
public String[][] getDataLocation(String path, long start, long len) throws IOException;
|
||||
|
||||
public long getModificationTime(String path) throws IOException;
|
||||
public FSDataOutputStream create(String path, short replication, int bufferSize, Progressable progress) throws IOException;
|
||||
public FSDataInputStream open(String path, int bufferSize) throws IOException;
|
||||
public FSDataOutputStream append(String path, int bufferSize, Progressable progress) throws IOException;
|
||||
|
||||
};
|
|
@ -1,47 +0,0 @@
|
|||
/**
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
* regarding copyright ownership. The ASF licenses this file
|
||||
* to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package org.apache.hadoop.fs.kfs;
|
||||
|
||||
import org.apache.hadoop.classification.InterfaceAudience;
|
||||
import org.apache.hadoop.classification.InterfaceStability;
|
||||
import org.apache.hadoop.fs.CommonConfigurationKeys;
|
||||
|
||||
/**
|
||||
* This class contains constants for configuration keys used
|
||||
* in the kfs file system.
|
||||
*
|
||||
*/
|
||||
@InterfaceAudience.Private
|
||||
@InterfaceStability.Unstable
|
||||
public class KFSConfigKeys extends CommonConfigurationKeys {
|
||||
public static final String KFS_BLOCK_SIZE_KEY = "kfs.blocksize";
|
||||
public static final long KFS_BLOCK_SIZE_DEFAULT = 64*1024*1024;
|
||||
public static final String KFS_REPLICATION_KEY = "kfs.replication";
|
||||
public static final short KFS_REPLICATION_DEFAULT = 1;
|
||||
public static final String KFS_STREAM_BUFFER_SIZE_KEY =
|
||||
"kfs.stream-buffer-size";
|
||||
public static final int KFS_STREAM_BUFFER_SIZE_DEFAULT = 4096;
|
||||
public static final String KFS_BYTES_PER_CHECKSUM_KEY =
|
||||
"kfs.bytes-per-checksum";
|
||||
public static final int KFS_BYTES_PER_CHECKSUM_DEFAULT = 512;
|
||||
public static final String KFS_CLIENT_WRITE_PACKET_SIZE_KEY =
|
||||
"kfs.client-write-packet-size";
|
||||
public static final int KFS_CLIENT_WRITE_PACKET_SIZE_DEFAULT = 64*1024;
|
||||
}
|
||||
|
|
@ -1,171 +0,0 @@
|
|||
/**
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0
|
||||
* (the "License"); you may not use this file except in compliance with
|
||||
* the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
* implied. See the License for the specific language governing
|
||||
* permissions and limitations under the License.
|
||||
*
|
||||
*
|
||||
* Provide the implementation of KFS which turn into calls to KfsAccess.
|
||||
*/
|
||||
|
||||
package org.apache.hadoop.fs.kfs;
|
||||
|
||||
import java.io.*;
|
||||
|
||||
import org.apache.hadoop.classification.InterfaceAudience;
|
||||
import org.apache.hadoop.classification.InterfaceStability;
|
||||
import org.apache.hadoop.fs.FSDataInputStream;
|
||||
import org.apache.hadoop.fs.FSDataOutputStream;
|
||||
import org.apache.hadoop.fs.FileSystem;
|
||||
import org.apache.hadoop.fs.FileStatus;
|
||||
import org.apache.hadoop.fs.Path;
|
||||
|
||||
import org.kosmix.kosmosfs.access.KfsAccess;
|
||||
import org.kosmix.kosmosfs.access.KfsFileAttr;
|
||||
import org.apache.hadoop.util.Progressable;
|
||||
|
||||
@InterfaceAudience.Private
|
||||
@InterfaceStability.Unstable
|
||||
class KFSImpl implements IFSImpl {
|
||||
private KfsAccess kfsAccess = null;
|
||||
private FileSystem.Statistics statistics;
|
||||
|
||||
@Deprecated
|
||||
public KFSImpl(String metaServerHost, int metaServerPort
|
||||
) throws IOException {
|
||||
this(metaServerHost, metaServerPort, null);
|
||||
}
|
||||
|
||||
public KFSImpl(String metaServerHost, int metaServerPort,
|
||||
FileSystem.Statistics stats) throws IOException {
|
||||
kfsAccess = new KfsAccess(metaServerHost, metaServerPort);
|
||||
statistics = stats;
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean exists(String path) throws IOException {
|
||||
return kfsAccess.kfs_exists(path);
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean isDirectory(String path) throws IOException {
|
||||
return kfsAccess.kfs_isDirectory(path);
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean isFile(String path) throws IOException {
|
||||
return kfsAccess.kfs_isFile(path);
|
||||
}
|
||||
|
||||
@Override
|
||||
public String[] readdir(String path) throws IOException {
|
||||
return kfsAccess.kfs_readdir(path);
|
||||
}
|
||||
|
||||
@Override
|
||||
public FileStatus[] readdirplus(Path path) throws IOException {
|
||||
String srep = path.toUri().getPath();
|
||||
KfsFileAttr[] fattr = kfsAccess.kfs_readdirplus(srep);
|
||||
if (fattr == null)
|
||||
return null;
|
||||
int numEntries = 0;
|
||||
for (int i = 0; i < fattr.length; i++) {
|
||||
if ((fattr[i].filename.compareTo(".") == 0) || (fattr[i].filename.compareTo("target/generated-sources") == 0))
|
||||
continue;
|
||||
numEntries++;
|
||||
}
|
||||
FileStatus[] fstatus = new FileStatus[numEntries];
|
||||
int j = 0;
|
||||
for (int i = 0; i < fattr.length; i++) {
|
||||
if ((fattr[i].filename.compareTo(".") == 0) || (fattr[i].filename.compareTo("target/generated-sources") == 0))
|
||||
continue;
|
||||
Path fn = new Path(path, fattr[i].filename);
|
||||
|
||||
if (fattr[i].isDirectory)
|
||||
fstatus[j] = new FileStatus(0, true, 1, 0, fattr[i].modificationTime, fn);
|
||||
else
|
||||
fstatus[j] = new FileStatus(fattr[i].filesize, fattr[i].isDirectory,
|
||||
fattr[i].replication,
|
||||
(long)
|
||||
(1 << 26),
|
||||
fattr[i].modificationTime,
|
||||
fn);
|
||||
|
||||
j++;
|
||||
}
|
||||
return fstatus;
|
||||
}
|
||||
|
||||
|
||||
@Override
|
||||
public int mkdirs(String path) throws IOException {
|
||||
return kfsAccess.kfs_mkdirs(path);
|
||||
}
|
||||
|
||||
@Override
|
||||
public int rename(String source, String dest) throws IOException {
|
||||
return kfsAccess.kfs_rename(source, dest);
|
||||
}
|
||||
|
||||
@Override
|
||||
public int rmdir(String path) throws IOException {
|
||||
return kfsAccess.kfs_rmdir(path);
|
||||
}
|
||||
|
||||
@Override
|
||||
public int remove(String path) throws IOException {
|
||||
return kfsAccess.kfs_remove(path);
|
||||
}
|
||||
|
||||
@Override
|
||||
public long filesize(String path) throws IOException {
|
||||
return kfsAccess.kfs_filesize(path);
|
||||
}
|
||||
|
||||
@Override
|
||||
public short getReplication(String path) throws IOException {
|
||||
return kfsAccess.kfs_getReplication(path);
|
||||
}
|
||||
|
||||
@Override
|
||||
public short setReplication(String path, short replication) throws IOException {
|
||||
return kfsAccess.kfs_setReplication(path, replication);
|
||||
}
|
||||
|
||||
@Override
|
||||
public String[][] getDataLocation(String path, long start, long len) throws IOException {
|
||||
return kfsAccess.kfs_getDataLocation(path, start, len);
|
||||
}
|
||||
|
||||
@Override
|
||||
public long getModificationTime(String path) throws IOException {
|
||||
return kfsAccess.kfs_getModificationTime(path);
|
||||
}
|
||||
|
||||
@Override
|
||||
public FSDataInputStream open(String path, int bufferSize) throws IOException {
|
||||
return new FSDataInputStream(new KFSInputStream(kfsAccess, path,
|
||||
statistics));
|
||||
}
|
||||
|
||||
@Override
|
||||
public FSDataOutputStream create(String path, short replication, int bufferSize, Progressable progress) throws IOException {
|
||||
return new FSDataOutputStream(new KFSOutputStream(kfsAccess, path, replication, false, progress),
|
||||
statistics);
|
||||
}
|
||||
|
||||
@Override
|
||||
public FSDataOutputStream append(String path, int bufferSize, Progressable progress) throws IOException {
|
||||
// when opening for append, # of replicas is ignored
|
||||
return new FSDataOutputStream(new KFSOutputStream(kfsAccess, path, (short) 1, true, progress),
|
||||
statistics);
|
||||
}
|
||||
}
|
|
@ -1,143 +0,0 @@
|
|||
/**
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0
|
||||
* (the "License"); you may not use this file except in compliance with
|
||||
* the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
* implied. See the License for the specific language governing
|
||||
* permissions and limitations under the License.
|
||||
*
|
||||
*
|
||||
* Implements the Hadoop FSInputStream interfaces to allow applications to read
|
||||
* files in Kosmos File System (KFS).
|
||||
*/
|
||||
|
||||
package org.apache.hadoop.fs.kfs;
|
||||
|
||||
import java.io.*;
|
||||
import java.nio.ByteBuffer;
|
||||
|
||||
import org.apache.hadoop.classification.InterfaceAudience;
|
||||
import org.apache.hadoop.classification.InterfaceStability;
|
||||
import org.apache.hadoop.fs.FileSystem;
|
||||
import org.apache.hadoop.fs.FSInputStream;
|
||||
|
||||
import org.kosmix.kosmosfs.access.KfsAccess;
|
||||
import org.kosmix.kosmosfs.access.KfsInputChannel;
|
||||
|
||||
@InterfaceAudience.Private
|
||||
@InterfaceStability.Unstable
|
||||
class KFSInputStream extends FSInputStream {
|
||||
|
||||
private KfsInputChannel kfsChannel;
|
||||
private FileSystem.Statistics statistics;
|
||||
private long fsize;
|
||||
|
||||
@Deprecated
|
||||
public KFSInputStream(KfsAccess kfsAccess, String path) {
|
||||
this(kfsAccess, path, null);
|
||||
}
|
||||
|
||||
public KFSInputStream(KfsAccess kfsAccess, String path,
|
||||
FileSystem.Statistics stats) {
|
||||
this.statistics = stats;
|
||||
this.kfsChannel = kfsAccess.kfs_open(path);
|
||||
if (this.kfsChannel != null)
|
||||
this.fsize = kfsAccess.kfs_filesize(path);
|
||||
else
|
||||
this.fsize = 0;
|
||||
}
|
||||
|
||||
@Override
|
||||
public long getPos() throws IOException {
|
||||
if (kfsChannel == null) {
|
||||
throw new IOException("File closed");
|
||||
}
|
||||
return kfsChannel.tell();
|
||||
}
|
||||
|
||||
@Override
|
||||
public synchronized int available() throws IOException {
|
||||
if (kfsChannel == null) {
|
||||
throw new IOException("File closed");
|
||||
}
|
||||
return (int) (this.fsize - getPos());
|
||||
}
|
||||
|
||||
@Override
|
||||
public synchronized void seek(long targetPos) throws IOException {
|
||||
if (kfsChannel == null) {
|
||||
throw new IOException("File closed");
|
||||
}
|
||||
kfsChannel.seek(targetPos);
|
||||
}
|
||||
|
||||
@Override
|
||||
public synchronized boolean seekToNewSource(long targetPos) throws IOException {
|
||||
return false;
|
||||
}
|
||||
|
||||
@Override
|
||||
public synchronized int read() throws IOException {
|
||||
if (kfsChannel == null) {
|
||||
throw new IOException("File closed");
|
||||
}
|
||||
byte b[] = new byte[1];
|
||||
int res = read(b, 0, 1);
|
||||
if (res == 1) {
|
||||
if (statistics != null) {
|
||||
statistics.incrementBytesRead(1);
|
||||
}
|
||||
return b[0] & 0xff;
|
||||
}
|
||||
return -1;
|
||||
}
|
||||
|
||||
@Override
|
||||
public synchronized int read(byte b[], int off, int len) throws IOException {
|
||||
if (kfsChannel == null) {
|
||||
throw new IOException("File closed");
|
||||
}
|
||||
int res;
|
||||
|
||||
res = kfsChannel.read(ByteBuffer.wrap(b, off, len));
|
||||
// Use -1 to signify EOF
|
||||
if (res == 0)
|
||||
return -1;
|
||||
if (statistics != null) {
|
||||
statistics.incrementBytesRead(res);
|
||||
}
|
||||
return res;
|
||||
}
|
||||
|
||||
@Override
|
||||
public synchronized void close() throws IOException {
|
||||
if (kfsChannel == null) {
|
||||
return;
|
||||
}
|
||||
|
||||
kfsChannel.close();
|
||||
kfsChannel = null;
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean markSupported() {
|
||||
return false;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void mark(int readLimit) {
|
||||
// Do nothing
|
||||
}
|
||||
|
||||
@Override
|
||||
public void reset() throws IOException {
|
||||
throw new IOException("Mark not supported");
|
||||
}
|
||||
|
||||
}
|
|
@ -1,99 +0,0 @@
|
|||
/**
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0
|
||||
* (the "License"); you may not use this file except in compliance with
|
||||
* the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
* implied. See the License for the specific language governing
|
||||
* permissions and limitations under the License.
|
||||
*
|
||||
*
|
||||
* Implements the Hadoop FSOutputStream interfaces to allow applications to write to
|
||||
* files in Kosmos File System (KFS).
|
||||
*/
|
||||
|
||||
package org.apache.hadoop.fs.kfs;
|
||||
|
||||
import java.io.*;
|
||||
import java.nio.ByteBuffer;
|
||||
|
||||
import org.apache.hadoop.classification.InterfaceAudience;
|
||||
import org.apache.hadoop.classification.InterfaceStability;
|
||||
import org.apache.hadoop.util.Progressable;
|
||||
|
||||
import org.kosmix.kosmosfs.access.KfsAccess;
|
||||
import org.kosmix.kosmosfs.access.KfsOutputChannel;
|
||||
|
||||
@InterfaceAudience.Private
|
||||
@InterfaceStability.Unstable
|
||||
class KFSOutputStream extends OutputStream {
|
||||
|
||||
private String path;
|
||||
private KfsOutputChannel kfsChannel;
|
||||
private Progressable progressReporter;
|
||||
|
||||
public KFSOutputStream(KfsAccess kfsAccess, String path, short replication,
|
||||
boolean append, Progressable prog) {
|
||||
this.path = path;
|
||||
|
||||
if ((append) && (kfsAccess.kfs_isFile(path)))
|
||||
this.kfsChannel = kfsAccess.kfs_append(path);
|
||||
else
|
||||
this.kfsChannel = kfsAccess.kfs_create(path, replication);
|
||||
this.progressReporter = prog;
|
||||
}
|
||||
|
||||
public long getPos() throws IOException {
|
||||
if (kfsChannel == null) {
|
||||
throw new IOException("File closed");
|
||||
}
|
||||
return kfsChannel.tell();
|
||||
}
|
||||
|
||||
@Override
|
||||
public void write(int v) throws IOException {
|
||||
if (kfsChannel == null) {
|
||||
throw new IOException("File closed");
|
||||
}
|
||||
byte[] b = new byte[1];
|
||||
|
||||
b[0] = (byte) v;
|
||||
write(b, 0, 1);
|
||||
}
|
||||
|
||||
@Override
|
||||
public void write(byte b[], int off, int len) throws IOException {
|
||||
if (kfsChannel == null) {
|
||||
throw new IOException("File closed");
|
||||
}
|
||||
|
||||
// touch the progress before going into KFS since the call can block
|
||||
progressReporter.progress();
|
||||
kfsChannel.write(ByteBuffer.wrap(b, off, len));
|
||||
}
|
||||
|
||||
@Override
|
||||
public void flush() throws IOException {
|
||||
if (kfsChannel == null) {
|
||||
throw new IOException("File closed");
|
||||
}
|
||||
// touch the progress before going into KFS since the call can block
|
||||
progressReporter.progress();
|
||||
kfsChannel.sync();
|
||||
}
|
||||
|
||||
@Override
|
||||
public synchronized void close() throws IOException {
|
||||
if (kfsChannel == null) {
|
||||
return;
|
||||
}
|
||||
flush();
|
||||
kfsChannel.close();
|
||||
kfsChannel = null;
|
||||
}
|
||||
}
|
|
@ -1,352 +0,0 @@
|
|||
/**
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0
|
||||
* (the "License"); you may not use this file except in compliance with
|
||||
* the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
* implied. See the License for the specific language governing
|
||||
* permissions and limitations under the License.
|
||||
*
|
||||
*
|
||||
* Implements the Hadoop FS interfaces to allow applications to store
|
||||
*files in Kosmos File System (KFS).
|
||||
*/
|
||||
|
||||
package org.apache.hadoop.fs.kfs;
|
||||
|
||||
import java.io.FileNotFoundException;
|
||||
import java.io.IOException;
|
||||
import java.net.URI;
|
||||
|
||||
import org.apache.hadoop.classification.InterfaceAudience;
|
||||
import org.apache.hadoop.classification.InterfaceStability;
|
||||
import org.apache.hadoop.conf.Configuration;
|
||||
import org.apache.hadoop.fs.BlockLocation;
|
||||
import org.apache.hadoop.fs.FSDataInputStream;
|
||||
import org.apache.hadoop.fs.FSDataOutputStream;
|
||||
import org.apache.hadoop.fs.FileStatus;
|
||||
import org.apache.hadoop.fs.FileSystem;
|
||||
import org.apache.hadoop.fs.FileUtil;
|
||||
import org.apache.hadoop.fs.Path;
|
||||
import org.apache.hadoop.fs.permission.FsPermission;
|
||||
import org.apache.hadoop.util.Progressable;
|
||||
|
||||
/**
|
||||
* A FileSystem backed by KFS.
|
||||
*
|
||||
*/
|
||||
@InterfaceAudience.Public
|
||||
@InterfaceStability.Stable
|
||||
public class KosmosFileSystem extends FileSystem {
|
||||
|
||||
private FileSystem localFs;
|
||||
private IFSImpl kfsImpl = null;
|
||||
private URI uri;
|
||||
private Path workingDir = new Path("/");
|
||||
|
||||
public KosmosFileSystem() {
|
||||
|
||||
}
|
||||
|
||||
KosmosFileSystem(IFSImpl fsimpl) {
|
||||
this.kfsImpl = fsimpl;
|
||||
}
|
||||
|
||||
/**
|
||||
* Return the protocol scheme for the FileSystem.
|
||||
* <p/>
|
||||
*
|
||||
* @return <code>kfs</code>
|
||||
*/
|
||||
@Override
|
||||
public String getScheme() {
|
||||
return "kfs";
|
||||
}
|
||||
|
||||
@Override
|
||||
public URI getUri() {
|
||||
return uri;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void initialize(URI uri, Configuration conf) throws IOException {
|
||||
super.initialize(uri, conf);
|
||||
try {
|
||||
if (kfsImpl == null) {
|
||||
if (uri.getHost() == null) {
|
||||
kfsImpl = new KFSImpl(conf.get("fs.kfs.metaServerHost", ""),
|
||||
conf.getInt("fs.kfs.metaServerPort", -1),
|
||||
statistics);
|
||||
} else {
|
||||
kfsImpl = new KFSImpl(uri.getHost(), uri.getPort(), statistics);
|
||||
}
|
||||
}
|
||||
|
||||
this.localFs = FileSystem.getLocal(conf);
|
||||
this.uri = URI.create(uri.getScheme() + "://" + uri.getAuthority());
|
||||
this.workingDir = new Path("/user", System.getProperty("user.name")
|
||||
).makeQualified(this);
|
||||
setConf(conf);
|
||||
|
||||
} catch (Exception e) {
|
||||
e.printStackTrace();
|
||||
System.out.println("Unable to initialize KFS");
|
||||
System.exit(-1);
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public Path getWorkingDirectory() {
|
||||
return workingDir;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void setWorkingDirectory(Path dir) {
|
||||
workingDir = makeAbsolute(dir);
|
||||
}
|
||||
|
||||
private Path makeAbsolute(Path path) {
|
||||
if (path.isAbsolute()) {
|
||||
return path;
|
||||
}
|
||||
return new Path(workingDir, path);
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean mkdirs(Path path, FsPermission permission
|
||||
) throws IOException {
|
||||
Path absolute = makeAbsolute(path);
|
||||
String srep = absolute.toUri().getPath();
|
||||
|
||||
int res;
|
||||
|
||||
// System.out.println("Calling mkdirs on: " + srep);
|
||||
|
||||
res = kfsImpl.mkdirs(srep);
|
||||
|
||||
return res == 0;
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean isDirectory(Path path) throws IOException {
|
||||
Path absolute = makeAbsolute(path);
|
||||
String srep = absolute.toUri().getPath();
|
||||
|
||||
// System.out.println("Calling isdir on: " + srep);
|
||||
|
||||
return kfsImpl.isDirectory(srep);
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean isFile(Path path) throws IOException {
|
||||
Path absolute = makeAbsolute(path);
|
||||
String srep = absolute.toUri().getPath();
|
||||
return kfsImpl.isFile(srep);
|
||||
}
|
||||
|
||||
@Override
|
||||
public FileStatus[] listStatus(Path path) throws IOException {
|
||||
Path absolute = makeAbsolute(path);
|
||||
String srep = absolute.toUri().getPath();
|
||||
|
||||
if(!kfsImpl.exists(srep))
|
||||
throw new FileNotFoundException("File " + path + " does not exist.");
|
||||
|
||||
if (kfsImpl.isFile(srep))
|
||||
return new FileStatus[] { getFileStatus(path) } ;
|
||||
|
||||
return kfsImpl.readdirplus(absolute);
|
||||
}
|
||||
|
||||
@Override
|
||||
public FileStatus getFileStatus(Path path) throws IOException {
|
||||
Path absolute = makeAbsolute(path);
|
||||
String srep = absolute.toUri().getPath();
|
||||
if (!kfsImpl.exists(srep)) {
|
||||
throw new FileNotFoundException("File " + path + " does not exist.");
|
||||
}
|
||||
if (kfsImpl.isDirectory(srep)) {
|
||||
// System.out.println("Status of path: " + path + " is dir");
|
||||
return new FileStatus(0, true, 1, 0, kfsImpl.getModificationTime(srep),
|
||||
path.makeQualified(this));
|
||||
} else {
|
||||
// System.out.println("Status of path: " + path + " is file");
|
||||
return new FileStatus(kfsImpl.filesize(srep), false,
|
||||
kfsImpl.getReplication(srep),
|
||||
getDefaultBlockSize(),
|
||||
kfsImpl.getModificationTime(srep),
|
||||
path.makeQualified(this));
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public FSDataOutputStream append(Path f, int bufferSize,
|
||||
Progressable progress) throws IOException {
|
||||
Path parent = f.getParent();
|
||||
if (parent != null && !mkdirs(parent)) {
|
||||
throw new IOException("Mkdirs failed to create " + parent);
|
||||
}
|
||||
|
||||
Path absolute = makeAbsolute(f);
|
||||
String srep = absolute.toUri().getPath();
|
||||
|
||||
return kfsImpl.append(srep, bufferSize, progress);
|
||||
}
|
||||
|
||||
@Override
|
||||
public FSDataOutputStream create(Path file, FsPermission permission,
|
||||
boolean overwrite, int bufferSize,
|
||||
short replication, long blockSize, Progressable progress)
|
||||
throws IOException {
|
||||
|
||||
if (exists(file)) {
|
||||
if (overwrite) {
|
||||
delete(file, true);
|
||||
} else {
|
||||
throw new IOException("File already exists: " + file);
|
||||
}
|
||||
}
|
||||
|
||||
Path parent = file.getParent();
|
||||
if (parent != null && !mkdirs(parent)) {
|
||||
throw new IOException("Mkdirs failed to create " + parent);
|
||||
}
|
||||
|
||||
Path absolute = makeAbsolute(file);
|
||||
String srep = absolute.toUri().getPath();
|
||||
|
||||
return kfsImpl.create(srep, replication, bufferSize, progress);
|
||||
}
|
||||
|
||||
@Override
|
||||
public FSDataInputStream open(Path path, int bufferSize) throws IOException {
|
||||
if (!exists(path))
|
||||
throw new IOException("File does not exist: " + path);
|
||||
|
||||
Path absolute = makeAbsolute(path);
|
||||
String srep = absolute.toUri().getPath();
|
||||
|
||||
return kfsImpl.open(srep, bufferSize);
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean rename(Path src, Path dst) throws IOException {
|
||||
Path absoluteS = makeAbsolute(src);
|
||||
String srepS = absoluteS.toUri().getPath();
|
||||
Path absoluteD = makeAbsolute(dst);
|
||||
String srepD = absoluteD.toUri().getPath();
|
||||
|
||||
// System.out.println("Calling rename on: " + srepS + " -> " + srepD);
|
||||
|
||||
return kfsImpl.rename(srepS, srepD) == 0;
|
||||
}
|
||||
|
||||
// recursively delete the directory and its contents
|
||||
@Override
|
||||
public boolean delete(Path path, boolean recursive) throws IOException {
|
||||
Path absolute = makeAbsolute(path);
|
||||
String srep = absolute.toUri().getPath();
|
||||
if (kfsImpl.isFile(srep))
|
||||
return kfsImpl.remove(srep) == 0;
|
||||
|
||||
FileStatus[] dirEntries = listStatus(absolute);
|
||||
if (!recursive && (dirEntries.length != 0)) {
|
||||
throw new IOException("Directory " + path.toString() +
|
||||
" is not empty.");
|
||||
}
|
||||
|
||||
for (int i = 0; i < dirEntries.length; i++) {
|
||||
delete(new Path(absolute, dirEntries[i].getPath()), recursive);
|
||||
}
|
||||
return kfsImpl.rmdir(srep) == 0;
|
||||
}
|
||||
|
||||
@Override
|
||||
public short getDefaultReplication() {
|
||||
return 3;
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean setReplication(Path path, short replication)
|
||||
throws IOException {
|
||||
|
||||
Path absolute = makeAbsolute(path);
|
||||
String srep = absolute.toUri().getPath();
|
||||
|
||||
int res = kfsImpl.setReplication(srep, replication);
|
||||
return res >= 0;
|
||||
}
|
||||
|
||||
// 64MB is the KFS block size
|
||||
|
||||
@Override
|
||||
public long getDefaultBlockSize() {
|
||||
return 1 << 26;
|
||||
}
|
||||
|
||||
@Deprecated
|
||||
public void lock(Path path, boolean shared) throws IOException {
|
||||
|
||||
}
|
||||
|
||||
@Deprecated
|
||||
public void release(Path path) throws IOException {
|
||||
|
||||
}
|
||||
|
||||
/**
|
||||
* Return null if the file doesn't exist; otherwise, get the
|
||||
* locations of the various chunks of the file file from KFS.
|
||||
*/
|
||||
@Override
|
||||
public BlockLocation[] getFileBlockLocations(FileStatus file, long start,
|
||||
long len) throws IOException {
|
||||
|
||||
if (file == null) {
|
||||
return null;
|
||||
}
|
||||
String srep = makeAbsolute(file.getPath()).toUri().getPath();
|
||||
String[][] hints = kfsImpl.getDataLocation(srep, start, len);
|
||||
if (hints == null) {
|
||||
return null;
|
||||
}
|
||||
BlockLocation[] result = new BlockLocation[hints.length];
|
||||
long blockSize = getDefaultBlockSize();
|
||||
long length = len;
|
||||
long blockStart = start;
|
||||
for(int i=0; i < result.length; ++i) {
|
||||
result[i] = new BlockLocation(null, hints[i], blockStart,
|
||||
length < blockSize ? length : blockSize);
|
||||
blockStart += blockSize;
|
||||
length -= blockSize;
|
||||
}
|
||||
return result;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void copyFromLocalFile(boolean delSrc, Path src, Path dst) throws IOException {
|
||||
FileUtil.copy(localFs, src, this, dst, delSrc, getConf());
|
||||
}
|
||||
|
||||
@Override
|
||||
public void copyToLocalFile(boolean delSrc, Path src, Path dst) throws IOException {
|
||||
FileUtil.copy(this, src, localFs, dst, delSrc, getConf());
|
||||
}
|
||||
|
||||
@Override
|
||||
public Path startLocalOutput(Path fsOutputFile, Path tmpLocalFile)
|
||||
throws IOException {
|
||||
return tmpLocalFile;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void completeLocalOutput(Path fsOutputFile, Path tmpLocalFile)
|
||||
throws IOException {
|
||||
moveFromLocalFile(tmpLocalFile, fsOutputFile);
|
||||
}
|
||||
}
|
|
@ -1,98 +0,0 @@
|
|||
<html>
|
||||
|
||||
<!--
|
||||
Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
contributor license agreements. See the NOTICE file distributed with
|
||||
this work for additional information regarding copyright ownership.
|
||||
The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
(the "License"); you may not use this file except in compliance with
|
||||
the License. You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
-->
|
||||
|
||||
<head></head>
|
||||
<body>
|
||||
<h1>A client for the Kosmos filesystem (KFS)</h1>
|
||||
|
||||
<h3>Introduction</h3>
|
||||
|
||||
This pages describes how to use Kosmos Filesystem
|
||||
(<a href="http://kosmosfs.sourceforge.net"> KFS </a>) as a backing
|
||||
store with Hadoop. This page assumes that you have downloaded the
|
||||
KFS software and installed necessary binaries as outlined in the KFS
|
||||
documentation.
|
||||
|
||||
<h3>Steps</h3>
|
||||
|
||||
<ul>
|
||||
<li>In the Hadoop conf directory edit core-site.xml,
|
||||
add the following:
|
||||
<pre>
|
||||
<property>
|
||||
<name>fs.kfs.impl</name>
|
||||
<value>org.apache.hadoop.fs.kfs.KosmosFileSystem</value>
|
||||
<description>The FileSystem for kfs: uris.</description>
|
||||
</property>
|
||||
</pre>
|
||||
|
||||
<li>In the Hadoop conf directory edit core-site.xml,
|
||||
adding the following (with appropriate values for
|
||||
<server> and <port>):
|
||||
<pre>
|
||||
<property>
|
||||
<name>fs.default.name</name>
|
||||
<value>kfs://<server:port></value>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>fs.kfs.metaServerHost</name>
|
||||
<value><server></value>
|
||||
<description>The location of the KFS meta server.</description>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>fs.kfs.metaServerPort</name>
|
||||
<value><port></value>
|
||||
<description>The location of the meta server's port.</description>
|
||||
</property>
|
||||
|
||||
</pre>
|
||||
</li>
|
||||
|
||||
<li>Copy KFS's <i> kfs-0.1.jar </i> to Hadoop's lib directory. This step
|
||||
enables Hadoop's to load the KFS specific modules. Note
|
||||
that, kfs-0.1.jar was built when you compiled KFS source
|
||||
code. This jar file contains code that calls KFS's client
|
||||
library code via JNI; the native code is in KFS's <i>
|
||||
libkfsClient.so </i> library.
|
||||
</li>
|
||||
|
||||
<li> When the Hadoop map/reduce trackers start up, those
|
||||
processes (on local as well as remote nodes) will now need to load
|
||||
KFS's <i> libkfsClient.so </i> library. To simplify this process, it is advisable to
|
||||
store libkfsClient.so in an NFS accessible directory (similar to where
|
||||
Hadoop binaries/scripts are stored); then, modify Hadoop's
|
||||
conf/hadoop-env.sh adding the following line and providing suitable
|
||||
value for <path>:
|
||||
<pre>
|
||||
export LD_LIBRARY_PATH=<path>
|
||||
</pre>
|
||||
|
||||
|
||||
<li>Start only the map/reduce trackers
|
||||
<br />
|
||||
example: execute Hadoop's bin/start-mapred.sh</li>
|
||||
</ul>
|
||||
<br/>
|
||||
|
||||
If the map/reduce job trackers start up, all file-I/O is done to KFS.
|
||||
|
||||
</body>
|
||||
</html>
|
|
@@ -89,7 +89,11 @@ class ChRootedFileSystem extends FilterFileSystem {
  public ChRootedFileSystem(final URI uri, Configuration conf)
      throws IOException {
    super(FileSystem.get(uri, conf));
    chRootPathPart = new Path(uri.getPath());
    String pathString = uri.getPath();
    if (pathString.isEmpty()) {
      pathString = "/";
    }
    chRootPathPart = new Path(pathString);
    chRootPathPartString = chRootPathPart.toUri().getPath();
    myUri = uri;
    workingDir = getHomeDirectory();

@@ -205,9 +205,13 @@ public class ViewFs extends AbstractFileSystem {
        protected
        AbstractFileSystem getTargetFileSystem(final URI uri)
          throws URISyntaxException, UnsupportedFileSystemException {
            String pathString = uri.getPath();
            if (pathString.isEmpty()) {
              pathString = "/";
            }
            return new ChRootedFs(
                AbstractFileSystem.createFileSystem(uri, config),
                new Path(uri.getPath()));
                new Path(pathString));
        }

        @Override
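Both hunks above apply the same fix for HADOOP-8616 (ViewFS configuration requires a trailing slash): a mount-target URI with an empty path, such as hdfs://nn1 with no trailing slash, is normalized to "/" before the chroot Path is built. A minimal standalone sketch of that normalization follows; the class and helper names are illustrative, not the Hadoop API.

import java.net.URI;

public class ChRootPathSketch {

  // An empty URI path (no trailing slash) is treated as the filesystem root.
  static String chRootPath(URI uri) {
    String pathString = uri.getPath();
    return pathString.isEmpty() ? "/" : pathString;
  }

  public static void main(String[] args) {
    System.out.println(chRootPath(URI.create("hdfs://nn1")));        // "/"
    System.out.println(chRootPath(URI.create("hdfs://nn1/data/a"))); // "/data/a"
  }
}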
@@ -24,7 +24,6 @@ import java.io.OutputStream;

import org.apache.hadoop.conf.Configurable;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.compress.snappy.LoadSnappy;
import org.apache.hadoop.io.compress.snappy.SnappyCompressor;
import org.apache.hadoop.io.compress.snappy.SnappyDecompressor;
import org.apache.hadoop.fs.CommonConfigurationKeys;

@@ -34,11 +33,6 @@ import org.apache.hadoop.util.NativeCodeLoader;
 * This class creates snappy compressors/decompressors.
 */
public class SnappyCodec implements Configurable, CompressionCodec {

  static {
    LoadSnappy.isLoaded();
  }

  Configuration conf;

  /**

@@ -63,11 +57,26 @@ public class SnappyCodec implements Configurable, CompressionCodec {

  /**
   * Are the native snappy libraries loaded & initialized?
   *
   * @return true if loaded & initialized, otherwise false
   */
  public static void checkNativeCodeLoaded() {
    if (!NativeCodeLoader.buildSupportsSnappy()) {
      throw new RuntimeException("native snappy library not available: " +
          "this version of libhadoop was built without " +
          "snappy support.");
    }
    if (!SnappyCompressor.isNativeCodeLoaded()) {
      throw new RuntimeException("native snappy library not available: " +
          "SnappyCompressor has not been loaded.");
    }
    if (!SnappyDecompressor.isNativeCodeLoaded()) {
      throw new RuntimeException("native snappy library not available: " +
          "SnappyDecompressor has not been loaded.");
    }
  }

  public static boolean isNativeCodeLoaded() {
    return LoadSnappy.isLoaded() && NativeCodeLoader.isNativeCodeLoaded();
    return SnappyCompressor.isNativeCodeLoaded() &&
        SnappyDecompressor.isNativeCodeLoaded();
  }

  /**

@@ -97,9 +106,7 @@ public class SnappyCodec implements Configurable, CompressionCodec {
  public CompressionOutputStream createOutputStream(OutputStream out,
      Compressor compressor)
      throws IOException {
    if (!isNativeCodeLoaded()) {
      throw new RuntimeException("native snappy library not available");
    }
    checkNativeCodeLoaded();
    int bufferSize = conf.getInt(
        CommonConfigurationKeys.IO_COMPRESSION_CODEC_SNAPPY_BUFFERSIZE_KEY,
        CommonConfigurationKeys.IO_COMPRESSION_CODEC_SNAPPY_BUFFERSIZE_DEFAULT);

@@ -117,10 +124,7 @@ public class SnappyCodec implements Configurable, CompressionCodec {
   */
  @Override
  public Class<? extends Compressor> getCompressorType() {
    if (!isNativeCodeLoaded()) {
      throw new RuntimeException("native snappy library not available");
    }

    checkNativeCodeLoaded();
    return SnappyCompressor.class;
  }

@@ -131,9 +135,7 @@ public class SnappyCodec implements Configurable, CompressionCodec {
   */
  @Override
  public Compressor createCompressor() {
    if (!isNativeCodeLoaded()) {
      throw new RuntimeException("native snappy library not available");
    }
    checkNativeCodeLoaded();
    int bufferSize = conf.getInt(
        CommonConfigurationKeys.IO_COMPRESSION_CODEC_SNAPPY_BUFFERSIZE_KEY,
        CommonConfigurationKeys.IO_COMPRESSION_CODEC_SNAPPY_BUFFERSIZE_DEFAULT);

@@ -167,10 +169,7 @@ public class SnappyCodec implements Configurable, CompressionCodec {
  public CompressionInputStream createInputStream(InputStream in,
      Decompressor decompressor)
      throws IOException {
    if (!isNativeCodeLoaded()) {
      throw new RuntimeException("native snappy library not available");
    }

    checkNativeCodeLoaded();
    return new BlockDecompressorStream(in, decompressor, conf.getInt(
        CommonConfigurationKeys.IO_COMPRESSION_CODEC_SNAPPY_BUFFERSIZE_KEY,
        CommonConfigurationKeys.IO_COMPRESSION_CODEC_SNAPPY_BUFFERSIZE_DEFAULT));

@@ -183,10 +182,7 @@ public class SnappyCodec implements Configurable, CompressionCodec {
   */
  @Override
  public Class<? extends Decompressor> getDecompressorType() {
    if (!isNativeCodeLoaded()) {
      throw new RuntimeException("native snappy library not available");
    }

    checkNativeCodeLoaded();
    return SnappyDecompressor.class;
  }

@@ -197,9 +193,7 @@ public class SnappyCodec implements Configurable, CompressionCodec {
   */
  @Override
  public Decompressor createDecompressor() {
    if (!isNativeCodeLoaded()) {
      throw new RuntimeException("native snappy library not available");
    }
    checkNativeCodeLoaded();
    int bufferSize = conf.getInt(
        CommonConfigurationKeys.IO_COMPRESSION_CODEC_SNAPPY_BUFFERSIZE_KEY,
        CommonConfigurationKeys.IO_COMPRESSION_CODEC_SNAPPY_BUFFERSIZE_DEFAULT);
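The codec's factory methods above now call checkNativeCodeLoaded(), which throws a RuntimeException naming the exact missing prerequisite (libhadoop built without snappy support, or the compressor/decompressor natives not loaded) instead of the previous generic message. A standalone sketch of the same fail-fast guard pattern; the probe methods here are stand-ins, not the Hadoop API.

public class NativeGuardSketch {

  // Stand-in probes for the three prerequisites the real codec checks.
  static boolean buildSupportsSnappy()      { return false; }
  static boolean compressorNativeLoaded()   { return false; }
  static boolean decompressorNativeLoaded() { return false; }

  // Fails with a message that says which prerequisite is missing,
  // rather than returning a bare boolean the caller has to interpret.
  static void checkNativeCodeLoaded() {
    if (!buildSupportsSnappy()) {
      throw new RuntimeException("native snappy library not available: "
          + "this build of the native library has no snappy support.");
    }
    if (!compressorNativeLoaded()) {
      throw new RuntimeException("native snappy library not available: "
          + "compressor natives have not been loaded.");
    }
    if (!decompressorNativeLoaded()) {
      throw new RuntimeException("native snappy library not available: "
          + "decompressor natives have not been loaded.");
    }
  }

  public static void main(String[] args) {
    try {
      checkNativeCodeLoaded(); // every factory method starts with this guard
    } catch (RuntimeException e) {
      System.err.println("snappy unavailable: " + e.getMessage());
    }
  }
}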
@ -1,70 +0,0 @@
|
|||
/**
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
* regarding copyright ownership. The ASF licenses this file
|
||||
* to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
package org.apache.hadoop.io.compress.snappy;
|
||||
|
||||
import org.apache.commons.logging.Log;
|
||||
import org.apache.commons.logging.LogFactory;
|
||||
import org.apache.hadoop.util.NativeCodeLoader;
|
||||
|
||||
/**
|
||||
* Determines if Snappy native library is available and loads it if available.
|
||||
*/
|
||||
public class LoadSnappy {
|
||||
private static final Log LOG = LogFactory.getLog(LoadSnappy.class.getName());
|
||||
|
||||
private static boolean AVAILABLE = false;
|
||||
private static boolean LOADED = false;
|
||||
|
||||
static {
|
||||
try {
|
||||
System.loadLibrary("snappy");
|
||||
LOG.warn("Snappy native library is available");
|
||||
AVAILABLE = true;
|
||||
} catch (UnsatisfiedLinkError ex) {
|
||||
//NOP
|
||||
}
|
||||
boolean hadoopNativeAvailable = NativeCodeLoader.isNativeCodeLoaded();
|
||||
LOADED = AVAILABLE && hadoopNativeAvailable;
|
||||
if (LOADED) {
|
||||
LOG.info("Snappy native library loaded");
|
||||
} else {
|
||||
LOG.warn("Snappy native library not loaded");
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns if Snappy native library is loaded.
|
||||
*
|
||||
* @return <code>true</code> if Snappy native library is loaded,
|
||||
* <code>false</code> if not.
|
||||
*/
|
||||
public static boolean isAvailable() {
|
||||
return AVAILABLE;
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns if Snappy native library is loaded.
|
||||
*
|
||||
* @return <code>true</code> if Snappy native library is loaded,
|
||||
* <code>false</code> if not.
|
||||
*/
|
||||
public static boolean isLoaded() {
|
||||
return LOADED;
|
||||
}
|
||||
|
||||
}
|
|
@ -26,6 +26,7 @@ import org.apache.commons.logging.Log;
|
|||
import org.apache.commons.logging.LogFactory;
|
||||
import org.apache.hadoop.conf.Configuration;
|
||||
import org.apache.hadoop.io.compress.Compressor;
|
||||
import org.apache.hadoop.util.NativeCodeLoader;
|
||||
|
||||
/**
|
||||
* A {@link Compressor} based on the snappy compression algorithm.
|
||||
|
@ -51,22 +52,24 @@ public class SnappyCompressor implements Compressor {
|
|||
private long bytesRead = 0L;
|
||||
private long bytesWritten = 0L;
|
||||
|
||||
|
||||
private static boolean nativeSnappyLoaded = false;
|
||||
|
||||
static {
|
||||
if (LoadSnappy.isLoaded()) {
|
||||
// Initialize the native library
|
||||
if (NativeCodeLoader.isNativeCodeLoaded() &&
|
||||
NativeCodeLoader.buildSupportsSnappy()) {
|
||||
try {
|
||||
initIDs();
|
||||
nativeSnappyLoaded = true;
|
||||
} catch (Throwable t) {
|
||||
// Ignore failure to load/initialize snappy
|
||||
LOG.warn(t.toString());
|
||||
LOG.error("failed to load SnappyCompressor", t);
|
||||
}
|
||||
} else {
|
||||
LOG.error("Cannot load " + SnappyCompressor.class.getName() +
|
||||
" without snappy library!");
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
public static boolean isNativeCodeLoaded() {
|
||||
return nativeSnappyLoaded;
|
||||
}
|
||||
|
||||
/**
|
||||
* Creates a new compressor.
|
||||
*
|
||||
|
|
|
@ -25,6 +25,7 @@ import java.nio.ByteBuffer;
|
|||
import org.apache.commons.logging.Log;
|
||||
import org.apache.commons.logging.LogFactory;
|
||||
import org.apache.hadoop.io.compress.Decompressor;
|
||||
import org.apache.hadoop.util.NativeCodeLoader;
|
||||
|
||||
/**
|
||||
* A {@link Decompressor} based on the snappy compression algorithm.
|
||||
|
@ -47,21 +48,24 @@ public class SnappyDecompressor implements Decompressor {
|
|||
private int userBufOff = 0, userBufLen = 0;
|
||||
private boolean finished;
|
||||
|
||||
private static boolean nativeSnappyLoaded = false;
|
||||
|
||||
static {
|
||||
if (LoadSnappy.isLoaded()) {
|
||||
// Initialize the native library
|
||||
if (NativeCodeLoader.isNativeCodeLoaded() &&
|
||||
NativeCodeLoader.buildSupportsSnappy()) {
|
||||
try {
|
||||
initIDs();
|
||||
nativeSnappyLoaded = true;
|
||||
} catch (Throwable t) {
|
||||
// Ignore failure to load/initialize snappy
|
||||
LOG.warn(t.toString());
|
||||
LOG.error("failed to load SnappyDecompressor", t);
|
||||
}
|
||||
} else {
|
||||
LOG.error("Cannot load " + SnappyDecompressor.class.getName() +
|
||||
" without snappy library!");
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
public static boolean isNativeCodeLoaded() {
|
||||
return nativeSnappyLoaded;
|
||||
}
|
||||
|
||||
/**
|
||||
* Creates a new compressor.
|
||||
*
|
||||
|
|
|
@ -87,7 +87,6 @@ import org.apache.hadoop.security.SaslRpcServer.SaslDigestCallbackHandler;
|
|||
import org.apache.hadoop.security.SaslRpcServer.SaslGssCallbackHandler;
|
||||
import org.apache.hadoop.security.SaslRpcServer.SaslStatus;
|
||||
import org.apache.hadoop.security.UserGroupInformation;
|
||||
import org.apache.hadoop.security.UserGroupInformation.AuthenticationMethod;
|
||||
import org.apache.hadoop.security.authorize.AuthorizationException;
|
||||
import org.apache.hadoop.security.authorize.PolicyProvider;
|
||||
import org.apache.hadoop.security.authorize.ProxyUsers;
|
||||
|
@ -1374,20 +1373,38 @@ public abstract class Server {
|
|||
dataLengthBuffer.clear();
|
||||
if (authMethod == null) {
|
||||
throw new IOException("Unable to read authentication method");
|
||||
}
|
||||
if (isSecurityEnabled && authMethod == AuthMethod.SIMPLE) {
|
||||
AccessControlException ae = new AccessControlException("Authorization ("
|
||||
+ CommonConfigurationKeys.HADOOP_SECURITY_AUTHORIZATION
|
||||
+ ") is enabled but authentication ("
|
||||
+ CommonConfigurationKeys.HADOOP_SECURITY_AUTHENTICATION
|
||||
+ ") is configured as simple. Please configure another method "
|
||||
+ "like kerberos or digest.");
|
||||
setupResponse(authFailedResponse, authFailedCall, RpcStatusProto.FATAL,
|
||||
null, ae.getClass().getName(), ae.getMessage());
|
||||
responder.doRespond(authFailedCall);
|
||||
throw ae;
|
||||
}
|
||||
if (!isSecurityEnabled && authMethod != AuthMethod.SIMPLE) {
|
||||
}
|
||||
final boolean clientUsingSasl;
|
||||
switch (authMethod) {
|
||||
case SIMPLE: { // no sasl for simple
|
||||
if (isSecurityEnabled) {
|
||||
AccessControlException ae = new AccessControlException("Authorization ("
|
||||
+ CommonConfigurationKeys.HADOOP_SECURITY_AUTHORIZATION
|
||||
+ ") is enabled but authentication ("
|
||||
+ CommonConfigurationKeys.HADOOP_SECURITY_AUTHENTICATION
|
||||
+ ") is configured as simple. Please configure another method "
|
||||
+ "like kerberos or digest.");
|
||||
setupResponse(authFailedResponse, authFailedCall, RpcStatusProto.FATAL,
|
||||
null, ae.getClass().getName(), ae.getMessage());
|
||||
responder.doRespond(authFailedCall);
|
||||
throw ae;
|
||||
}
|
||||
clientUsingSasl = false;
|
||||
useSasl = false;
|
||||
break;
|
||||
}
|
||||
case DIGEST: {
|
||||
clientUsingSasl = true;
|
||||
useSasl = (secretManager != null);
|
||||
break;
|
||||
}
|
||||
default: {
|
||||
clientUsingSasl = true;
|
||||
useSasl = isSecurityEnabled;
|
||||
break;
|
||||
}
|
||||
}
|
||||
if (clientUsingSasl && !useSasl) {
|
||||
doSaslReply(SaslStatus.SUCCESS, new IntWritable(
|
||||
SaslRpcServer.SWITCH_TO_SIMPLE_AUTH), null, null);
|
||||
authMethod = AuthMethod.SIMPLE;
|
||||
|
@ -1396,9 +1413,6 @@ public abstract class Server {
|
|||
// to simple auth from now on.
|
||||
skipInitialSaslHandshake = true;
|
||||
}
|
||||
if (authMethod != AuthMethod.SIMPLE) {
|
||||
useSasl = true;
|
||||
}
|
||||
|
||||
connectionHeaderBuf = null;
|
||||
connectionHeaderRead = true;
|
||||
|
@ -1532,8 +1546,6 @@ public abstract class Server {
|
|||
UserGroupInformation realUser = user;
|
||||
user = UserGroupInformation.createProxyUser(protocolUser
|
||||
.getUserName(), realUser);
|
||||
// Now the user is a proxy user, set Authentication method Proxy.
|
||||
user.setAuthenticationMethod(AuthenticationMethod.PROXY);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -1883,7 +1895,7 @@ public abstract class Server {
|
|||
// Create the responder here
|
||||
responder = new Responder();
|
||||
|
||||
if (isSecurityEnabled) {
|
||||
if (secretManager != null) {
|
||||
SaslRpcServer.init(conf);
|
||||
}
|
||||
|
||||
|
|
|
@ -35,6 +35,7 @@ import org.apache.hadoop.metrics2.util.Quantile;
|
|||
import org.apache.hadoop.metrics2.util.SampleQuantiles;
|
||||
|
||||
import com.google.common.annotations.VisibleForTesting;
|
||||
import com.google.common.util.concurrent.ThreadFactoryBuilder;
|
||||
|
||||
/**
|
||||
* Watches a stream of long values, maintaining online estimates of specific
|
||||
|
@ -60,8 +61,9 @@ public class MutableQuantiles extends MutableMetric {
|
|||
@VisibleForTesting
protected Map<Quantile, Long> previousSnapshot = null;

private final ScheduledExecutorService scheduler = Executors
.newScheduledThreadPool(1);
private static final ScheduledExecutorService scheduler = Executors
.newScheduledThreadPool(1, new ThreadFactoryBuilder().setDaemon(true)
.setNameFormat("MutableQuantiles-%d").build());

/**
* Instantiates a new {@link MutableQuantiles} for a metric that rolls itself
|
|
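The MutableQuantiles hunk above replaces a per-instance, non-daemon executor with one shared scheduler built from Guava's ThreadFactoryBuilder, so the rolling-snapshot threads are daemons and cannot keep a JVM (for example, a finished unit test) alive. A self-contained sketch of the same pattern, with illustrative names and a dummy task that are not part of the patch:

    import java.util.concurrent.Executors;
    import java.util.concurrent.ScheduledExecutorService;
    import java.util.concurrent.TimeUnit;

    import com.google.common.util.concurrent.ThreadFactoryBuilder;

    public class DaemonSchedulerSketch {
      // Shared across all instances; daemon threads do not block JVM shutdown.
      private static final ScheduledExecutorService SCHEDULER = Executors
          .newScheduledThreadPool(1, new ThreadFactoryBuilder().setDaemon(true)
              .setNameFormat("sketch-scheduler-%d").build());

      public static void main(String[] args) {
        SCHEDULER.scheduleAtFixedRate(new Runnable() {
          @Override
          public void run() {
            System.out.println("periodic snapshot roll");
          }
        }, 1, 1, TimeUnit.SECONDS);
        // main() returns immediately; the daemon scheduler does not keep the JVM running.
      }
    }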
|
@ -210,9 +210,12 @@ public class SampleQuantiles {
|
|||
int rankMin = 0;
|
||||
int desired = (int) (quantile * count);
|
||||
|
||||
ListIterator<SampleItem> it = samples.listIterator();
|
||||
SampleItem prev = null;
|
||||
SampleItem cur = it.next();
|
||||
for (int i = 1; i < samples.size(); i++) {
|
||||
SampleItem prev = samples.get(i - 1);
|
||||
SampleItem cur = samples.get(i);
|
||||
prev = cur;
|
||||
cur = it.next();
|
||||
|
||||
rankMin += prev.g;
|
||||
|
||||
|
|
|
@ -452,7 +452,7 @@ public class SecurityUtil {
|
|||
return action.run();
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
/**
|
||||
* Perform the given action as the daemon's login user. If an
|
||||
* InterruptedException is thrown, it is converted to an IOException.
|
||||
|
@ -499,7 +499,7 @@ public class SecurityUtil {
|
|||
* @throws IOException If unable to authenticate via SPNEGO
|
||||
*/
|
||||
public static URLConnection openSecureHttpConnection(URL url) throws IOException {
|
||||
if(!UserGroupInformation.isSecurityEnabled()) {
|
||||
if (!HttpConfig.isSecure() && !UserGroupInformation.isSecurityEnabled()) {
|
||||
return url.openConnection();
|
||||
}
|
||||
|
||||
|
|
|
@ -160,7 +160,7 @@ public class FileBasedKeyStoresFactory implements KeyStoresFactory {
|
|||
} finally {
|
||||
is.close();
|
||||
}
|
||||
LOG.info(mode.toString() + " Loaded KeyStore: " + keystoreLocation);
|
||||
LOG.debug(mode.toString() + " Loaded KeyStore: " + keystoreLocation);
|
||||
} else {
|
||||
keystore.load(null, null);
|
||||
}
|
||||
|
@ -201,7 +201,7 @@ public class FileBasedKeyStoresFactory implements KeyStoresFactory {
|
|||
truststorePassword,
|
||||
truststoreReloadInterval);
|
||||
trustManager.init();
|
||||
LOG.info(mode.toString() + " Loaded TrustStore: " + truststoreLocation);
|
||||
LOG.debug(mode.toString() + " Loaded TrustStore: " + truststoreLocation);
|
||||
|
||||
trustManagers = new TrustManager[]{trustManager};
|
||||
}
|
||||
|
|
|
@ -74,6 +74,11 @@ public class NativeCodeLoader {
|
|||
return nativeCodeLoaded;
}

/**
* Returns true only if this build was compiled with support for snappy.
*/
public static native boolean buildSupportsSnappy();

/**
* Return if native hadoop libraries, if present, can be used for this job.
* @param conf configuration
|
|
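Seen together with the new SnappyCompressor and SnappyDecompressor static initializers earlier in this change, buildSupportsSnappy() lets callers separate "libhadoop.so was not loaded at all" from "libhadoop.so is loaded but was built without snappy". A small usage sketch (the printed wording is illustrative, not from the patch):

    import org.apache.hadoop.util.NativeCodeLoader;

    public class SnappySupportCheck {
      public static void main(String[] args) {
        // buildSupportsSnappy() is a native method, so only call it once libhadoop is loaded.
        if (!NativeCodeLoader.isNativeCodeLoaded()) {
          System.out.println("libhadoop.so not found on java.library.path");
        } else if (!NativeCodeLoader.buildSupportsSnappy()) {
          System.out.println("libhadoop.so loaded, but built without snappy support");
        } else {
          System.out.println("native snappy support is available");
        }
      }
    }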
|
@ -34,6 +34,7 @@ import java.util.List;
|
|||
import java.util.Locale;
|
||||
import java.util.StringTokenizer;
|
||||
|
||||
import com.google.common.net.InetAddresses;
|
||||
import org.apache.hadoop.classification.InterfaceAudience;
|
||||
import org.apache.hadoop.classification.InterfaceStability;
|
||||
import org.apache.hadoop.fs.Path;
|
||||
|
@ -77,6 +78,9 @@ public class StringUtils {
|
|||
* @return the hostname to the first dot
*/
public static String simpleHostname(String fullHostname) {
if (InetAddresses.isInetAddress(fullHostname)) {
return fullHostname;
}
int offset = fullHostname.indexOf('.');
if (offset != -1) {
return fullHostname.substring(0, offset);
|
|
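The InetAddresses guard added above keeps address literals intact instead of chopping them at the first dot; the expected behaviour, matching the new TestStringUtils case later in this patch, is roughly:

    import org.apache.hadoop.util.StringUtils;

    public class SimpleHostnameExample {
      public static void main(String[] args) {
        // FQDNs are still truncated at the first dot.
        System.out.println(StringUtils.simpleHostname("hadoop01.domain.com")); // hadoop01
        // IP addresses are now returned unchanged (previously this produced "10").
        System.out.println(StringUtils.simpleHostname("10.10.5.68"));          // 10.10.5.68
      }
    }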
|
@ -0,0 +1,31 @@
|
|||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
* regarding copyright ownership. The ASF licenses this file
|
||||
* to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
#include "config.h"
|
||||
|
||||
#include <jni.h>
|
||||
|
||||
JNIEXPORT jboolean JNICALL Java_org_apache_hadoop_util_NativeCodeLoader_buildSupportsSnappy
|
||||
(JNIEnv *env, jclass clazz)
|
||||
{
|
||||
#ifdef HADOOP_SNAPPY_LIBRARY
|
||||
return JNI_TRUE;
|
||||
#else
|
||||
return JNI_FALSE;
|
||||
#endif
|
||||
}
|
|
@ -17,6 +17,5 @@ org.apache.hadoop.fs.LocalFileSystem
|
|||
org.apache.hadoop.fs.viewfs.ViewFileSystem
|
||||
org.apache.hadoop.fs.s3.S3FileSystem
|
||||
org.apache.hadoop.fs.s3native.NativeS3FileSystem
|
||||
org.apache.hadoop.fs.kfs.KosmosFileSystem
|
||||
org.apache.hadoop.fs.ftp.FTPFileSystem
|
||||
org.apache.hadoop.fs.HarFileSystem
|
||||
|
|
|
@ -774,42 +774,6 @@
|
|||
<description>Replication factor</description>
|
||||
</property>
|
||||
|
||||
<!-- Kosmos File System -->
|
||||
|
||||
<property>
|
||||
<name>kfs.stream-buffer-size</name>
|
||||
<value>4096</value>
|
||||
<description>The size of buffer to stream files.
|
||||
The size of this buffer should probably be a multiple of hardware
|
||||
page size (4096 on Intel x86), and it determines how much data is
|
||||
buffered during read and write operations.</description>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>kfs.bytes-per-checksum</name>
|
||||
<value>512</value>
|
||||
<description>The number of bytes per checksum. Must not be larger than
|
||||
kfs.stream-buffer-size</description>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>kfs.client-write-packet-size</name>
|
||||
<value>65536</value>
|
||||
<description>Packet size for clients to write</description>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>kfs.blocksize</name>
|
||||
<value>67108864</value>
|
||||
<description>Block size</description>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>kfs.replication</name>
|
||||
<value>3</value>
|
||||
<description>Replication factor</description>
|
||||
</property>
|
||||
|
||||
<!-- FTP file system -->
|
||||
<property>
|
||||
<name>ftp.stream-buffer-size</name>
|
||||
|
|
|
@ -26,6 +26,8 @@ import java.io.File;
|
|||
import java.io.IOException;
|
||||
import java.io.PrintStream;
|
||||
import java.net.URI;
|
||||
import java.text.DateFormat;
|
||||
import java.text.SimpleDateFormat;
|
||||
import java.util.HashSet;
|
||||
import java.util.Set;
|
||||
|
||||
|
@ -434,6 +436,36 @@ public class TestTrash extends TestCase {
|
|||
output.indexOf("Failed to determine server trash configuration") != -1);
|
||||
}
|
||||
|
||||
// Verify old checkpoint format is recognized
|
||||
{
|
||||
// emulate two old trash checkpoint directories, one that is old enough
|
||||
// to be deleted on the next expunge and one that isn't.
|
||||
long trashInterval = conf.getLong(FS_TRASH_INTERVAL_KEY,
|
||||
FS_TRASH_INTERVAL_DEFAULT);
|
||||
long now = Time.now();
|
||||
DateFormat oldCheckpointFormat = new SimpleDateFormat("yyMMddHHmm");
|
||||
Path dirToDelete = new Path(trashRoot.getParent(),
|
||||
oldCheckpointFormat.format(now - (trashInterval * 60 * 1000) - 1));
|
||||
Path dirToKeep = new Path(trashRoot.getParent(),
|
||||
oldCheckpointFormat.format(now));
|
||||
mkdir(trashRootFs, dirToDelete);
|
||||
mkdir(trashRootFs, dirToKeep);
|
||||
|
||||
// Clear out trash
|
||||
int rc = -1;
|
||||
try {
|
||||
rc = shell.run(new String [] { "-expunge" } );
|
||||
} catch (Exception e) {
|
||||
System.err.println("Exception raised from fs expunge " +
|
||||
e.getLocalizedMessage());
|
||||
}
|
||||
assertEquals(0, rc);
|
||||
assertFalse("old checkpoint format not recognized",
|
||||
trashRootFs.exists(dirToDelete));
|
||||
assertTrue("old checkpoint format directory should not be removed",
|
||||
trashRootFs.exists(dirToKeep));
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
public static void trashNonDefaultFS(Configuration conf) throws IOException {
|
||||
|
|
|
@ -1,168 +0,0 @@
|
|||
/**
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0
|
||||
* (the "License"); you may not use this file except in compliance with
|
||||
* the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
* implied. See the License for the specific language governing
|
||||
* permissions and limitations under the License.
|
||||
*
|
||||
*
|
||||
* We need to provide the ability to the code in fs/kfs without really
|
||||
* having a KFS deployment. For this purpose, use the LocalFileSystem
|
||||
* as a way to "emulate" KFS.
|
||||
*/
|
||||
|
||||
package org.apache.hadoop.fs.kfs;
|
||||
|
||||
import java.io.FileNotFoundException;
|
||||
import java.io.IOException;
|
||||
|
||||
import org.apache.hadoop.conf.Configuration;
|
||||
import org.apache.hadoop.fs.BlockLocation;
|
||||
import org.apache.hadoop.fs.FSDataInputStream;
|
||||
import org.apache.hadoop.fs.FSDataOutputStream;
|
||||
import org.apache.hadoop.fs.FileStatus;
|
||||
import org.apache.hadoop.fs.FileSystem;
|
||||
import org.apache.hadoop.fs.Path;
|
||||
import org.apache.hadoop.util.Progressable;
|
||||
|
||||
public class KFSEmulationImpl implements IFSImpl {
|
||||
FileSystem localFS;
|
||||
|
||||
public KFSEmulationImpl(Configuration conf) throws IOException {
|
||||
localFS = FileSystem.getLocal(conf);
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean exists(String path) throws IOException {
|
||||
return localFS.exists(new Path(path));
|
||||
}
|
||||
@Override
|
||||
public boolean isDirectory(String path) throws IOException {
|
||||
return localFS.isDirectory(new Path(path));
|
||||
}
|
||||
@Override
|
||||
public boolean isFile(String path) throws IOException {
|
||||
return localFS.isFile(new Path(path));
|
||||
}
|
||||
|
||||
@Override
|
||||
public String[] readdir(String path) throws IOException {
|
||||
FileStatus[] p = localFS.listStatus(new Path(path));
|
||||
try {
|
||||
p = localFS.listStatus(new Path(path));
|
||||
} catch ( FileNotFoundException fnfe ) {
|
||||
return null;
|
||||
}
|
||||
String[] entries = null;
|
||||
|
||||
entries = new String[p.length];
|
||||
for (int i = 0; i < p.length; i++)
|
||||
entries[i] = p[i].getPath().toString();
|
||||
return entries;
|
||||
}
|
||||
|
||||
@Override
|
||||
public FileStatus[] readdirplus(Path path) throws IOException {
|
||||
return localFS.listStatus(path);
|
||||
}
|
||||
|
||||
@Override
|
||||
public int mkdirs(String path) throws IOException {
|
||||
if (localFS.mkdirs(new Path(path)))
|
||||
return 0;
|
||||
|
||||
return -1;
|
||||
}
|
||||
|
||||
@Override
|
||||
public int rename(String source, String dest) throws IOException {
|
||||
if (localFS.rename(new Path(source), new Path(dest)))
|
||||
return 0;
|
||||
return -1;
|
||||
}
|
||||
|
||||
@Override
|
||||
public int rmdir(String path) throws IOException {
|
||||
if (isDirectory(path)) {
|
||||
// the directory better be empty
|
||||
String[] dirEntries = readdir(path);
|
||||
if ((dirEntries.length <= 2) && (localFS.delete(new Path(path), true)))
|
||||
return 0;
|
||||
}
|
||||
return -1;
|
||||
}
|
||||
|
||||
@Override
|
||||
public int remove(String path) throws IOException {
|
||||
if (isFile(path) && (localFS.delete(new Path(path), true)))
|
||||
return 0;
|
||||
return -1;
|
||||
}
|
||||
|
||||
@Override
|
||||
public long filesize(String path) throws IOException {
|
||||
return localFS.getFileStatus(new Path(path)).getLen();
|
||||
}
|
||||
@Override
|
||||
public short getReplication(String path) throws IOException {
|
||||
return 1;
|
||||
}
|
||||
@Override
|
||||
public short setReplication(String path, short replication) throws IOException {
|
||||
return 1;
|
||||
}
|
||||
@Override
|
||||
public String[][] getDataLocation(String path, long start, long len) throws IOException {
|
||||
BlockLocation[] blkLocations =
|
||||
localFS.getFileBlockLocations(localFS.getFileStatus(new Path(path)),
|
||||
start, len);
|
||||
if ((blkLocations == null) || (blkLocations.length == 0)) {
|
||||
return new String[0][];
|
||||
}
|
||||
int blkCount = blkLocations.length;
|
||||
String[][]hints = new String[blkCount][];
|
||||
for (int i=0; i < blkCount ; i++) {
|
||||
String[] hosts = blkLocations[i].getHosts();
|
||||
hints[i] = new String[hosts.length];
|
||||
hints[i] = hosts;
|
||||
}
|
||||
return hints;
|
||||
}
|
||||
|
||||
@Override
|
||||
public long getModificationTime(String path) throws IOException {
|
||||
FileStatus s = localFS.getFileStatus(new Path(path));
|
||||
if (s == null)
|
||||
return 0;
|
||||
|
||||
return s.getModificationTime();
|
||||
}
|
||||
|
||||
@Override
|
||||
public FSDataOutputStream append(String path, int bufferSize, Progressable progress) throws IOException {
|
||||
// besides path/overwrite, the other args don't matter for
|
||||
// testing purposes.
|
||||
return localFS.append(new Path(path));
|
||||
}
|
||||
|
||||
@Override
|
||||
public FSDataOutputStream create(String path, short replication, int bufferSize, Progressable progress) throws IOException {
|
||||
// besides path/overwrite, the other args don't matter for
|
||||
// testing purposes.
|
||||
return localFS.create(new Path(path));
|
||||
}
|
||||
|
||||
@Override
|
||||
public FSDataInputStream open(String path, int bufferSize) throws IOException {
|
||||
return localFS.open(new Path(path));
|
||||
}
|
||||
|
||||
|
||||
};
|
|
@ -1,199 +0,0 @@
|
|||
/**
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0
|
||||
* (the "License"); you may not use this file except in compliance with
|
||||
* the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
* implied. See the License for the specific language governing
|
||||
* permissions and limitations under the License.
|
||||
*
|
||||
*
|
||||
* Unit tests for testing the KosmosFileSystem API implementation.
|
||||
*/
|
||||
|
||||
package org.apache.hadoop.fs.kfs;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.net.URI;
|
||||
|
||||
import junit.framework.TestCase;
|
||||
|
||||
import org.apache.hadoop.conf.Configuration;
|
||||
import org.apache.hadoop.fs.FSDataInputStream;
|
||||
import org.apache.hadoop.fs.FSDataOutputStream;
|
||||
import org.apache.hadoop.fs.FileStatus;
|
||||
import org.apache.hadoop.fs.Path;
|
||||
|
||||
public class TestKosmosFileSystem extends TestCase {
|
||||
|
||||
KosmosFileSystem kosmosFileSystem;
|
||||
KFSEmulationImpl kfsEmul;
|
||||
Path baseDir;
|
||||
|
||||
@Override
|
||||
protected void setUp() throws IOException {
|
||||
Configuration conf = new Configuration();
|
||||
|
||||
kfsEmul = new KFSEmulationImpl(conf);
|
||||
kosmosFileSystem = new KosmosFileSystem(kfsEmul);
|
||||
// a dummy URI; we are not connecting to any setup here
|
||||
kosmosFileSystem.initialize(URI.create("kfs:///"), conf);
|
||||
baseDir = new Path(System.getProperty("test.build.data", "/tmp" ) +
|
||||
"/kfs-test");
|
||||
}
|
||||
|
||||
@Override
|
||||
protected void tearDown() throws Exception {
|
||||
|
||||
}
|
||||
|
||||
// @Test
|
||||
// Check all the directory API's in KFS
|
||||
public void testDirs() throws Exception {
|
||||
Path subDir1 = new Path("dir.1");
|
||||
|
||||
// make the dir
|
||||
kosmosFileSystem.mkdirs(baseDir);
|
||||
assertTrue(kosmosFileSystem.isDirectory(baseDir));
|
||||
kosmosFileSystem.setWorkingDirectory(baseDir);
|
||||
|
||||
kosmosFileSystem.mkdirs(subDir1);
|
||||
assertTrue(kosmosFileSystem.isDirectory(subDir1));
|
||||
|
||||
assertFalse(kosmosFileSystem.exists(new Path("test1")));
|
||||
assertFalse(kosmosFileSystem.isDirectory(new Path("test/dir.2")));
|
||||
|
||||
FileStatus[] p = kosmosFileSystem.listStatus(baseDir);
|
||||
assertEquals(p.length, 1);
|
||||
|
||||
kosmosFileSystem.delete(baseDir, true);
|
||||
assertFalse(kosmosFileSystem.exists(baseDir));
|
||||
}
|
||||
|
||||
// @Test
|
||||
// Check the file API's
|
||||
public void testFiles() throws Exception {
|
||||
Path subDir1 = new Path("dir.1");
|
||||
Path file1 = new Path("dir.1/foo.1");
|
||||
Path file2 = new Path("dir.1/foo.2");
|
||||
|
||||
kosmosFileSystem.mkdirs(baseDir);
|
||||
assertTrue(kosmosFileSystem.isDirectory(baseDir));
|
||||
kosmosFileSystem.setWorkingDirectory(baseDir);
|
||||
|
||||
kosmosFileSystem.mkdirs(subDir1);
|
||||
|
||||
FSDataOutputStream s1 = kosmosFileSystem.create(file1, true, 4096, (short) 1, (long) 4096, null);
|
||||
FSDataOutputStream s2 = kosmosFileSystem.create(file2, true, 4096, (short) 1, (long) 4096, null);
|
||||
|
||||
s1.close();
|
||||
s2.close();
|
||||
|
||||
FileStatus[] p = kosmosFileSystem.listStatus(subDir1);
|
||||
assertEquals(p.length, 2);
|
||||
|
||||
kosmosFileSystem.delete(file1, true);
|
||||
p = kosmosFileSystem.listStatus(subDir1);
|
||||
assertEquals(p.length, 1);
|
||||
|
||||
kosmosFileSystem.delete(file2, true);
|
||||
p = kosmosFileSystem.listStatus(subDir1);
|
||||
assertEquals(p.length, 0);
|
||||
|
||||
kosmosFileSystem.delete(baseDir, true);
|
||||
assertFalse(kosmosFileSystem.exists(baseDir));
|
||||
}
|
||||
|
||||
// @Test
|
||||
// Check file/read write
|
||||
public void testFileIO() throws Exception {
|
||||
Path subDir1 = new Path("dir.1");
|
||||
Path file1 = new Path("dir.1/foo.1");
|
||||
|
||||
kosmosFileSystem.mkdirs(baseDir);
|
||||
assertTrue(kosmosFileSystem.isDirectory(baseDir));
|
||||
kosmosFileSystem.setWorkingDirectory(baseDir);
|
||||
|
||||
kosmosFileSystem.mkdirs(subDir1);
|
||||
|
||||
FSDataOutputStream s1 = kosmosFileSystem.create(file1, true, 4096, (short) 1, (long) 4096, null);
|
||||
|
||||
int bufsz = 4096;
|
||||
byte[] data = new byte[bufsz];
|
||||
|
||||
for (int i = 0; i < data.length; i++)
|
||||
data[i] = (byte) (i % 16);
|
||||
|
||||
// write 4 bytes and read them back; read API should return a byte per call
|
||||
s1.write(32);
|
||||
s1.write(32);
|
||||
s1.write(32);
|
||||
s1.write(32);
|
||||
// write some data
|
||||
s1.write(data, 0, data.length);
|
||||
// flush out the changes
|
||||
s1.close();
|
||||
|
||||
// Read the stuff back and verify it is correct
|
||||
FSDataInputStream s2 = kosmosFileSystem.open(file1, 4096);
|
||||
int v;
|
||||
long nread = 0;
|
||||
|
||||
v = s2.read();
|
||||
assertEquals(v, 32);
|
||||
v = s2.read();
|
||||
assertEquals(v, 32);
|
||||
v = s2.read();
|
||||
assertEquals(v, 32);
|
||||
v = s2.read();
|
||||
assertEquals(v, 32);
|
||||
|
||||
assertEquals(s2.available(), data.length);
|
||||
|
||||
byte[] buf = new byte[bufsz];
|
||||
s2.read(buf, 0, buf.length);
|
||||
nread = s2.getPos();
|
||||
|
||||
for (int i = 0; i < data.length; i++)
|
||||
assertEquals(data[i], buf[i]);
|
||||
|
||||
assertEquals(s2.available(), 0);
|
||||
|
||||
s2.close();
|
||||
|
||||
// append some data to the file
|
||||
try {
|
||||
s1 = kosmosFileSystem.append(file1);
|
||||
for (int i = 0; i < data.length; i++)
|
||||
data[i] = (byte) (i % 17);
|
||||
// write the data
|
||||
s1.write(data, 0, data.length);
|
||||
// flush out the changes
|
||||
s1.close();
|
||||
|
||||
// read it back and validate
|
||||
s2 = kosmosFileSystem.open(file1, 4096);
|
||||
s2.seek(nread);
|
||||
s2.read(buf, 0, buf.length);
|
||||
for (int i = 0; i < data.length; i++)
|
||||
assertEquals(data[i], buf[i]);
|
||||
|
||||
s2.close();
|
||||
} catch (Exception e) {
|
||||
System.out.println("append isn't supported by the underlying fs");
|
||||
}
|
||||
|
||||
kosmosFileSystem.delete(file1, true);
|
||||
assertFalse(kosmosFileSystem.exists(file1));
|
||||
kosmosFileSystem.delete(subDir1, true);
|
||||
assertFalse(kosmosFileSystem.exists(subDir1));
|
||||
kosmosFileSystem.delete(baseDir, true);
|
||||
assertFalse(kosmosFileSystem.exists(baseDir));
|
||||
}
|
||||
|
||||
}
|
|
@ -342,6 +342,15 @@ public class TestChRootedFileSystem {
|
|||
chrootFs.close();
|
||||
verify(mockFs).delete(eq(rawPath), eq(true));
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testURIEmptyPath() throws IOException {
|
||||
Configuration conf = new Configuration();
|
||||
conf.setClass("fs.mockfs.impl", MockFileSystem.class, FileSystem.class);
|
||||
|
||||
URI chrootUri = URI.create("mockfs://foo");
|
||||
new ChRootedFileSystem(chrootUri, conf);
|
||||
}
|
||||
|
||||
static class MockFileSystem extends FilterFileSystem {
|
||||
MockFileSystem() {
|
||||
|
|
|
@ -0,0 +1,35 @@
|
|||
/**
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
* regarding copyright ownership. The ASF licenses this file
|
||||
* to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
package org.apache.hadoop.fs.viewfs;
|
||||
|
||||
import java.net.URI;
|
||||
|
||||
import org.apache.hadoop.conf.Configuration;
|
||||
import org.apache.hadoop.fs.FileContext;
|
||||
import org.apache.hadoop.fs.FsConstants;
|
||||
import org.junit.Test;
|
||||
|
||||
public class TestViewFsURIs {
|
||||
@Test
|
||||
public void testURIEmptyPath() throws Exception {
|
||||
Configuration conf = new Configuration();
|
||||
ConfigUtil.addLink(conf, "/user", new URI("file://foo"));
|
||||
|
||||
FileContext.getFileContext(FsConstants.VIEWFS_URI, conf);
|
||||
}
|
||||
}
|
|
@ -54,7 +54,6 @@ import org.apache.hadoop.io.SequenceFile;
|
|||
import org.apache.hadoop.io.Text;
|
||||
import org.apache.hadoop.io.Writable;
|
||||
import org.apache.hadoop.io.SequenceFile.CompressionType;
|
||||
import org.apache.hadoop.io.compress.snappy.LoadSnappy;
|
||||
import org.apache.hadoop.io.compress.zlib.BuiltInGzipDecompressor;
|
||||
import org.apache.hadoop.io.compress.zlib.BuiltInZlibDeflater;
|
||||
import org.apache.hadoop.io.compress.zlib.BuiltInZlibInflater;
|
||||
|
@ -103,14 +102,9 @@ public class TestCodec {
|
|||
|
||||
@Test
|
||||
public void testSnappyCodec() throws IOException {
|
||||
if (LoadSnappy.isAvailable()) {
|
||||
if (LoadSnappy.isLoaded()) {
|
||||
codecTest(conf, seed, 0, "org.apache.hadoop.io.compress.SnappyCodec");
|
||||
codecTest(conf, seed, count, "org.apache.hadoop.io.compress.SnappyCodec");
|
||||
}
|
||||
else {
|
||||
Assert.fail("Snappy native available but Hadoop native not");
|
||||
}
|
||||
if (SnappyCodec.isNativeCodeLoaded()) {
|
||||
codecTest(conf, seed, 0, "org.apache.hadoop.io.compress.SnappyCodec");
|
||||
codecTest(conf, seed, count, "org.apache.hadoop.io.compress.SnappyCodec");
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -60,6 +60,7 @@ import org.apache.hadoop.security.token.TokenInfo;
|
|||
import org.apache.hadoop.security.token.TokenSelector;
|
||||
import org.apache.hadoop.security.token.SecretManager.InvalidToken;
|
||||
import org.apache.log4j.Level;
|
||||
import org.junit.BeforeClass;
|
||||
import org.junit.Test;
|
||||
|
||||
/** Unit tests for using Sasl over RPC. */
|
||||
|
@ -76,7 +77,8 @@ public class TestSaslRPC {
|
|||
static final String SERVER_PRINCIPAL_2 = "p2/foo@BAR";
|
||||
|
||||
private static Configuration conf;
|
||||
static {
|
||||
@BeforeClass
|
||||
public static void setup() {
|
||||
conf = new Configuration();
|
||||
conf.set(HADOOP_SECURITY_AUTHENTICATION, "kerberos");
|
||||
UserGroupInformation.setConfiguration(conf);
|
||||
|
@ -449,11 +451,25 @@ public class TestSaslRPC {
|
|||
}
|
||||
|
||||
@Test
|
||||
public void testDigestAuthMethod() throws Exception {
|
||||
public void testDigestAuthMethodSecureServer() throws Exception {
|
||||
checkDigestAuthMethod(true);
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testDigestAuthMethodInsecureServer() throws Exception {
|
||||
checkDigestAuthMethod(false);
|
||||
}
|
||||
|
||||
private void checkDigestAuthMethod(boolean secureServer) throws Exception {
|
||||
TestTokenSecretManager sm = new TestTokenSecretManager();
|
||||
Server server = new RPC.Builder(conf).setProtocol(TestSaslProtocol.class)
|
||||
.setInstance(new TestSaslImpl()).setBindAddress(ADDRESS).setPort(0)
|
||||
.setNumHandlers(5).setVerbose(true).setSecretManager(sm).build();
|
||||
if (secureServer) {
|
||||
server.enableSecurity();
|
||||
} else {
|
||||
server.disableSecurity();
|
||||
}
|
||||
server.start();
|
||||
|
||||
final UserGroupInformation current = UserGroupInformation.getCurrentUser();
|
||||
|
|
|
@ -115,7 +115,10 @@ public abstract class GenericTestUtils {
|
|||
|
||||
Thread.sleep(checkEveryMillis);
} while (Time.now() - st < waitForMillis);
throw new TimeoutException("Timed out waiting for condition");
throw new TimeoutException("Timed out waiting for condition. " +
"Thread diagnostics:\n" +
TimedOutTestsListener.buildThreadDiagnosticString());
}

public static class LogCapturer {
|
|
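The GenericTestUtils hunk above only changes the timeout message, appending a thread dump from TimedOutTestsListener when waitFor() gives up. For context, a typical call site looks roughly like the sketch below, assuming the Guava Supplier-based waitFor(check, checkEveryMillis, waitForMillis) signature; the surrounding scaffolding is illustrative, not from the patch:

    import java.util.concurrent.atomic.AtomicBoolean;

    import com.google.common.base.Supplier;
    import org.apache.hadoop.test.GenericTestUtils;

    public class WaitForSketch {
      public static void main(String[] args) throws Exception {
        final AtomicBoolean done = new AtomicBoolean(false);
        new Thread(new Runnable() {
          @Override
          public void run() {
            done.set(true);
          }
        }).start();
        // Poll every 100 ms; time out (now with thread diagnostics) after 10 s.
        GenericTestUtils.waitFor(new Supplier<Boolean>() {
          @Override
          public Boolean get() {
            return done.get();
          }
        }, 100, 10000);
      }
    }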
|
@ -58,19 +58,28 @@ public class TimedOutTestsListener extends RunListener {
|
|||
&& failure.getMessage().startsWith(TEST_TIMED_OUT_PREFIX)) {
|
||||
output.println("====> TEST TIMED OUT. PRINTING THREAD DUMP. <====");
|
||||
output.println();
|
||||
DateFormat dateFormat = new SimpleDateFormat("yyyy-MM-dd hh:mm:ss,SSS");
|
||||
output.println(String.format("Timestamp: %s", dateFormat.format(new Date())));
|
||||
output.println();
|
||||
output.println(buildThreadDump());
|
||||
|
||||
String deadlocksInfo = buildDeadlockInfo();
|
||||
if (deadlocksInfo != null) {
|
||||
output.println("====> DEADLOCKS DETECTED <====");
|
||||
output.println();
|
||||
output.println(deadlocksInfo);
|
||||
}
|
||||
output.print(buildThreadDiagnosticString());
|
||||
}
|
||||
}
|
||||
|
||||
public static String buildThreadDiagnosticString() {
|
||||
StringWriter sw = new StringWriter();
|
||||
PrintWriter output = new PrintWriter(sw);
|
||||
|
||||
DateFormat dateFormat = new SimpleDateFormat("yyyy-MM-dd hh:mm:ss,SSS");
|
||||
output.println(String.format("Timestamp: %s", dateFormat.format(new Date())));
|
||||
output.println();
|
||||
output.println(buildThreadDump());
|
||||
|
||||
String deadlocksInfo = buildDeadlockInfo();
|
||||
if (deadlocksInfo != null) {
|
||||
output.println("====> DEADLOCKS DETECTED <====");
|
||||
output.println();
|
||||
output.println(deadlocksInfo);
|
||||
}
|
||||
|
||||
return sw.toString();
|
||||
}
|
||||
|
||||
static String buildThreadDump() {
|
||||
StringBuilder dump = new StringBuilder();
|
||||
|
|
|
@ -0,0 +1,49 @@
|
|||
/**
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
* regarding copyright ownership. The ASF licenses this file
|
||||
* to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
package org.apache.hadoop.util;
|
||||
|
||||
import org.junit.Test;
|
||||
import static org.junit.Assert.*;
|
||||
|
||||
import org.apache.commons.logging.Log;
|
||||
import org.apache.commons.logging.LogFactory;
|
||||
import org.apache.hadoop.util.NativeCodeLoader;
|
||||
|
||||
public class TestNativeCodeLoader {
|
||||
static final Log LOG = LogFactory.getLog(TestNativeCodeLoader.class);
|
||||
|
||||
private static boolean requireTestJni() {
|
||||
String rtj = System.getProperty("require.test.libhadoop");
|
||||
if (rtj == null) return false;
|
||||
if (rtj.compareToIgnoreCase("false") == 0) return false;
|
||||
return true;
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testNativeCodeLoaded() {
|
||||
if (requireTestJni() == false) {
|
||||
LOG.info("TestNativeCodeLoader: libhadoop.so testing is not required.");
|
||||
return;
|
||||
}
|
||||
if (!NativeCodeLoader.isNativeCodeLoaded()) {
|
||||
fail("TestNativeCodeLoader: libhadoop.so testing was required, but " +
|
||||
"libhadoop.so was not loaded.");
|
||||
}
|
||||
LOG.info("TestNativeCodeLoader: libhadoop.so is loaded.");
|
||||
}
|
||||
}
|
|
@ -282,6 +282,19 @@ public class TestStringUtils extends UnitTestcaseTimeLimit {
|
|||
}
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testSimpleHostName() {
|
||||
assertEquals("Should return hostname when FQDN is specified",
|
||||
"hadoop01",
|
||||
StringUtils.simpleHostname("hadoop01.domain.com"));
|
||||
assertEquals("Should return hostname when only hostname is specified",
|
||||
"hadoop01",
|
||||
StringUtils.simpleHostname("hadoop01"));
|
||||
assertEquals("Should not truncate when IP address is passed",
|
||||
"10.10.5.68",
|
||||
StringUtils.simpleHostname("10.10.5.68"));
|
||||
}
|
||||
|
||||
// Benchmark for StringUtils split
|
||||
public static void main(String []args) {
|
||||
final String TO_SPLIT = "foo,bar,baz,blah,blah";
|
||||
|
|
|
@ -261,7 +261,7 @@ public class HttpFSParametersProvider extends ParametersProvider {
|
|||
/**
|
||||
* Parameter name.
|
||||
*/
|
||||
public static final String NAME = "len";
|
||||
public static final String NAME = "length";
|
||||
|
||||
/**
|
||||
* Constructor.
|
||||
|
|
|
@ -24,6 +24,7 @@ import java.io.File;
|
|||
import java.io.FileOutputStream;
|
||||
import java.io.FileWriter;
|
||||
import java.io.IOException;
|
||||
import java.io.InputStream;
|
||||
import java.io.InputStreamReader;
|
||||
import java.io.OutputStream;
|
||||
import java.io.Writer;
|
||||
|
@ -146,6 +147,7 @@ public class TestHttpFSServer extends HFSTestCase {
|
|||
conf.set("httpfs.proxyuser." + HadoopUsersConfTestHelper.getHadoopProxyUser() + ".hosts",
|
||||
HadoopUsersConfTestHelper.getHadoopProxyUserHosts());
|
||||
conf.set("httpfs.authentication.signature.secret.file", secretFile.getAbsolutePath());
|
||||
conf.set("httpfs.hadoop.config.dir", hadoopConfDir.toString());
|
||||
File httpfsSite = new File(new File(homeDir, "conf"), "httpfs-site.xml");
|
||||
os = new FileOutputStream(httpfsSite);
|
||||
conf.writeXml(os);
|
||||
|
@ -229,6 +231,31 @@ public class TestHttpFSServer extends HFSTestCase {
|
|||
reader.close();
|
||||
}
|
||||
|
||||
@Test
|
||||
@TestDir
|
||||
@TestJetty
|
||||
@TestHdfs
|
||||
public void testOpenOffsetLength() throws Exception {
|
||||
createHttpFSServer(false);
|
||||
|
||||
byte[] array = new byte[]{0, 1, 2, 3};
|
||||
FileSystem fs = FileSystem.get(TestHdfsHelper.getHdfsConf());
|
||||
fs.mkdirs(new Path("/tmp"));
|
||||
OutputStream os = fs.create(new Path("/tmp/foo"));
|
||||
os.write(array);
|
||||
os.close();
|
||||
|
||||
String user = HadoopUsersConfTestHelper.getHadoopUsers()[0];
|
||||
URL url = new URL(TestJettyHelper.getJettyURL(),
|
||||
MessageFormat.format("/webhdfs/v1/tmp/foo?user.name={0}&op=open&offset=1&length=2", user));
|
||||
HttpURLConnection conn = (HttpURLConnection) url.openConnection();
|
||||
Assert.assertEquals(HttpURLConnection.HTTP_OK, conn.getResponseCode());
|
||||
InputStream is = conn.getInputStream();
|
||||
Assert.assertEquals(1, is.read());
|
||||
Assert.assertEquals(2, is.read());
|
||||
Assert.assertEquals(-1, is.read());
|
||||
}
|
||||
|
||||
@Test
|
||||
@TestDir
|
||||
@TestJetty
|
||||
|
|
|
@ -137,6 +137,16 @@ Trunk (Unreleased)
|
|||
HDFS-3880. Use Builder to build RPC server in HDFS.
|
||||
(Brandon Li via suresh)
|
||||
|
||||
HDFS-2127. Add a test that ensure AccessControlExceptions contain
|
||||
a full path. (Stephen Chu via eli)
|
||||
|
||||
HDFS-3995. Use DFSTestUtil.createFile() for file creation and
|
||||
writing in test cases. (Jing Zhao via suresh)
|
||||
|
||||
HDFS-3735. NameNode WebUI should allow sorting live datanode list by fields
|
||||
Block Pool Used, Block Pool Used(%) and Failed Volumes.
|
||||
(Brahma Reddy Battula via suresh)
|
||||
|
||||
OPTIMIZATIONS
|
||||
|
||||
BUG FIXES
|
||||
|
@ -235,6 +245,31 @@ Release 2.0.3-alpha - Unreleased
|
|||
|
||||
HDFS-3939. NN RPC address cleanup. (eli)
|
||||
|
||||
HDFS-3373. Change DFSClient input stream socket cache to global static and add
|
||||
a thread to cleanup expired cache entries. (John George via szetszwo)
|
||||
|
||||
HDFS-3896. Add descriptions for dfs.namenode.rpc-address and
|
||||
dfs.namenode.servicerpc-address to hdfs-default.xml. (Jeff Lord via atm)
|
||||
|
||||
HDFS-3996. Add debug log removed in HDFS-3873 back. (eli)
|
||||
|
||||
HDFS-3916. libwebhdfs (C client) code cleanups.
|
||||
(Colin Patrick McCabe via eli)
|
||||
|
||||
HDFS-3813. Log error message if security and WebHDFS are enabled but
|
||||
principal/keytab are not configured. (Stephen Chu via atm)
|
||||
|
||||
HDFS-3483. Better error message when hdfs fsck is run against a ViewFS
|
||||
config. (Stephen Fritz via atm)
|
||||
|
||||
HDFS-3682. MiniDFSCluster#init should provide more info when it fails.
|
||||
(todd via eli)
|
||||
|
||||
HDFS-4008. TestBalancerWithEncryptedTransfer needs a timeout. (eli)
|
||||
|
||||
HDFS-4007. Rehabilitate bit-rotted unit tests under
|
||||
hadoop-hdfs-project/hadoop-hdfs/src/test/unit/ (Colin Patrick McCabe via todd)
|
||||
|
||||
OPTIMIZATIONS
|
||||
|
||||
BUG FIXES
|
||||
|
@ -259,6 +294,31 @@ Release 2.0.3-alpha - Unreleased
|
|||
|
||||
HDFS-3964. Make NN log of fs.defaultFS debug rather than info. (eli)
|
||||
|
||||
HDFS-3992. Method org.apache.hadoop.hdfs.TestHftpFileSystem.tearDown()
|
||||
sometimes throws NPEs. (Ivan A. Veselovsky via atm)
|
||||
|
||||
HDFS-3753. Tests don't run with native libraries.
|
||||
(Colin Patrick McCabe via eli)
|
||||
|
||||
HDFS-4000. TestParallelLocalRead fails with "input ByteBuffers
|
||||
must be direct buffers". (Colin Patrick McCabe via eli)
|
||||
|
||||
HDFS-3999. HttpFS OPEN operation expects len parameter, it should be length. (tucu)
|
||||
|
||||
HDFS-4006. TestCheckpoint#testSecondaryHasVeryOutOfDateImage
|
||||
occasionally fails due to unexpected exit. (todd via eli)
|
||||
|
||||
HDFS-4003. test-patch should build the common native libs before
|
||||
running hdfs tests. (Colin Patrick McCabe via eli)
|
||||
|
||||
HDFS-4018. testMiniDFSClusterWithMultipleNN is missing some
|
||||
cluster cleanup. (eli)
|
||||
|
||||
HDFS-4020. TestRBWBlockInvalidation may time out. (eli)
|
||||
|
||||
HDFS-4021. Misleading error message when resources are low on the NameNode.
|
||||
(Christopher Conner via atm)
|
||||
|
||||
Release 2.0.2-alpha - 2012-09-07
|
||||
|
||||
INCOMPATIBLE CHANGES
|
||||
|
@ -792,6 +852,8 @@ Release 2.0.2-alpha - 2012-09-07
|
|||
HDFS-3938. remove current limitations from HttpFS docs. (tucu)
|
||||
|
||||
HDFS-3944. Httpfs resolveAuthority() is not resolving host correctly. (tucu)
|
||||
|
||||
HDFS-3972. Trash emptier fails in secure HA cluster. (todd via eli)
|
||||
|
||||
BREAKDOWN OF HDFS-3042 SUBTASKS
|
||||
|
||||
|
@ -1628,6 +1690,27 @@ Release 2.0.0-alpha - 05-23-2012
|
|||
|
||||
HDFS-3039. Address findbugs and javadoc warnings on branch. (todd via atm)
|
||||
|
||||
Release 0.23.5 - UNRELEASED
|
||||
|
||||
INCOMPATIBLE CHANGES
|
||||
|
||||
NEW FEATURES
|
||||
|
||||
IMPROVEMENTS
|
||||
|
||||
OPTIMIZATIONS
|
||||
|
||||
BUG FIXES
|
||||
|
||||
HDFS-3829. TestHftpURLTimeouts fails intermittently with JDK7 (Trevor
|
||||
Robinson via tgraves)
|
||||
|
||||
HDFS-3824. TestHftpDelegationToken fails intermittently with JDK7 (Trevor
|
||||
Robinson via tgraves)
|
||||
|
||||
HDFS-3224. Bug in check for DN re-registration with different storage ID
|
||||
(jlowe)
|
||||
|
||||
Release 0.23.4 - UNRELEASED
|
||||
|
||||
INCOMPATIBLE CHANGES
|
||||
|
@ -1640,7 +1723,10 @@ Release 0.23.4 - UNRELEASED
|
|||
|
||||
BUG FIXES
|
||||
|
||||
Release 0.23.3 - UNRELEASED
|
||||
HDFS-3831. Failure to renew tokens due to test-sources left in classpath
|
||||
(jlowe via bobby)
|
||||
|
||||
Release 0.23.3
|
||||
|
||||
INCOMPATIBLE CHANGES
|
||||
|
||||
|
|
|
@ -85,8 +85,8 @@ CONFIGURE_FILE(${CMAKE_SOURCE_DIR}/config.h.cmake ${CMAKE_BINARY_DIR}/config.h)
|
|||
|
||||
add_dual_library(hdfs
|
||||
main/native/libhdfs/exception.c
|
||||
main/native/libhdfs/hdfs.c
|
||||
main/native/libhdfs/jni_helper.c
|
||||
main/native/libhdfs/hdfs.c
|
||||
)
|
||||
target_link_dual_libraries(hdfs
|
||||
${JAVA_JVM_LIBRARY}
|
||||
|
|
|
@ -16,28 +16,21 @@
|
|||
# limitations under the License.
|
||||
#
|
||||
|
||||
find_package(CURL)
|
||||
if (CURL_FOUND)
|
||||
include_directories(${CURL_INCLUDE_DIRS})
|
||||
else (CURL_FOUND)
|
||||
MESSAGE(STATUS "Failed to find CURL library.")
|
||||
endif (CURL_FOUND)
|
||||
find_package(CURL REQUIRED)
|
||||
|
||||
set(CMAKE_MODULE_PATH ${CMAKE_MODULE_PATH}
|
||||
"${CMAKE_SOURCE_DIR}/contrib/libwebhdfs/resources/")
|
||||
MESSAGE("CMAKE_MODULE_PATH IS: " ${CMAKE_MODULE_PATH})
|
||||
|
||||
find_package(Jansson)
|
||||
find_package(Jansson REQUIRED)
|
||||
include_directories(${JANSSON_INCLUDE_DIR})
|
||||
|
||||
add_dual_library(webhdfs
|
||||
src/exception.c
|
||||
src/hdfs_web.c
|
||||
src/hdfs_jni.c
|
||||
src/jni_helper.c
|
||||
src/hdfs_http_client.c
|
||||
src/hdfs_http_query.c
|
||||
src/hdfs_json_parser.c
|
||||
../../main/native/libhdfs/exception.c
|
||||
../../main/native/libhdfs/jni_helper.c
|
||||
)
|
||||
target_link_dual_libraries(webhdfs
|
||||
${JAVA_JVM_LIBRARY}
|
||||
|
@ -55,10 +48,6 @@ add_executable(test_libwebhdfs_ops
|
|||
)
|
||||
target_link_libraries(test_libwebhdfs_ops
|
||||
webhdfs
|
||||
${CURL_LIBRARY}
|
||||
${JAVA_JVM_LIBRARY}
|
||||
${JANSSON_LIBRARY}
|
||||
pthread
|
||||
)
|
||||
|
||||
add_executable(test_libwebhdfs_read
|
||||
|
@ -66,10 +55,6 @@ add_executable(test_libwebhdfs_read
|
|||
)
|
||||
target_link_libraries(test_libwebhdfs_read
|
||||
webhdfs
|
||||
${CURL_LIBRARY}
|
||||
${JAVA_JVM_LIBRARY}
|
||||
${JANSSON_LIBRARY}
|
||||
pthread
|
||||
)
|
||||
|
||||
add_executable(test_libwebhdfs_write
|
||||
|
@ -77,10 +62,6 @@ add_executable(test_libwebhdfs_write
|
|||
)
|
||||
target_link_libraries(test_libwebhdfs_write
|
||||
webhdfs
|
||||
${CURL_LIBRARY}
|
||||
${JAVA_JVM_LIBRARY}
|
||||
${JANSSON_LIBRARY}
|
||||
pthread
|
||||
)
|
||||
|
||||
add_executable(test_libwebhdfs_threaded
|
||||
|
@ -88,8 +69,4 @@ add_executable(test_libwebhdfs_threaded
|
|||
)
|
||||
target_link_libraries(test_libwebhdfs_threaded
|
||||
webhdfs
|
||||
${CURL_LIBRARY}
|
||||
${JAVA_JVM_LIBRARY}
|
||||
${JANSSON_LIBRARY}
|
||||
pthread
|
||||
)
|
||||
|
|
|
@ -1,237 +0,0 @@
|
|||
/**
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
* regarding copyright ownership. The ASF licenses this file
|
||||
* to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
#include "exception.h"
|
||||
#include "webhdfs.h"
|
||||
#include "jni_helper.h"
|
||||
|
||||
#include <inttypes.h>
|
||||
#include <stdio.h>
|
||||
#include <stdlib.h>
|
||||
#include <string.h>
|
||||
|
||||
#define EXCEPTION_INFO_LEN (sizeof(gExceptionInfo)/sizeof(gExceptionInfo[0]))
|
||||
|
||||
struct ExceptionInfo {
|
||||
const char * const name;
|
||||
int noPrintFlag;
|
||||
int excErrno;
|
||||
};
|
||||
|
||||
static const struct ExceptionInfo gExceptionInfo[] = {
|
||||
{
|
||||
.name = "java/io/FileNotFoundException",
|
||||
.noPrintFlag = NOPRINT_EXC_FILE_NOT_FOUND,
|
||||
.excErrno = ENOENT,
|
||||
},
|
||||
{
|
||||
.name = "org/apache/hadoop/security/AccessControlException",
|
||||
.noPrintFlag = NOPRINT_EXC_ACCESS_CONTROL,
|
||||
.excErrno = EACCES,
|
||||
},
|
||||
{
|
||||
.name = "org/apache/hadoop/fs/UnresolvedLinkException",
|
||||
.noPrintFlag = NOPRINT_EXC_UNRESOLVED_LINK,
|
||||
.excErrno = ENOLINK,
|
||||
},
|
||||
{
|
||||
.name = "org/apache/hadoop/fs/ParentNotDirectoryException",
|
||||
.noPrintFlag = NOPRINT_EXC_PARENT_NOT_DIRECTORY,
|
||||
.excErrno = ENOTDIR,
|
||||
},
|
||||
{
|
||||
.name = "java/lang/IllegalArgumentException",
|
||||
.noPrintFlag = NOPRINT_EXC_ILLEGAL_ARGUMENT,
|
||||
.excErrno = EINVAL,
|
||||
},
|
||||
{
|
||||
.name = "java/lang/OutOfMemoryError",
|
||||
.noPrintFlag = 0,
|
||||
.excErrno = ENOMEM,
|
||||
},
|
||||
|
||||
};
|
||||
|
||||
int printExceptionWebV(hdfs_exception_msg *exc, int noPrintFlags, const char *fmt, va_list ap)
|
||||
{
|
||||
int i, noPrint, excErrno;
|
||||
if (!exc) {
|
||||
fprintf(stderr, "printExceptionWebV: the hdfs_exception_msg is NULL\n");
|
||||
return EINTERNAL;
|
||||
}
|
||||
|
||||
for (i = 0; i < EXCEPTION_INFO_LEN; i++) {
|
||||
if (strstr(gExceptionInfo[i].name, exc->exception)) {
|
||||
break;
|
||||
}
|
||||
}
|
||||
if (i < EXCEPTION_INFO_LEN) {
|
||||
noPrint = (gExceptionInfo[i].noPrintFlag & noPrintFlags);
|
||||
excErrno = gExceptionInfo[i].excErrno;
|
||||
} else {
|
||||
noPrint = 0;
|
||||
excErrno = EINTERNAL;
|
||||
}
|
||||
|
||||
if (!noPrint) {
|
||||
vfprintf(stderr, fmt, ap);
|
||||
fprintf(stderr, " error:\n");
|
||||
fprintf(stderr, "Exception: %s\nJavaClassName: %s\nMessage: %s\n", exc->exception, exc->javaClassName, exc->message);
|
||||
}
|
||||
|
||||
free(exc);
|
||||
return excErrno;
|
||||
}
|
||||
|
||||
int printExceptionWeb(hdfs_exception_msg *exc, int noPrintFlags, const char *fmt, ...)
|
||||
{
|
||||
va_list ap;
|
||||
int ret;
|
||||
|
||||
va_start(ap, fmt);
|
||||
ret = printExceptionWebV(exc, noPrintFlags, fmt, ap);
|
||||
va_end(ap);
|
||||
return ret;
|
||||
}
|
||||
|
||||
int printExceptionAndFreeV(JNIEnv *env, jthrowable exc, int noPrintFlags,
|
||||
const char *fmt, va_list ap)
|
||||
{
|
||||
int i, noPrint, excErrno;
|
||||
char *className = NULL;
|
||||
jstring jStr = NULL;
|
||||
jvalue jVal;
|
||||
jthrowable jthr;
|
||||
|
||||
jthr = classNameOfObject(exc, env, &className);
|
||||
if (jthr) {
|
||||
fprintf(stderr, "PrintExceptionAndFree: error determining class name "
|
||||
"of exception.\n");
|
||||
className = strdup("(unknown)");
|
||||
destroyLocalReference(env, jthr);
|
||||
}
|
||||
for (i = 0; i < EXCEPTION_INFO_LEN; i++) {
|
||||
if (!strcmp(gExceptionInfo[i].name, className)) {
|
||||
break;
|
||||
}
|
||||
}
|
||||
if (i < EXCEPTION_INFO_LEN) {
|
||||
noPrint = (gExceptionInfo[i].noPrintFlag & noPrintFlags);
|
||||
excErrno = gExceptionInfo[i].excErrno;
|
||||
} else {
|
||||
noPrint = 0;
|
||||
excErrno = EINTERNAL;
|
||||
}
|
||||
if (!noPrint) {
|
||||
vfprintf(stderr, fmt, ap);
|
||||
fprintf(stderr, " error:\n");
|
||||
|
||||
// We don't want to use ExceptionDescribe here, because that requires a
|
||||
// pending exception. Instead, use ExceptionUtils.
|
||||
jthr = invokeMethod(env, &jVal, STATIC, NULL,
|
||||
"org/apache/commons/lang/exception/ExceptionUtils",
|
||||
"getStackTrace", "(Ljava/lang/Throwable;)Ljava/lang/String;", exc);
|
||||
if (jthr) {
|
||||
fprintf(stderr, "(unable to get stack trace for %s exception: "
|
||||
"ExceptionUtils::getStackTrace error.)\n", className);
|
||||
destroyLocalReference(env, jthr);
|
||||
} else {
|
||||
jStr = jVal.l;
|
||||
const char *stackTrace = (*env)->GetStringUTFChars(env, jStr, NULL);
|
||||
if (!stackTrace) {
|
||||
fprintf(stderr, "(unable to get stack trace for %s exception: "
|
||||
"GetStringUTFChars error.)\n", className);
|
||||
} else {
|
||||
fprintf(stderr, "%s", stackTrace);
|
||||
(*env)->ReleaseStringUTFChars(env, jStr, stackTrace);
|
||||
}
|
||||
}
|
||||
}
|
||||
destroyLocalReference(env, jStr);
|
||||
destroyLocalReference(env, exc);
|
||||
free(className);
|
||||
return excErrno;
|
||||
}
|
||||
|
||||
int printExceptionAndFree(JNIEnv *env, jthrowable exc, int noPrintFlags,
|
||||
const char *fmt, ...)
|
||||
{
|
||||
va_list ap;
|
||||
int ret;
|
||||
|
||||
va_start(ap, fmt);
|
||||
ret = printExceptionAndFreeV(env, exc, noPrintFlags, fmt, ap);
|
||||
va_end(ap);
|
||||
return ret;
|
||||
}
|
||||
|
||||
int printPendingExceptionAndFree(JNIEnv *env, int noPrintFlags,
|
||||
const char *fmt, ...)
|
||||
{
|
||||
va_list ap;
|
||||
int ret;
|
||||
jthrowable exc;
|
||||
|
||||
exc = (*env)->ExceptionOccurred(env);
|
||||
if (!exc) {
|
||||
va_start(ap, fmt);
|
||||
vfprintf(stderr, fmt, ap);
|
||||
va_end(ap);
|
||||
fprintf(stderr, " error: (no exception)");
|
||||
ret = 0;
|
||||
} else {
|
||||
(*env)->ExceptionClear(env);
|
||||
va_start(ap, fmt);
|
||||
ret = printExceptionAndFreeV(env, exc, noPrintFlags, fmt, ap);
|
||||
va_end(ap);
|
||||
}
|
||||
return ret;
|
||||
}
|
||||
|
||||
jthrowable getPendingExceptionAndClear(JNIEnv *env)
|
||||
{
|
||||
jthrowable jthr = (*env)->ExceptionOccurred(env);
|
||||
if (!jthr)
|
||||
return NULL;
|
||||
(*env)->ExceptionClear(env);
|
||||
return jthr;
|
||||
}
|
||||
|
||||
jthrowable newRuntimeError(JNIEnv *env, const char *fmt, ...)
|
||||
{
|
||||
char buf[512];
|
||||
jobject out, exc;
|
||||
jstring jstr;
|
||||
va_list ap;
|
||||
|
||||
va_start(ap, fmt);
|
||||
vsnprintf(buf, sizeof(buf), fmt, ap);
|
||||
va_end(ap);
|
||||
jstr = (*env)->NewStringUTF(env, buf);
|
||||
if (!jstr) {
|
||||
// We got an out of memory exception rather than a RuntimeException.
|
||||
// Too bad...
|
||||
return getPendingExceptionAndClear(env);
|
||||
}
|
||||
exc = constructNewObjectOfClass(env, &out, "RuntimeException",
|
||||
"(java/lang/String;)V", jstr);
|
||||
(*env)->DeleteLocalRef(env, jstr);
|
||||
// Again, we'll either get an out of memory exception or the
|
||||
// RuntimeException we wanted.
|
||||
return (exc) ? exc : out;
|
||||
}
|
|
@ -1,178 +0,0 @@
|
|||
/**
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
* regarding copyright ownership. The ASF licenses this file
|
||||
* to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
#ifndef LIBHDFS_EXCEPTION_H
|
||||
#define LIBHDFS_EXCEPTION_H
|
||||
|
||||
/**
|
||||
* Exception handling routines for libhdfs.
|
||||
*
|
||||
* The convention we follow here is to clear pending exceptions as soon as they
|
||||
* are raised. Never assume that the caller of your function will clean up
|
||||
* after you-- do it yourself. Unhandled exceptions can lead to memory leaks
|
||||
* and other undefined behavior.
|
||||
*
|
||||
* If you encounter an exception, return a local reference to it. The caller is
|
||||
* responsible for freeing the local reference, by calling a function like
|
||||
* PrintExceptionAndFree. (You can also free exceptions directly by calling
|
||||
* DeleteLocalRef. However, that would not produce an error message, so it's
|
||||
* usually not what you want.)
|
||||
*/
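To make the convention concrete, here is a minimal hypothetical sketch (the helper name, target class, and messages are invented for illustration; invokeMethod comes from jni_helper.h): a routine returns any raised exception as a local reference, and its caller frees it through printExceptionAndFree.
// Hypothetical example of the convention described above.
static jthrowable callHashCode(JNIEnv *env, jobject obj, jvalue *jVal)
{
    // invokeMethod clears any pending exception and returns it as a local ref.
    return invokeMethod(env, jVal, INSTANCE, obj,
            "java/lang/Object", "hashCode", "()I");
}

static int caller(JNIEnv *env, jobject obj)
{
    jvalue jVal;
    jthrowable jthr = callHashCode(env, obj, &jVal);
    if (jthr) {
        // The caller owns the local reference; printExceptionAndFree frees it
        // and returns the matching POSIX error number.
        return printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
                "caller: Object#hashCode failed");
    }
    return 0;
}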
|
||||
|
||||
#include <jni.h>
|
||||
#include <stdio.h>
|
||||
|
||||
#include <stdlib.h>
|
||||
#include <stdarg.h>
|
||||
#include <search.h>
|
||||
#include <pthread.h>
|
||||
#include <errno.h>
|
||||
|
||||
/**
|
||||
* Exception noprint flags
|
||||
*
|
||||
* These flags determine which exceptions should NOT be printed to stderr by
|
||||
* the exception printing routines. For example, if you expect to see
|
||||
* FileNotFound, you might use NOPRINT_EXC_FILE_NOT_FOUND, to avoid filling the
|
||||
* logs with messages about routine events.
|
||||
*
|
||||
* On the other hand, if you don't expect any failures, you might pass
|
||||
* PRINT_EXC_ALL.
|
||||
*
|
||||
* You can OR these flags together to avoid printing multiple classes of
|
||||
* exceptions.
|
||||
*/
|
||||
#define PRINT_EXC_ALL 0x00
|
||||
#define NOPRINT_EXC_FILE_NOT_FOUND 0x01
|
||||
#define NOPRINT_EXC_ACCESS_CONTROL 0x02
|
||||
#define NOPRINT_EXC_UNRESOLVED_LINK 0x04
|
||||
#define NOPRINT_EXC_PARENT_NOT_DIRECTORY 0x08
|
||||
#define NOPRINT_EXC_ILLEGAL_ARGUMENT 0x10
|
||||
|
||||
/**
|
||||
* Exception information after calling webhdfs operations
|
||||
*/
|
||||
typedef struct {
|
||||
const char *exception;
|
||||
const char *javaClassName;
|
||||
const char *message;
|
||||
} hdfs_exception_msg;
|
||||
|
||||
/**
|
||||
* Print out exception information got after calling webhdfs operations
|
||||
*
|
||||
* @param exc The exception information to print and free
|
||||
* @param noPrintFlags Flags which determine which exceptions we should NOT
|
||||
* print.
|
||||
* @param fmt Printf-style format list
|
||||
* @param ap Printf-style varargs
|
||||
*
|
||||
* @return The POSIX error number associated with the exception
|
||||
* object.
|
||||
*/
|
||||
int printExceptionWebV(hdfs_exception_msg *exc, int noPrintFlags, const char *fmt, va_list ap);
|
||||
|
||||
/**
|
||||
* Print out exception information got after calling webhdfs operations
|
||||
*
|
||||
* @param exc The exception information to print and free
|
||||
* @param noPrintFlags Flags which determine which exceptions we should NOT
|
||||
* print.
|
||||
* @param fmt Printf-style format list
|
||||
* @param ... Printf-style varargs
|
||||
*
|
||||
* @return The POSIX error number associated with the exception
|
||||
* object.
|
||||
*/
|
||||
int printExceptionWeb(hdfs_exception_msg *exc, int noPrintFlags,
|
||||
const char *fmt, ...) __attribute__((format(printf, 3, 4)));
|
||||
|
||||
/**
|
||||
* Print out information about an exception and free it.
|
||||
*
|
||||
* @param env The JNI environment
|
||||
* @param exc The exception to print and free
|
||||
* @param noPrintFlags Flags which determine which exceptions we should NOT
|
||||
* print.
|
||||
* @param fmt Printf-style format list
|
||||
* @param ap Printf-style varargs
|
||||
*
|
||||
* @return The POSIX error number associated with the exception
|
||||
* object.
|
||||
*/
|
||||
int printExceptionAndFreeV(JNIEnv *env, jthrowable exc, int noPrintFlags,
|
||||
const char *fmt, va_list ap);
|
||||
|
||||
/**
|
||||
* Print out information about an exception and free it.
|
||||
*
|
||||
* @param env The JNI environment
|
||||
* @param exc The exception to print and free
|
||||
* @param noPrintFlags Flags which determine which exceptions we should NOT
|
||||
* print.
|
||||
* @param fmt Printf-style format list
|
||||
* @param ... Printf-style varargs
|
||||
*
|
||||
* @return The POSIX error number associated with the exception
|
||||
* object.
|
||||
*/
|
||||
int printExceptionAndFree(JNIEnv *env, jthrowable exc, int noPrintFlags,
|
||||
const char *fmt, ...) __attribute__((format(printf, 4, 5)));
|
||||
|
||||
/**
|
||||
* Print out information about the pending exception and free it.
|
||||
*
|
||||
* @param env The JNI environment
|
||||
* @param noPrintFlags Flags which determine which exceptions we should NOT
|
||||
* print.
|
||||
* @param fmt Printf-style format list
|
||||
* @param ... Printf-style varargs
|
||||
*
|
||||
* @return The POSIX error number associated with the exception
|
||||
* object.
|
||||
*/
|
||||
int printPendingExceptionAndFree(JNIEnv *env, int noPrintFlags,
|
||||
const char *fmt, ...) __attribute__((format(printf, 3, 4)));
|
||||
|
||||
/**
|
||||
* Get a local reference to the pending exception and clear it.
|
||||
*
|
||||
* Once it is cleared, the exception will no longer be pending. The caller will
|
||||
* have to decide what to do with the exception object.
|
||||
*
|
||||
* @param env The JNI environment
|
||||
*
|
||||
* @return The exception, or NULL if there was no exception
|
||||
*/
|
||||
jthrowable getPendingExceptionAndClear(JNIEnv *env);
|
||||
|
||||
/**
|
||||
* Create a new runtime error.
|
||||
*
|
||||
* This creates (but does not throw) a new RuntimeError.
|
||||
*
|
||||
* @param env The JNI environment
|
||||
* @param fmt Printf-style format list
|
||||
* @param ... Printf-style varargs
|
||||
*
|
||||
* @return A local reference to a RuntimeError
|
||||
*/
|
||||
jthrowable newRuntimeError(JNIEnv *env, const char *fmt, ...)
|
||||
__attribute__((format(printf, 2, 3)));
|
||||
|
||||
#endif
|
|
@ -1,101 +0,0 @@
|
|||
/**
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
* regarding copyright ownership. The ASF licenses this file
|
||||
* to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
#ifndef LIBHDFS_NATIVE_TESTS_EXPECT_H
|
||||
#define LIBHDFS_NATIVE_TESTS_EXPECT_H
|
||||
|
||||
#include <stdio.h>
|
||||
|
||||
#define EXPECT_ZERO(x) \
|
||||
do { \
|
||||
int __my_ret__ = x; \
|
||||
if (__my_ret__) { \
|
||||
int __my_errno__ = errno; \
|
||||
fprintf(stderr, "TEST_ERROR: failed on line %d with return " \
|
||||
"code %d (errno: %d): got nonzero from %s\n", \
|
||||
__LINE__, __my_ret__, __my_errno__, #x); \
|
||||
return __my_ret__; \
|
||||
} \
|
||||
} while (0);
|
||||
|
||||
#define EXPECT_NULL(x) \
|
||||
do { \
|
||||
void* __my_ret__ = x; \
|
||||
int __my_errno__ = errno; \
|
||||
if (__my_ret__ != NULL) { \
|
||||
fprintf(stderr, "TEST_ERROR: failed on line %d (errno: %d): " \
|
||||
"got non-NULL value %p from %s\n", \
|
||||
__LINE__, __my_errno__, __my_ret__, #x); \
|
||||
return -1; \
|
||||
} \
|
||||
} while (0);
|
||||
|
||||
#define EXPECT_NONNULL(x) \
|
||||
do { \
|
||||
void* __my_ret__ = x; \
|
||||
int __my_errno__ = errno; \
|
||||
if (__my_ret__ == NULL) { \
|
||||
fprintf(stderr, "TEST_ERROR: failed on line %d (errno: %d): " \
|
||||
"got NULL from %s\n", __LINE__, __my_errno__, #x); \
|
||||
return -1; \
|
||||
} \
|
||||
} while (0);
|
||||
|
||||
#define EXPECT_NEGATIVE_ONE_WITH_ERRNO(x, e) \
|
||||
do { \
|
||||
int __my_ret__ = x; \
|
||||
int __my_errno__ = errno; \
|
||||
if (__my_ret__ != -1) { \
|
||||
fprintf(stderr, "TEST_ERROR: failed on line %d with return " \
|
||||
"code %d (errno: %d): expected -1 from %s\n", __LINE__, \
|
||||
__my_ret__, __my_errno__, #x); \
|
||||
return -1; \
|
||||
} \
|
||||
if (__my_errno__ != e) { \
|
||||
fprintf(stderr, "TEST_ERROR: failed on line %d with return " \
|
||||
"code %d (errno: %d): expected errno = %d from %s\n", \
|
||||
__LINE__, __my_ret__, __my_errno__, e, #x); \
|
||||
return -1; \
|
||||
} \
|
||||
} while (0);
|
||||
|
||||
#define EXPECT_NONZERO(x) \
|
||||
do { \
|
||||
int __my_ret__ = x; \
|
||||
int __my_errno__ = errno; \
|
||||
if (!__my_ret__) { \
|
||||
fprintf(stderr, "TEST_ERROR: failed on line %d with return " \
|
||||
"code %d (errno: %d): got zero from %s\n", __LINE__, \
|
||||
__my_ret__, __my_errno__, #x); \
|
||||
return -1; \
|
||||
} \
|
||||
} while (0);
|
||||
|
||||
#define EXPECT_NONNEGATIVE(x) \
|
||||
do { \
|
||||
int __my_ret__ = x; \
|
||||
int __my_errno__ = errno; \
|
||||
if (__my_ret__ < 0) { \
|
||||
fprintf(stderr, "TEST_ERROR: failed on line %d with return " \
|
||||
"code %d (errno: %d): got negative return from %s\n", \
|
||||
__LINE__, __my_ret__, __my_errno__, #x); \
|
||||
return __my_ret__; \
|
||||
} \
|
||||
} while (0);
|
||||
|
||||
#endif
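For illustration only, a test might chain these macros as below; every name here (setupTestDir, openInput, brokenOpen) is hypothetical, and callers must include errno.h since the macros read errno.
static int testExpectMacros(void)
{
    EXPECT_ZERO(setupTestDir());                           // must return 0
    EXPECT_NONNULL(openInput("/tmp/expect-test"));         // must return non-NULL
    EXPECT_NEGATIVE_ONE_WITH_ERRNO(brokenOpen(), ENOENT);  // must fail with ENOENT
    return 0;
}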
|
|
@ -21,8 +21,42 @@
|
|||
#ifndef _HDFS_HTTP_CLIENT_H_
|
||||
#define _HDFS_HTTP_CLIENT_H_
|
||||
|
||||
#include "webhdfs.h"
|
||||
#include <curl/curl.h>
|
||||
#include "hdfs.h" /* for tSize */
|
||||
|
||||
#include <pthread.h> /* for pthread_t */
|
||||
#include <unistd.h> /* for size_t */
|
||||
|
||||
enum hdfsStreamType
|
||||
{
|
||||
UNINITIALIZED = 0,
|
||||
INPUT = 1,
|
||||
OUTPUT = 2,
|
||||
};
|
||||
|
||||
/**
|
||||
* webhdfsBuffer - used to hold the data for reading from / writing to the http connection
|
||||
*/
|
||||
typedef struct {
|
||||
const char *wbuffer; // The user's buffer for uploading
|
||||
size_t remaining; // Length of content
|
||||
size_t offset; // offset for reading
|
||||
int openFlag; // Check whether the hdfsOpenFile has been called before
|
||||
int closeFlag; // Whether to close the http connection for writing
|
||||
pthread_mutex_t writeMutex; // Synchronization between the curl and hdfsWrite threads
|
||||
pthread_cond_t newwrite_or_close; // Transferring thread waits for this condition
|
||||
// when there is no more content for transferring in the buffer
|
||||
pthread_cond_t transfer_finish; // Condition used to indicate finishing transferring (one buffer)
|
||||
} webhdfsBuffer;
|
||||
|
||||
struct webhdfsFileHandle {
|
||||
char *absPath;
|
||||
int bufferSize;
|
||||
short replication;
|
||||
tSize blockSize;
|
||||
char *datanode;
|
||||
webhdfsBuffer *uploadBuffer;
|
||||
pthread_t connThread;
|
||||
};
|
||||
|
||||
enum HttpHeader {
|
||||
GET,
|
||||
|
|
|
@ -1,616 +0,0 @@
|
|||
/**
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
* regarding copyright ownership. The ASF licenses this file
|
||||
* to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
#include <stdio.h>
|
||||
#include <string.h>
|
||||
#include "webhdfs.h"
|
||||
#include "jni_helper.h"
|
||||
#include "exception.h"
|
||||
|
||||
/* Some frequently used Java paths */
|
||||
#define HADOOP_CONF "org/apache/hadoop/conf/Configuration"
|
||||
#define HADOOP_PATH "org/apache/hadoop/fs/Path"
|
||||
#define HADOOP_LOCALFS "org/apache/hadoop/fs/LocalFileSystem"
|
||||
#define HADOOP_FS "org/apache/hadoop/fs/FileSystem"
|
||||
#define HADOOP_FSSTATUS "org/apache/hadoop/fs/FsStatus"
|
||||
#define HADOOP_BLK_LOC "org/apache/hadoop/fs/BlockLocation"
|
||||
#define HADOOP_DFS "org/apache/hadoop/hdfs/DistributedFileSystem"
|
||||
#define HADOOP_ISTRM "org/apache/hadoop/fs/FSDataInputStream"
|
||||
#define HADOOP_OSTRM "org/apache/hadoop/fs/FSDataOutputStream"
|
||||
#define HADOOP_STAT "org/apache/hadoop/fs/FileStatus"
|
||||
#define HADOOP_FSPERM "org/apache/hadoop/fs/permission/FsPermission"
|
||||
#define JAVA_NET_ISA "java/net/InetSocketAddress"
|
||||
#define JAVA_NET_URI "java/net/URI"
|
||||
#define JAVA_STRING "java/lang/String"
|
||||
|
||||
#define JAVA_VOID "V"
|
||||
|
||||
/* Macros for constructing method signatures */
|
||||
#define JPARAM(X) "L" X ";"
|
||||
#define JARRPARAM(X) "[L" X ";"
|
||||
#define JMETHOD1(X, R) "(" X ")" R
|
||||
#define JMETHOD2(X, Y, R) "(" X Y ")" R
|
||||
#define JMETHOD3(X, Y, Z, R) "(" X Y Z")" R
|
||||
|
||||
#define KERBEROS_TICKET_CACHE_PATH "hadoop.security.kerberos.ticket.cache.path"
|
||||
|
||||
/**
|
||||
* Helper function to create a org.apache.hadoop.fs.Path object.
|
||||
* @param env: The JNIEnv pointer.
|
||||
* @param path: The file-path for which to construct org.apache.hadoop.fs.Path
|
||||
* object.
|
||||
* @return Returns a jobject on success and NULL on error.
|
||||
*/
|
||||
static jthrowable constructNewObjectOfPath(JNIEnv *env, const char *path,
|
||||
jobject *out)
|
||||
{
|
||||
jthrowable jthr;
|
||||
jstring jPathString;
|
||||
jobject jPath;
|
||||
|
||||
//Construct a java.lang.String object
|
||||
jthr = newJavaStr(env, path, &jPathString);
|
||||
if (jthr)
|
||||
return jthr;
|
||||
//Construct the org.apache.hadoop.fs.Path object
|
||||
jthr = constructNewObjectOfClass(env, &jPath, "org/apache/hadoop/fs/Path",
|
||||
"(Ljava/lang/String;)V", jPathString);
|
||||
destroyLocalReference(env, jPathString);
|
||||
if (jthr)
|
||||
return jthr;
|
||||
*out = jPath;
|
||||
return NULL;
|
||||
}
|
||||
|
||||
/**
|
||||
* Set a configuration value.
|
||||
*
|
||||
* @param env The JNI environment
|
||||
* @param jConfiguration The configuration object to modify
|
||||
* @param key The key to modify
|
||||
* @param value The value to set the key to
|
||||
*
|
||||
* @return NULL on success; exception otherwise
|
||||
*/
|
||||
static jthrowable hadoopConfSetStr(JNIEnv *env, jobject jConfiguration,
|
||||
const char *key, const char *value)
|
||||
{
|
||||
jthrowable jthr;
|
||||
jstring jkey = NULL, jvalue = NULL;
|
||||
|
||||
jthr = newJavaStr(env, key, &jkey);
|
||||
if (jthr)
|
||||
goto done;
|
||||
jthr = newJavaStr(env, value, &jvalue);
|
||||
if (jthr)
|
||||
goto done;
|
||||
jthr = invokeMethod(env, NULL, INSTANCE, jConfiguration,
|
||||
HADOOP_CONF, "set", JMETHOD2(JPARAM(JAVA_STRING),
|
||||
JPARAM(JAVA_STRING), JAVA_VOID),
|
||||
jkey, jvalue);
|
||||
if (jthr)
|
||||
goto done;
|
||||
done:
|
||||
destroyLocalReference(env, jkey);
|
||||
destroyLocalReference(env, jvalue);
|
||||
return jthr;
|
||||
}
|
||||
|
||||
static jthrowable hadoopConfGetStr(JNIEnv *env, jobject jConfiguration,
|
||||
const char *key, char **val)
|
||||
{
|
||||
jthrowable jthr;
|
||||
jvalue jVal;
|
||||
jstring jkey = NULL, jRet = NULL;
|
||||
|
||||
jthr = newJavaStr(env, key, &jkey);
|
||||
if (jthr)
|
||||
goto done;
|
||||
jthr = invokeMethod(env, &jVal, INSTANCE, jConfiguration,
|
||||
HADOOP_CONF, "get", JMETHOD1(JPARAM(JAVA_STRING),
|
||||
JPARAM(JAVA_STRING)), jkey);
|
||||
if (jthr)
|
||||
goto done;
|
||||
jRet = jVal.l;
|
||||
jthr = newCStr(env, jRet, val);
|
||||
done:
|
||||
destroyLocalReference(env, jkey);
|
||||
destroyLocalReference(env, jRet);
|
||||
return jthr;
|
||||
}
|
||||
|
||||
int hdfsConfGetStr(const char *key, char **val)
|
||||
{
|
||||
JNIEnv *env;
|
||||
int ret;
|
||||
jthrowable jthr;
|
||||
jobject jConfiguration = NULL;
|
||||
|
||||
env = getJNIEnv();
|
||||
if (env == NULL) {
|
||||
ret = EINTERNAL;
|
||||
goto done;
|
||||
}
|
||||
jthr = constructNewObjectOfClass(env, &jConfiguration, HADOOP_CONF, "()V");
|
||||
if (jthr) {
|
||||
ret = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
|
||||
"hdfsConfGetStr(%s): new Configuration", key);
|
||||
goto done;
|
||||
}
|
||||
jthr = hadoopConfGetStr(env, jConfiguration, key, val);
|
||||
if (jthr) {
|
||||
ret = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
|
||||
"hdfsConfGetStr(%s): hadoopConfGetStr", key);
|
||||
goto done;
|
||||
}
|
||||
ret = 0;
|
||||
done:
|
||||
destroyLocalReference(env, jConfiguration);
|
||||
if (ret)
|
||||
errno = ret;
|
||||
return ret;
|
||||
}
|
||||
|
||||
void hdfsConfStrFree(char *val)
|
||||
{
|
||||
free(val);
|
||||
}
|
||||
|
||||
static jthrowable hadoopConfGetInt(JNIEnv *env, jobject jConfiguration,
|
||||
const char *key, int32_t *val)
|
||||
{
|
||||
jthrowable jthr = NULL;
|
||||
jvalue jVal;
|
||||
jstring jkey = NULL;
|
||||
|
||||
jthr = newJavaStr(env, key, &jkey);
|
||||
if (jthr)
|
||||
return jthr;
|
||||
jthr = invokeMethod(env, &jVal, INSTANCE, jConfiguration,
|
||||
HADOOP_CONF, "getInt", JMETHOD2(JPARAM(JAVA_STRING), "I", "I"),
|
||||
jkey, (jint)(*val));
|
||||
destroyLocalReference(env, jkey);
|
||||
if (jthr)
|
||||
return jthr;
|
||||
*val = jVal.i;
|
||||
return NULL;
|
||||
}
|
||||
|
||||
int hdfsConfGetInt(const char *key, int32_t *val)
|
||||
{
|
||||
JNIEnv *env;
|
||||
int ret;
|
||||
jobject jConfiguration = NULL;
|
||||
jthrowable jthr;
|
||||
|
||||
env = getJNIEnv();
|
||||
if (env == NULL) {
|
||||
ret = EINTERNAL;
|
||||
goto done;
|
||||
}
|
||||
jthr = constructNewObjectOfClass(env, &jConfiguration, HADOOP_CONF, "()V");
|
||||
if (jthr) {
|
||||
ret = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
|
||||
"hdfsConfGetInt(%s): new Configuration", key);
|
||||
goto done;
|
||||
}
|
||||
jthr = hadoopConfGetInt(env, jConfiguration, key, val);
|
||||
if (jthr) {
|
||||
ret = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
|
||||
"hdfsConfGetInt(%s): hadoopConfGetInt", key);
|
||||
goto done;
|
||||
}
|
||||
ret = 0;
|
||||
done:
|
||||
destroyLocalReference(env, jConfiguration);
|
||||
if (ret)
|
||||
errno = ret;
|
||||
return ret;
|
||||
}
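A brief usage sketch of the two getters above; the configuration keys are assumptions for illustration, not something this patch relies on.
static void showConfExample(void)
{
    char *defaultFs = NULL;
    int32_t bufSize = 4096;  // hdfsConfGetInt passes the existing value as the default
    if (hdfsConfGetStr("fs.defaultFS", &defaultFs) == 0) {
        fprintf(stderr, "fs.defaultFS = %s\n", defaultFs ? defaultFs : "(unset)");
        hdfsConfStrFree(defaultFs);
    }
    if (hdfsConfGetInt("io.file.buffer.size", &bufSize) == 0) {
        fprintf(stderr, "io.file.buffer.size = %d\n", (int)bufSize);
    }
}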
|
||||
|
||||
/**
|
||||
* Calculate the effective URI to use, given a builder configuration.
|
||||
*
|
||||
* If there is not already a URI scheme, we prepend 'hdfs://'.
|
||||
*
|
||||
* If there is not already a port specified, and a port was given to the
|
||||
* builder, we suffix that port. If there is a port specified but also one in
|
||||
* the URI, that is an error.
|
||||
*
|
||||
* @param bld The hdfs builder object
|
||||
* @param uri (out param) dynamically allocated string representing the
|
||||
* effective URI
|
||||
*
|
||||
* @return 0 on success; error code otherwise
|
||||
*/
|
||||
static int calcEffectiveURI(struct hdfsBuilder *bld, char ** uri)
|
||||
{
|
||||
const char *scheme;
|
||||
char suffix[64];
|
||||
const char *lastColon;
|
||||
char *u;
|
||||
size_t uriLen;
|
||||
|
||||
if (!bld->nn_jni)
|
||||
return EINVAL;
|
||||
scheme = (strstr(bld->nn_jni, "://")) ? "" : "hdfs://";
|
||||
if (bld->port == 0) {
|
||||
suffix[0] = '\0';
|
||||
} else {
|
||||
lastColon = rindex(bld->nn_jni, ':');
|
||||
if (lastColon && (strspn(lastColon + 1, "0123456789") ==
|
||||
strlen(lastColon + 1))) {
|
||||
fprintf(stderr, "port %d was given, but URI '%s' already "
|
||||
"contains a port!\n", bld->port, bld->nn_jni);
|
||||
return EINVAL;
|
||||
}
|
||||
snprintf(suffix, sizeof(suffix), ":%d", bld->port);
|
||||
}
|
||||
|
||||
uriLen = strlen(scheme) + strlen(bld->nn_jni) + strlen(suffix);
|
||||
u = malloc((uriLen + 1) * (sizeof(char)));
|
||||
if (!u) {
|
||||
fprintf(stderr, "calcEffectiveURI: out of memory");
|
||||
return ENOMEM;
|
||||
}
|
||||
snprintf(u, uriLen + 1, "%s%s%s", scheme, bld->nn_jni, suffix);
|
||||
*uri = u;
|
||||
return 0;
|
||||
}
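Worked examples of the rules above, with hypothetical inputs:
/*
 *   nn_jni = "namenode.example.com", port = 8020  ->  "hdfs://namenode.example.com:8020"
 *   nn_jni = "hdfs://nn.example.com", port = 0    ->  "hdfs://nn.example.com"
 *   nn_jni = "nn.example.com:9000",   port = 8020 ->  EINVAL (port specified twice)
 */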
|
||||
|
||||
static const char *maybeNull(const char *str)
|
||||
{
|
||||
return str ? str : "(NULL)";
|
||||
}
|
||||
|
||||
const char *hdfsBuilderToStr(const struct hdfsBuilder *bld,
|
||||
char *buf, size_t bufLen)
|
||||
{
|
||||
snprintf(buf, bufLen, "forceNewInstance=%d, nn=%s, port=%d, "
|
||||
"kerbTicketCachePath=%s, userName=%s, workingDir=%s\n",
|
||||
bld->forceNewInstance, maybeNull(bld->nn), bld->port,
|
||||
maybeNull(bld->kerbTicketCachePath),
|
||||
maybeNull(bld->userName), maybeNull(bld->workingDir));
|
||||
return buf;
|
||||
}
|
||||
|
||||
/*
|
||||
* The JNI version of builderConnect; returns the Java FileSystem object obtained through JNI
|
||||
*/
|
||||
jobject hdfsBuilderConnect_JNI(JNIEnv *env, struct hdfsBuilder *bld)
|
||||
{
|
||||
jobject jConfiguration = NULL, jFS = NULL, jURI = NULL, jCachePath = NULL;
|
||||
jstring jURIString = NULL, jUserString = NULL;
|
||||
jvalue jVal;
|
||||
jthrowable jthr = NULL;
|
||||
char *cURI = 0, buf[512];
|
||||
int ret;
|
||||
jobject jRet = NULL;
|
||||
|
||||
// jConfiguration = new Configuration();
|
||||
jthr = constructNewObjectOfClass(env, &jConfiguration, HADOOP_CONF, "()V");
|
||||
if (jthr) {
|
||||
ret = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
|
||||
"hdfsBuilderConnect_JNI(%s)", hdfsBuilderToStr(bld, buf, sizeof(buf)));
|
||||
goto done;
|
||||
}
|
||||
|
||||
//Check what type of FileSystem the caller wants...
|
||||
if (bld->nn_jni == NULL) {
|
||||
// Get a local filesystem.
|
||||
// Also handle the scenario where nn of hdfsBuilder is set to localhost.
|
||||
if (bld->forceNewInstance) {
|
||||
// fs = FileSystem#newInstanceLocal(conf);
|
||||
jthr = invokeMethod(env, &jVal, STATIC, NULL, HADOOP_FS,
|
||||
"newInstanceLocal", JMETHOD1(JPARAM(HADOOP_CONF),
|
||||
JPARAM(HADOOP_LOCALFS)), jConfiguration);
|
||||
if (jthr) {
|
||||
ret = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
|
||||
"hdfsBuilderConnect_JNI(%s)",
|
||||
hdfsBuilderToStr(bld, buf, sizeof(buf)));
|
||||
goto done;
|
||||
}
|
||||
jFS = jVal.l;
|
||||
} else {
|
||||
// fs = FileSystem#getLocal(conf);
|
||||
jthr = invokeMethod(env, &jVal, STATIC, NULL, HADOOP_FS, "getLocal",
|
||||
JMETHOD1(JPARAM(HADOOP_CONF),
|
||||
JPARAM(HADOOP_LOCALFS)),
|
||||
jConfiguration);
|
||||
if (jthr) {
|
||||
ret = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
|
||||
"hdfsBuilderConnect_JNI(%s)",
|
||||
hdfsBuilderToStr(bld, buf, sizeof(buf)));
|
||||
goto done;
|
||||
}
|
||||
jFS = jVal.l;
|
||||
}
|
||||
} else {
|
||||
if (!strcmp(bld->nn_jni, "default")) {
|
||||
// jURI = FileSystem.getDefaultUri(conf)
|
||||
jthr = invokeMethod(env, &jVal, STATIC, NULL, HADOOP_FS,
|
||||
"getDefaultUri",
|
||||
"(Lorg/apache/hadoop/conf/Configuration;)Ljava/net/URI;",
|
||||
jConfiguration);
|
||||
if (jthr) {
|
||||
ret = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
|
||||
"hdfsBuilderConnect_JNI(%s)",
|
||||
hdfsBuilderToStr(bld, buf, sizeof(buf)));
|
||||
goto done;
|
||||
}
|
||||
jURI = jVal.l;
|
||||
} else {
|
||||
// fs = FileSystem#get(URI, conf, ugi);
|
||||
ret = calcEffectiveURI(bld, &cURI);
|
||||
if (ret)
|
||||
goto done;
|
||||
jthr = newJavaStr(env, cURI, &jURIString);
|
||||
if (jthr) {
|
||||
ret = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
|
||||
"hdfsBuilderConnect_JNI(%s)",
|
||||
hdfsBuilderToStr(bld, buf, sizeof(buf)));
|
||||
goto done;
|
||||
}
|
||||
jthr = invokeMethod(env, &jVal, STATIC, NULL, JAVA_NET_URI,
|
||||
"create", "(Ljava/lang/String;)Ljava/net/URI;",
|
||||
jURIString);
|
||||
if (jthr) {
|
||||
ret = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
|
||||
"hdfsBuilderConnect_JNI(%s)",
|
||||
hdfsBuilderToStr(bld, buf, sizeof(buf)));
|
||||
goto done;
|
||||
}
|
||||
jURI = jVal.l;
|
||||
}
|
||||
|
||||
if (bld->kerbTicketCachePath) {
|
||||
jthr = hadoopConfSetStr(env, jConfiguration,
|
||||
KERBEROS_TICKET_CACHE_PATH, bld->kerbTicketCachePath);
|
||||
if (jthr) {
|
||||
ret = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
|
||||
"hdfsBuilderConnect_JNI(%s)",
|
||||
hdfsBuilderToStr(bld, buf, sizeof(buf)));
|
||||
goto done;
|
||||
}
|
||||
}
|
||||
jthr = newJavaStr(env, bld->userName, &jUserString);
|
||||
if (jthr) {
|
||||
ret = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
|
||||
"hdfsBuilderConnect_JNI(%s)",
|
||||
hdfsBuilderToStr(bld, buf, sizeof(buf)));
|
||||
goto done;
|
||||
}
|
||||
if (bld->forceNewInstance) {
|
||||
jthr = invokeMethod(env, &jVal, STATIC, NULL, HADOOP_FS,
|
||||
"newInstance", JMETHOD3(JPARAM(JAVA_NET_URI),
|
||||
JPARAM(HADOOP_CONF), JPARAM(JAVA_STRING),
|
||||
JPARAM(HADOOP_FS)),
|
||||
jURI, jConfiguration, jUserString);
|
||||
if (jthr) {
|
||||
ret = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
|
||||
"hdfsBuilderConnect_JNI(%s)",
|
||||
hdfsBuilderToStr(bld, buf, sizeof(buf)));
|
||||
goto done;
|
||||
}
|
||||
jFS = jVal.l;
|
||||
} else {
|
||||
jthr = invokeMethod(env, &jVal, STATIC, NULL, HADOOP_FS, "get",
|
||||
JMETHOD3(JPARAM(JAVA_NET_URI), JPARAM(HADOOP_CONF),
|
||||
JPARAM(JAVA_STRING), JPARAM(HADOOP_FS)),
|
||||
jURI, jConfiguration, jUserString);
|
||||
if (jthr) {
|
||||
ret = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
|
||||
"hdfsBuilderConnect_JNI(%s)",
|
||||
hdfsBuilderToStr(bld, buf, sizeof(buf)));
|
||||
goto done;
|
||||
}
|
||||
jFS = jVal.l;
|
||||
}
|
||||
}
|
||||
jRet = (*env)->NewGlobalRef(env, jFS);
|
||||
if (!jRet) {
|
||||
ret = printPendingExceptionAndFree(env, PRINT_EXC_ALL,
|
||||
"hdfsBuilderConnect_JNI(%s)",
|
||||
hdfsBuilderToStr(bld, buf, sizeof(buf)));
|
||||
goto done;
|
||||
}
|
||||
ret = 0;
|
||||
|
||||
done:
|
||||
// Release unnecessary local references
|
||||
destroyLocalReference(env, jConfiguration);
|
||||
destroyLocalReference(env, jFS);
|
||||
destroyLocalReference(env, jURI);
|
||||
destroyLocalReference(env, jCachePath);
|
||||
destroyLocalReference(env, jURIString);
|
||||
destroyLocalReference(env, jUserString);
|
||||
free(cURI);
|
||||
|
||||
if (ret) {
|
||||
errno = ret;
|
||||
return NULL;
|
||||
}
|
||||
return jRet;
|
||||
}
|
||||
|
||||
int hdfsDisconnect_JNI(jobject jFS)
|
||||
{
|
||||
// JAVA EQUIVALENT:
|
||||
// fs.close()
|
||||
|
||||
//Get the JNIEnv* corresponding to current thread
|
||||
JNIEnv* env = getJNIEnv();
|
||||
int ret;
|
||||
|
||||
if (env == NULL) {
|
||||
errno = EINTERNAL;
|
||||
return -1;
|
||||
}
|
||||
|
||||
//Sanity check
|
||||
if (jFS == NULL) {
|
||||
errno = EBADF;
|
||||
return -1;
|
||||
}
|
||||
|
||||
jthrowable jthr = invokeMethod(env, NULL, INSTANCE, jFS, HADOOP_FS,
|
||||
"close", "()V");
|
||||
if (jthr) {
|
||||
ret = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
|
||||
"hdfsDisconnect: FileSystem#close");
|
||||
} else {
|
||||
ret = 0;
|
||||
}
|
||||
(*env)->DeleteGlobalRef(env, jFS);
|
||||
if (ret) {
|
||||
errno = ret;
|
||||
return -1;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int hdfsCopyImpl(hdfsFS srcFS, const char* src, hdfsFS dstFS,
|
||||
const char* dst, jboolean deleteSource)
|
||||
{
|
||||
//JAVA EQUIVALENT
|
||||
// FileUtil#copy(srcFS, srcPath, dstFS, dstPath,
|
||||
// deleteSource = false, conf)
|
||||
|
||||
//Get the JNIEnv* corresponding to current thread
|
||||
JNIEnv* env = getJNIEnv();
|
||||
if (env == NULL) {
|
||||
errno = EINTERNAL;
|
||||
return -1;
|
||||
}
|
||||
|
||||
//In libwebhdfs, the hdfsFS returned by the hdfsBuilderConnect family of functions
//is actually an hdfsBuilder instance containing the NameNode's address information.
//Thus we need to use JNI here to get the real Java FileSystem objects.
|
||||
jobject jSrcFS = hdfsBuilderConnect_JNI(env, (struct hdfsBuilder *) srcFS);
|
||||
jobject jDstFS = hdfsBuilderConnect_JNI(env, (struct hdfsBuilder *) dstFS);
|
||||
|
||||
//Parameters
|
||||
jobject jConfiguration = NULL, jSrcPath = NULL, jDstPath = NULL;
|
||||
jthrowable jthr;
|
||||
jvalue jVal;
|
||||
int ret;
|
||||
|
||||
jthr = constructNewObjectOfPath(env, src, &jSrcPath);
|
||||
if (jthr) {
|
||||
ret = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
|
||||
"hdfsCopyImpl(src=%s): constructNewObjectOfPath", src);
|
||||
goto done;
|
||||
}
|
||||
jthr = constructNewObjectOfPath(env, dst, &jDstPath);
|
||||
if (jthr) {
|
||||
ret = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
|
||||
"hdfsCopyImpl(dst=%s): constructNewObjectOfPath", dst);
|
||||
goto done;
|
||||
}
|
||||
|
||||
//Create the org.apache.hadoop.conf.Configuration object
|
||||
jthr = constructNewObjectOfClass(env, &jConfiguration,
|
||||
HADOOP_CONF, "()V");
|
||||
if (jthr) {
|
||||
ret = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
|
||||
"hdfsCopyImpl: Configuration constructor");
|
||||
goto done;
|
||||
}
|
||||
|
||||
//FileUtil#copy
|
||||
jthr = invokeMethod(env, &jVal, STATIC,
|
||||
NULL, "org/apache/hadoop/fs/FileUtil", "copy",
|
||||
"(Lorg/apache/hadoop/fs/FileSystem;Lorg/apache/hadoop/fs/Path;"
|
||||
"Lorg/apache/hadoop/fs/FileSystem;Lorg/apache/hadoop/fs/Path;"
|
||||
"ZLorg/apache/hadoop/conf/Configuration;)Z",
|
||||
jSrcFS, jSrcPath, jDstFS, jDstPath, deleteSource,
|
||||
jConfiguration);
|
||||
if (jthr) {
|
||||
ret = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
|
||||
"hdfsCopyImpl(src=%s, dst=%s, deleteSource=%d): "
|
||||
"FileUtil#copy", src, dst, deleteSource);
|
||||
goto done;
|
||||
}
|
||||
if (!jVal.z) {
|
||||
ret = EIO;
|
||||
goto done;
|
||||
}
|
||||
ret = 0;
|
||||
|
||||
done:
|
||||
destroyLocalReference(env, jConfiguration);
|
||||
destroyLocalReference(env, jSrcPath);
|
||||
destroyLocalReference(env, jDstPath);
|
||||
//Disconnect src/dst FileSystem
|
||||
hdfsDisconnect_JNI(jSrcFS);
|
||||
hdfsDisconnect_JNI(jDstFS);
|
||||
|
||||
if (ret) {
|
||||
errno = ret;
|
||||
return -1;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
int hdfsCopy(hdfsFS srcFS, const char* src, hdfsFS dstFS, const char* dst)
|
||||
{
|
||||
return hdfsCopyImpl(srcFS, src, dstFS, dst, 0);
|
||||
}
|
||||
|
||||
int hdfsMove(hdfsFS srcFS, const char* src, hdfsFS dstFS, const char* dst)
|
||||
{
|
||||
return hdfsCopyImpl(srcFS, src, dstFS, dst, 1);
|
||||
}
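A short usage sketch (handles and paths are illustrative; srcFS and dstFS are assumed to come from the hdfsBuilderConnect family):
if (hdfsCopy(srcFS, "/user/alice/input.txt", dstFS, "/user/alice/copy.txt") != 0) {
    fprintf(stderr, "hdfsCopy failed, errno=%d\n", errno);
}
if (hdfsMove(srcFS, "/user/alice/copy.txt", dstFS, "/user/alice/moved.txt") != 0) {
    fprintf(stderr, "hdfsMove failed, errno=%d\n", errno);
}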
|
||||
|
||||
tOffset hdfsGetDefaultBlockSize(hdfsFS fs)
|
||||
{
|
||||
// JAVA EQUIVALENT:
|
||||
// fs.getDefaultBlockSize();
|
||||
|
||||
//Get the JNIEnv* corresponding to current thread
|
||||
JNIEnv* env = getJNIEnv();
|
||||
if (env == NULL) {
|
||||
errno = EINTERNAL;
|
||||
return -1;
|
||||
}
|
||||
|
||||
//In libwebhdfs, the hdfsFS returned by the hdfsConnect family of functions
//is actually an hdfsBuilder instance containing the NameNode's address information.
//Thus we need to use JNI here to get the real Java FileSystem objects.
|
||||
jobject jFS = hdfsBuilderConnect_JNI(env, (struct hdfsBuilder *) fs);
|
||||
|
||||
//FileSystem#getDefaultBlockSize()
|
||||
jvalue jVal;
|
||||
jthrowable jthr;
|
||||
jthr = invokeMethod(env, &jVal, INSTANCE, jFS, HADOOP_FS,
|
||||
"getDefaultBlockSize", "()J");
|
||||
if (jthr) {
|
||||
errno = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
|
||||
"hdfsGetDefaultBlockSize: FileSystem#getDefaultBlockSize");
|
||||
//Disconnect
|
||||
hdfsDisconnect_JNI(jFS);
|
||||
return -1;
|
||||
}
|
||||
|
||||
//Disconnect
|
||||
hdfsDisconnect_JNI(jFS);
|
||||
return jVal.j;
|
||||
}
|
||||
|
||||
|
||||
|
|
@ -15,14 +15,76 @@
|
|||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
#include "exception.h"
|
||||
#include "hdfs.h" /* for hdfsFileInfo */
|
||||
#include "hdfs_json_parser.h"
|
||||
|
||||
#include <stdlib.h>
|
||||
#include <string.h>
|
||||
#include <ctype.h>
|
||||
#include <jansson.h>
|
||||
#include "hdfs_json_parser.h"
|
||||
#include "exception.h"
|
||||
|
||||
hdfsFileInfo *parseJsonGFS(json_t *jobj, hdfsFileInfo *fileStat, int *numEntries, const char *operation); //Forward Declaration
|
||||
/**
|
||||
* Exception information after calling JSON operations
|
||||
*/
|
||||
struct jsonException {
|
||||
const char *exception;
|
||||
const char *javaClassName;
|
||||
const char *message;
|
||||
};
|
||||
|
||||
static hdfsFileInfo *parseJsonGFS(json_t *jobj, hdfsFileInfo *fileStat,
|
||||
int *numEntries, const char *operation);
|
||||
|
||||
static void dotsToSlashes(char *str)
|
||||
{
|
||||
for (; *str != '\0'; str++) {
|
||||
if (*str == '.')
|
||||
*str = '/';
|
||||
}
|
||||
}
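For example (illustrative values), the JSON exception name arrives dot-separated and is rewritten in place to the slash-separated class-name form used below:
char name[] = "java.io.FileNotFoundException";
dotsToSlashes(name);   // name is now "java/io/FileNotFoundException"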
|
||||
|
||||
int printJsonExceptionV(struct jsonException *exc, int noPrintFlags,
|
||||
const char *fmt, va_list ap)
|
||||
{
|
||||
char *javaClassName = NULL;
|
||||
int excErrno = EINTERNAL, shouldPrint = 0;
|
||||
if (!exc) {
|
||||
fprintf(stderr, "printJsonExceptionV: the jsonException is NULL\n");
|
||||
return EINTERNAL;
|
||||
}
|
||||
javaClassName = strdup(exc->javaClassName);
|
||||
if (!javaClassName) {
|
||||
fprintf(stderr, "printJsonExceptionV: internal out of memory error\n");
|
||||
return EINTERNAL;
|
||||
}
|
||||
dotsToSlashes(javaClassName);
|
||||
getExceptionInfo(javaClassName, noPrintFlags, &excErrno, &shouldPrint);
|
||||
free(javaClassName);
|
||||
|
||||
if (shouldPrint) {
|
||||
vfprintf(stderr, fmt, ap);
|
||||
fprintf(stderr, " error:\n");
|
||||
fprintf(stderr, "Exception: %s\nJavaClassName: %s\nMessage: %s\n",
|
||||
exc->exception, exc->javaClassName, exc->message);
|
||||
}
|
||||
|
||||
free(exc);
|
||||
return excErrno;
|
||||
}
|
||||
|
||||
int printJsonException(struct jsonException *exc, int noPrintFlags,
|
||||
const char *fmt, ...)
|
||||
{
|
||||
va_list ap;
|
||||
int ret;
|
||||
|
||||
va_start(ap, fmt);
|
||||
ret = printJsonExceptionV(exc, noPrintFlags, fmt, ap);
|
||||
va_end(ap);
|
||||
return ret;
|
||||
}
|
||||
|
||||
static hdfsFileInfo *json_parse_array(json_t *jobj, char *key, hdfsFileInfo *fileStat, int *numEntries, const char *operation) {
|
||||
int arraylen = json_array_size(jobj); //Getting the length of the array
|
||||
|
@ -88,12 +150,12 @@ int parseDELETE(char *response) {
|
|||
return (parseBoolean(response));
|
||||
}
|
||||
|
||||
hdfs_exception_msg *parseJsonException(json_t *jobj) {
|
||||
struct jsonException *parseJsonException(json_t *jobj) {
|
||||
const char *key;
|
||||
json_t *value;
|
||||
hdfs_exception_msg *exception = NULL;
|
||||
struct jsonException *exception = NULL;
|
||||
|
||||
exception = (hdfs_exception_msg *) calloc(1, sizeof(hdfs_exception_msg));
|
||||
exception = calloc(1, sizeof(*exception));
|
||||
if (!exception) {
|
||||
return NULL;
|
||||
}
|
||||
|
@ -117,7 +179,7 @@ hdfs_exception_msg *parseJsonException(json_t *jobj) {
|
|||
return exception;
|
||||
}
|
||||
|
||||
hdfs_exception_msg *parseException(const char *content) {
|
||||
struct jsonException *parseException(const char *content) {
|
||||
if (!content) {
|
||||
return NULL;
|
||||
}
|
||||
|
@ -145,7 +207,9 @@ hdfs_exception_msg *parseException(const char *content) {
|
|||
return NULL;
|
||||
}
|
||||
|
||||
hdfsFileInfo *parseJsonGFS(json_t *jobj, hdfsFileInfo *fileStat, int *numEntries, const char *operation) {
|
||||
static hdfsFileInfo *parseJsonGFS(json_t *jobj, hdfsFileInfo *fileStat,
|
||||
int *numEntries, const char *operation)
|
||||
{
|
||||
const char *tempstr;
|
||||
const char *key;
|
||||
json_t *value;
|
||||
|
@ -196,9 +260,9 @@ hdfsFileInfo *parseJsonGFS(json_t *jobj, hdfsFileInfo *fileStat, int *numEntries
|
|||
fileStat = parseJsonGFS(value, &fileStat[0], numEntries, operation);
|
||||
} else if (!strcmp(key,"RemoteException")) {
|
||||
//Besides returning NULL, we also need to print the exception information
|
||||
hdfs_exception_msg *exception = parseJsonException(value);
|
||||
struct jsonException *exception = parseJsonException(value);
|
||||
if (exception) {
|
||||
errno = printExceptionWeb(exception, PRINT_EXC_ALL, "Calling WEBHDFS (%s)", operation);
|
||||
errno = printJsonException(exception, PRINT_EXC_ALL, "Calling WEBHDFS (%s)", operation);
|
||||
}
|
||||
|
||||
if(fileStat != NULL) {
|
||||
|
@ -234,9 +298,9 @@ int checkHeader(char *header, const char *content, const char *operation) {
|
|||
return 0;
|
||||
}
|
||||
if(!(strstr(header, responseCode)) || !(header = strstr(header, "Content-Length"))) {
|
||||
hdfs_exception_msg *exc = parseException(content);
|
||||
struct jsonException *exc = parseException(content);
|
||||
if (exc) {
|
||||
errno = printExceptionWeb(exc, PRINT_EXC_ALL, "Calling WEBHDFS (%s)", operation);
|
||||
errno = printJsonException(exc, PRINT_EXC_ALL, "Calling WEBHDFS (%s)", operation);
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
@ -259,14 +323,14 @@ int parseOPEN(const char *header, const char *content) {
|
|||
return -1;
|
||||
}
|
||||
if(!(strstr(header,responseCode1) && strstr(header, responseCode2))) {
|
||||
hdfs_exception_msg *exc = parseException(content);
|
||||
struct jsonException *exc = parseException(content);
|
||||
if (exc) {
|
||||
//if the exception is an IOException and it is because the offset is out of the range
|
||||
//do not print out the exception
|
||||
if (!strcasecmp(exc->exception, "IOException") && strstr(exc->message, "out of the range")) {
|
||||
return 0;
|
||||
}
|
||||
errno = printExceptionWeb(exc, PRINT_EXC_ALL, "Calling WEBHDFS (OPEN)");
|
||||
errno = printJsonException(exc, PRINT_EXC_ALL, "Calling WEBHDFS (OPEN)");
|
||||
}
|
||||
return -1;
|
||||
}
|
||||
|
@ -297,9 +361,9 @@ int checkIfRedirect(const char *const headerstr, const char *content, const char
|
|||
}
|
||||
if(!(tempHeader = strstr(headerstr,responseCode))) {
|
||||
//process possible exception information
|
||||
hdfs_exception_msg *exc = parseException(content);
|
||||
struct jsonException *exc = parseException(content);
|
||||
if (exc) {
|
||||
errno = printExceptionWeb(exc, PRINT_EXC_ALL, "Calling WEBHDFS (%s)", operation);
|
||||
errno = printJsonException(exc, PRINT_EXC_ALL, "Calling WEBHDFS (%s)", operation);
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
@ -350,9 +414,9 @@ int parseDnWRITE(const char *header, const char *content) {
|
|||
return 0;
|
||||
}
|
||||
if(!(strstr(header,responseCode))) {
|
||||
hdfs_exception_msg *exc = parseException(content);
|
||||
struct jsonException *exc = parseException(content);
|
||||
if (exc) {
|
||||
errno = printExceptionWeb(exc, PRINT_EXC_ALL, "Calling WEBHDFS (WRITE(DataNode))");
|
||||
errno = printJsonException(exc, PRINT_EXC_ALL, "Calling WEBHDFS (WRITE(DataNode))");
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
@ -365,9 +429,9 @@ int parseDnAPPEND(const char *header, const char *content) {
|
|||
return 0;
|
||||
}
|
||||
if(!(strstr(header, responseCode))) {
|
||||
hdfs_exception_msg *exc = parseException(content);
|
||||
struct jsonException *exc = parseException(content);
|
||||
if (exc) {
|
||||
errno = printExceptionWeb(exc, PRINT_EXC_ALL, "Calling WEBHDFS (APPEND(DataNode))");
|
||||
errno = printJsonException(exc, PRINT_EXC_ALL, "Calling WEBHDFS (APPEND(DataNode))");
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
|
|
@ -17,7 +17,23 @@
|
|||
*/
|
||||
#ifndef _HDFS_JSON_PARSER_H_
|
||||
#define _HDFS_JSON_PARSER_H_
|
||||
#include "webhdfs.h"
|
||||
|
||||
struct jsonException;
|
||||
|
||||
/**
|
||||
* Print out JSON exception information.
|
||||
*
|
||||
* @param exc The exception information to print and free
|
||||
* @param noPrintFlags Flags which determine which exceptions we should NOT
|
||||
* print.
|
||||
* @param fmt Printf-style format list
|
||||
* @param ... Printf-style varargs
|
||||
*
|
||||
* @return The POSIX error number associated with the exception
|
||||
* object.
|
||||
*/
|
||||
int printJsonException(struct jsonException *exc, int noPrintFlags,
|
||||
const char *fmt, ...);
|
||||
|
||||
int parseMKDIR(char *response);
|
||||
int parseRENAME(char *response);
|
||||
|
|
File diff suppressed because it is too large
|
@ -1,609 +0,0 @@
|
|||
/**
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
* regarding copyright ownership. The ASF licenses this file
|
||||
* to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
//#include "config.h"
|
||||
#include "exception.h"
|
||||
#include "jni_helper.h"
|
||||
|
||||
#include <stdio.h>
|
||||
#include <string.h>
|
||||
|
||||
static pthread_mutex_t hdfsHashMutex = PTHREAD_MUTEX_INITIALIZER;
|
||||
static pthread_mutex_t jvmMutex = PTHREAD_MUTEX_INITIALIZER;
|
||||
static volatile int hashTableInited = 0;
|
||||
|
||||
#define LOCK_HASH_TABLE() pthread_mutex_lock(&hdfsHashMutex)
|
||||
#define UNLOCK_HASH_TABLE() pthread_mutex_unlock(&hdfsHashMutex)
|
||||
|
||||
|
||||
/** The Native return types that methods could return */
|
||||
#define VOID 'V'
|
||||
#define JOBJECT 'L'
|
||||
#define JARRAYOBJECT '['
|
||||
#define JBOOLEAN 'Z'
|
||||
#define JBYTE 'B'
|
||||
#define JCHAR 'C'
|
||||
#define JSHORT 'S'
|
||||
#define JINT 'I'
|
||||
#define JLONG 'J'
|
||||
#define JFLOAT 'F'
|
||||
#define JDOUBLE 'D'
|
||||
|
||||
|
||||
/**
|
||||
* MAX_HASH_TABLE_ELEM: The maximum no. of entries in the hashtable.
|
||||
* It's set to 4096 to account for (classNames + No. of threads)
|
||||
*/
|
||||
#define MAX_HASH_TABLE_ELEM 4096
|
||||
|
||||
/** Key that allows us to retrieve thread-local storage */
|
||||
static pthread_key_t gTlsKey;
|
||||
|
||||
/** nonzero if we succeeded in initializing gTlsKey. Protected by the jvmMutex */
|
||||
static int gTlsKeyInitialized = 0;
|
||||
|
||||
/** Pthreads thread-local storage for each library thread. */
|
||||
struct hdfsTls {
|
||||
JNIEnv *env;
|
||||
};
|
||||
|
||||
/**
|
||||
* The function that is called whenever a thread with libhdfs thread local data
|
||||
* is destroyed.
|
||||
*
|
||||
* @param v The thread-local data
|
||||
*/
|
||||
static void hdfsThreadDestructor(void *v)
|
||||
{
|
||||
struct hdfsTls *tls = v;
|
||||
JavaVM *vm;
|
||||
JNIEnv *env = tls->env;
|
||||
jint ret;
|
||||
|
||||
ret = (*env)->GetJavaVM(env, &vm);
|
||||
if (ret) {
|
||||
fprintf(stderr, "hdfsThreadDestructor: GetJavaVM failed with "
|
||||
"error %d\n", ret);
|
||||
(*env)->ExceptionDescribe(env);
|
||||
} else {
|
||||
(*vm)->DetachCurrentThread(vm);
|
||||
}
|
||||
free(tls);
|
||||
}
|
||||
|
||||
void destroyLocalReference(JNIEnv *env, jobject jObject)
|
||||
{
|
||||
if (jObject)
|
||||
(*env)->DeleteLocalRef(env, jObject);
|
||||
}
|
||||
|
||||
static jthrowable validateMethodType(JNIEnv *env, MethType methType)
|
||||
{
|
||||
if (methType != STATIC && methType != INSTANCE) {
|
||||
return newRuntimeError(env, "validateMethodType(methType=%d): "
|
||||
"illegal method type.\n", methType);
|
||||
}
|
||||
return NULL;
|
||||
}
|
||||
|
||||
jthrowable newJavaStr(JNIEnv *env, const char *str, jstring *out)
|
||||
{
|
||||
jstring jstr;
|
||||
|
||||
if (!str) {
|
||||
/* Can't pass NULL to NewStringUTF: the result would be
|
||||
* implementation-defined. */
|
||||
*out = NULL;
|
||||
return NULL;
|
||||
}
|
||||
jstr = (*env)->NewStringUTF(env, str);
|
||||
if (!jstr) {
|
||||
/* If NewStringUTF returns NULL, an exception has been thrown,
|
||||
* which we need to handle. Probably an OOM. */
|
||||
return getPendingExceptionAndClear(env);
|
||||
}
|
||||
*out = jstr;
|
||||
return NULL;
|
||||
}
|
||||
|
||||
jthrowable newCStr(JNIEnv *env, jstring jstr, char **out)
|
||||
{
|
||||
const char *tmp;
|
||||
|
||||
if (!jstr) {
|
||||
*out = NULL;
|
||||
return NULL;
|
||||
}
|
||||
tmp = (*env)->GetStringUTFChars(env, jstr, NULL);
|
||||
if (!tmp) {
|
||||
return getPendingExceptionAndClear(env);
|
||||
}
|
||||
*out = strdup(tmp);
|
||||
(*env)->ReleaseStringUTFChars(env, jstr, tmp);
|
||||
return NULL;
|
||||
}
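A minimal round-trip sketch using the two helpers above (the path literal and message are illustrative):
jstring jpath = NULL;
char *copy = NULL;
jthrowable jthr = newJavaStr(env, "/user/alice", &jpath);
if (!jthr)
    jthr = newCStr(env, jpath, &copy);   // strdup'ed copy; caller must free()
destroyLocalReference(env, jpath);
if (jthr)
    printExceptionAndFree(env, jthr, PRINT_EXC_ALL, "string round trip");
free(copy);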
|
||||
|
||||
static int hashTableInit(void)
|
||||
{
|
||||
if (!hashTableInited) {
|
||||
LOCK_HASH_TABLE();
|
||||
if (!hashTableInited) {
|
||||
if (hcreate(MAX_HASH_TABLE_ELEM) == 0) {
|
||||
fprintf(stderr, "error creating hashtable, <%d>: %s\n",
|
||||
errno, strerror(errno));
|
||||
return 0;
|
||||
}
|
||||
hashTableInited = 1;
|
||||
}
|
||||
UNLOCK_HASH_TABLE();
|
||||
}
|
||||
return 1;
|
||||
}
|
||||
|
||||
|
||||
static int insertEntryIntoTable(const char *key, void *data)
|
||||
{
|
||||
ENTRY e, *ep;
|
||||
if (key == NULL || data == NULL) {
|
||||
return 0;
|
||||
}
|
||||
if (! hashTableInit()) {
|
||||
return -1;
|
||||
}
|
||||
e.data = data;
|
||||
e.key = (char*)key;
|
||||
LOCK_HASH_TABLE();
|
||||
ep = hsearch(e, ENTER);
|
||||
UNLOCK_HASH_TABLE();
|
||||
if (ep == NULL) {
|
||||
fprintf(stderr, "warn adding key (%s) to hash table, <%d>: %s\n",
|
||||
key, errno, strerror(errno));
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
||||
|
||||
static void* searchEntryFromTable(const char *key)
|
||||
{
|
||||
ENTRY e,*ep;
|
||||
if (key == NULL) {
|
||||
return NULL;
|
||||
}
|
||||
hashTableInit();
|
||||
e.key = (char*)key;
|
||||
LOCK_HASH_TABLE();
|
||||
ep = hsearch(e, FIND);
|
||||
UNLOCK_HASH_TABLE();
|
||||
if (ep != NULL) {
|
||||
return ep->data;
|
||||
}
|
||||
return NULL;
|
||||
}
|
||||
|
||||
|
||||
|
||||
jthrowable invokeMethod(JNIEnv *env, jvalue *retval, MethType methType,
|
||||
jobject instObj, const char *className,
|
||||
const char *methName, const char *methSignature, ...)
|
||||
{
|
||||
va_list args;
|
||||
jclass cls;
|
||||
jmethodID mid;
|
||||
jthrowable jthr;
|
||||
const char *str;
|
||||
char returnType;
|
||||
|
||||
jthr = validateMethodType(env, methType);
|
||||
if (jthr)
|
||||
return jthr;
|
||||
jthr = globalClassReference(className, env, &cls);
|
||||
if (jthr)
|
||||
return jthr;
|
||||
jthr = methodIdFromClass(className, methName, methSignature,
|
||||
methType, env, &mid);
|
||||
if (jthr)
|
||||
return jthr;
|
||||
str = methSignature;
|
||||
while (*str != ')') str++;
|
||||
str++;
|
||||
returnType = *str;
|
||||
va_start(args, methSignature);
|
||||
if (returnType == JOBJECT || returnType == JARRAYOBJECT) {
|
||||
jobject jobj = NULL;
|
||||
if (methType == STATIC) {
|
||||
jobj = (*env)->CallStaticObjectMethodV(env, cls, mid, args);
|
||||
}
|
||||
else if (methType == INSTANCE) {
|
||||
jobj = (*env)->CallObjectMethodV(env, instObj, mid, args);
|
||||
}
|
||||
retval->l = jobj;
|
||||
}
|
||||
else if (returnType == VOID) {
|
||||
if (methType == STATIC) {
|
||||
(*env)->CallStaticVoidMethodV(env, cls, mid, args);
|
||||
}
|
||||
else if (methType == INSTANCE) {
|
||||
(*env)->CallVoidMethodV(env, instObj, mid, args);
|
||||
}
|
||||
}
|
||||
else if (returnType == JBOOLEAN) {
|
||||
jboolean jbool = 0;
|
||||
if (methType == STATIC) {
|
||||
jbool = (*env)->CallStaticBooleanMethodV(env, cls, mid, args);
|
||||
}
|
||||
else if (methType == INSTANCE) {
|
||||
jbool = (*env)->CallBooleanMethodV(env, instObj, mid, args);
|
||||
}
|
||||
retval->z = jbool;
|
||||
}
|
||||
else if (returnType == JSHORT) {
|
||||
jshort js = 0;
|
||||
if (methType == STATIC) {
|
||||
js = (*env)->CallStaticShortMethodV(env, cls, mid, args);
|
||||
}
|
||||
else if (methType == INSTANCE) {
|
||||
js = (*env)->CallShortMethodV(env, instObj, mid, args);
|
||||
}
|
||||
retval->s = js;
|
||||
}
|
||||
else if (returnType == JLONG) {
|
||||
jlong jl = -1;
|
||||
if (methType == STATIC) {
|
||||
jl = (*env)->CallStaticLongMethodV(env, cls, mid, args);
|
||||
}
|
||||
else if (methType == INSTANCE) {
|
||||
jl = (*env)->CallLongMethodV(env, instObj, mid, args);
|
||||
}
|
||||
retval->j = jl;
|
||||
}
|
||||
else if (returnType == JINT) {
|
||||
jint ji = -1;
|
||||
if (methType == STATIC) {
|
||||
ji = (*env)->CallStaticIntMethodV(env, cls, mid, args);
|
||||
}
|
||||
else if (methType == INSTANCE) {
|
||||
ji = (*env)->CallIntMethodV(env, instObj, mid, args);
|
||||
}
|
||||
retval->i = ji;
|
||||
}
|
||||
va_end(args);
|
||||
|
||||
jthr = (*env)->ExceptionOccurred(env);
|
||||
if (jthr) {
|
||||
(*env)->ExceptionClear(env);
|
||||
return jthr;
|
||||
}
|
||||
return NULL;
|
||||
}
|
||||
|
||||
jthrowable constructNewObjectOfClass(JNIEnv *env, jobject *out, const char *className,
|
||||
const char *ctorSignature, ...)
|
||||
{
|
||||
va_list args;
|
||||
jclass cls;
|
||||
jmethodID mid;
|
||||
jobject jobj;
|
||||
jthrowable jthr;
|
||||
|
||||
jthr = globalClassReference(className, env, &cls);
|
||||
if (jthr)
|
||||
return jthr;
|
||||
jthr = methodIdFromClass(className, "<init>", ctorSignature,
|
||||
INSTANCE, env, &mid);
|
||||
if (jthr)
|
||||
return jthr;
|
||||
va_start(args, ctorSignature);
|
||||
jobj = (*env)->NewObjectV(env, cls, mid, args);
|
||||
va_end(args);
|
||||
if (!jobj)
|
||||
return getPendingExceptionAndClear(env);
|
||||
*out = jobj;
|
||||
return NULL;
|
||||
}
|
||||
|
||||
|
||||
jthrowable methodIdFromClass(const char *className, const char *methName,
|
||||
const char *methSignature, MethType methType,
|
||||
JNIEnv *env, jmethodID *out)
|
||||
{
|
||||
jclass cls;
|
||||
jthrowable jthr;
|
||||
|
||||
jthr = globalClassReference(className, env, &cls);
|
||||
if (jthr)
|
||||
return jthr;
|
||||
jmethodID mid = 0;
|
||||
jthr = validateMethodType(env, methType);
|
||||
if (jthr)
|
||||
return jthr;
|
||||
if (methType == STATIC) {
|
||||
mid = (*env)->GetStaticMethodID(env, cls, methName, methSignature);
|
||||
}
|
||||
else if (methType == INSTANCE) {
|
||||
mid = (*env)->GetMethodID(env, cls, methName, methSignature);
|
||||
}
|
||||
if (mid == NULL) {
|
||||
fprintf(stderr, "could not find method %s from class %s with "
|
||||
"signature %s\n", methName, className, methSignature);
|
||||
return getPendingExceptionAndClear(env);
|
||||
}
|
||||
*out = mid;
|
||||
return NULL;
|
||||
}
|
||||
|
||||
jthrowable globalClassReference(const char *className, JNIEnv *env, jclass *out)
|
||||
{
|
||||
jclass clsLocalRef;
|
||||
jclass cls = searchEntryFromTable(className);
|
||||
if (cls) {
|
||||
*out = cls;
|
||||
return NULL;
|
||||
}
|
||||
clsLocalRef = (*env)->FindClass(env,className);
|
||||
if (clsLocalRef == NULL) {
|
||||
return getPendingExceptionAndClear(env);
|
||||
}
|
||||
cls = (*env)->NewGlobalRef(env, clsLocalRef);
|
||||
if (cls == NULL) {
|
||||
(*env)->DeleteLocalRef(env, clsLocalRef);
|
||||
return getPendingExceptionAndClear(env);
|
||||
}
|
||||
(*env)->DeleteLocalRef(env, clsLocalRef);
|
||||
insertEntryIntoTable(className, cls);
|
||||
*out = cls;
|
||||
return NULL;
|
||||
}
|
||||
|
||||
jthrowable classNameOfObject(jobject jobj, JNIEnv *env, char **name)
|
||||
{
|
||||
jthrowable jthr;
|
||||
jclass cls, clsClass = NULL;
|
||||
jmethodID mid;
|
||||
jstring str = NULL;
|
||||
const char *cstr = NULL;
|
||||
char *newstr;
|
||||
|
||||
cls = (*env)->GetObjectClass(env, jobj);
|
||||
if (cls == NULL) {
|
||||
jthr = getPendingExceptionAndClear(env);
|
||||
goto done;
|
||||
}
|
||||
clsClass = (*env)->FindClass(env, "java/lang/Class");
|
||||
if (clsClass == NULL) {
|
||||
jthr = getPendingExceptionAndClear(env);
|
||||
goto done;
|
||||
}
|
||||
mid = (*env)->GetMethodID(env, clsClass, "getName", "()Ljava/lang/String;");
|
||||
if (mid == NULL) {
|
||||
jthr = getPendingExceptionAndClear(env);
|
||||
goto done;
|
||||
}
|
||||
str = (*env)->CallObjectMethod(env, cls, mid);
|
||||
if (str == NULL) {
|
||||
jthr = getPendingExceptionAndClear(env);
|
||||
goto done;
|
||||
}
|
||||
cstr = (*env)->GetStringUTFChars(env, str, NULL);
|
||||
if (!cstr) {
|
||||
jthr = getPendingExceptionAndClear(env);
|
||||
goto done;
|
||||
}
|
||||
newstr = strdup(cstr);
|
||||
if (newstr == NULL) {
|
||||
jthr = newRuntimeError(env, "classNameOfObject: out of memory");
|
||||
goto done;
|
||||
}
|
||||
*name = newstr;
|
||||
jthr = NULL;
|
||||
|
||||
done:
|
||||
destroyLocalReference(env, cls);
|
||||
destroyLocalReference(env, clsClass);
|
||||
if (str) {
|
||||
if (cstr)
|
||||
(*env)->ReleaseStringUTFChars(env, str, cstr);
|
||||
(*env)->DeleteLocalRef(env, str);
|
||||
}
|
||||
return jthr;
|
||||
}
|
||||
|
||||
|
||||
/**
|
||||
* Get the global JNI environment.
|
||||
*
|
||||
* We only have to create the JVM once. After that, we can use it in
|
||||
* every thread. You must be holding the jvmMutex when you call this
|
||||
* function.
|
||||
*
|
||||
* @return The JNIEnv* on success; NULL otherwise
|
||||
*/
|
||||
static JNIEnv* getGlobalJNIEnv(void)
|
||||
{
|
||||
const jsize vmBufLength = 1;
|
||||
JavaVM* vmBuf[vmBufLength];
|
||||
JNIEnv *env;
|
||||
jint rv = 0;
|
||||
jint noVMs = 0;
|
||||
jthrowable jthr;
|
||||
|
||||
rv = JNI_GetCreatedJavaVMs(&(vmBuf[0]), vmBufLength, &noVMs);
|
||||
if (rv != 0) {
|
||||
fprintf(stderr, "JNI_GetCreatedJavaVMs failed with error: %d\n", rv);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
if (noVMs == 0) {
|
||||
//Get the environment variables for initializing the JVM
|
||||
char *hadoopClassPath = getenv("CLASSPATH");
|
||||
if (hadoopClassPath == NULL) {
|
||||
fprintf(stderr, "Environment variable CLASSPATH not set!\n");
|
||||
return NULL;
|
||||
}
|
||||
char *hadoopClassPathVMArg = "-Djava.class.path=";
|
||||
size_t optHadoopClassPathLen = strlen(hadoopClassPath) +
|
||||
strlen(hadoopClassPathVMArg) + 1;
|
||||
char *optHadoopClassPath = malloc(sizeof(char)*optHadoopClassPathLen);
|
||||
snprintf(optHadoopClassPath, optHadoopClassPathLen,
|
||||
"%s%s", hadoopClassPathVMArg, hadoopClassPath);
|
||||
|
||||
// Determine the # of LIBHDFS_OPTS args
|
||||
int noArgs = 1;
|
||||
char *hadoopJvmArgs = getenv("LIBHDFS_OPTS");
|
||||
char jvmArgDelims[] = " ";
|
||||
char *str, *token, *savePtr;
|
||||
if (hadoopJvmArgs != NULL) {
|
||||
hadoopJvmArgs = strdup(hadoopJvmArgs);
|
||||
for (noArgs = 1, str = hadoopJvmArgs; ; noArgs++, str = NULL) {
|
||||
token = strtok_r(str, jvmArgDelims, &savePtr);
|
||||
if (NULL == token) {
|
||||
break;
|
||||
}
|
||||
}
|
||||
free(hadoopJvmArgs);
|
||||
}
|
||||
|
||||
// Now that we know the # args, populate the options array
|
||||
JavaVMOption options[noArgs];
|
||||
options[0].optionString = optHadoopClassPath;
|
||||
hadoopJvmArgs = getenv("LIBHDFS_OPTS");
|
||||
if (hadoopJvmArgs != NULL) {
|
||||
hadoopJvmArgs = strdup(hadoopJvmArgs);
|
||||
for (noArgs = 1, str = hadoopJvmArgs; ; noArgs++, str = NULL) {
|
||||
token = strtok_r(str, jvmArgDelims, &savePtr);
|
||||
if (NULL == token) {
|
||||
break;
|
||||
}
|
||||
options[noArgs].optionString = token;
|
||||
}
|
||||
}
|
||||
|
||||
//Create the VM
|
||||
JavaVMInitArgs vm_args;
|
||||
JavaVM *vm;
|
||||
vm_args.version = JNI_VERSION_1_2;
|
||||
vm_args.options = options;
|
||||
vm_args.nOptions = noArgs;
|
||||
vm_args.ignoreUnrecognized = 1;
|
||||
|
||||
rv = JNI_CreateJavaVM(&vm, (void*)&env, &vm_args);
|
||||
|
||||
if (hadoopJvmArgs != NULL) {
|
||||
free(hadoopJvmArgs);
|
||||
}
|
||||
free(optHadoopClassPath);
|
||||
|
||||
if (rv != 0) {
|
||||
fprintf(stderr, "Call to JNI_CreateJavaVM failed "
|
||||
"with error: %d\n", rv);
|
||||
return NULL;
|
||||
}
|
||||
jthr = invokeMethod(env, NULL, STATIC, NULL,
|
||||
"org/apache/hadoop/fs/FileSystem",
|
||||
"loadFileSystems", "()V");
|
||||
if (jthr) {
|
||||
printExceptionAndFree(env, jthr, PRINT_EXC_ALL, "loadFileSystems");
|
||||
}
|
||||
}
|
||||
else {
|
||||
//Attach this thread to the VM
|
||||
JavaVM* vm = vmBuf[0];
|
||||
rv = (*vm)->AttachCurrentThread(vm, (void*)&env, 0);
|
||||
if (rv != 0) {
|
||||
fprintf(stderr, "Call to AttachCurrentThread "
|
||||
"failed with error: %d\n", rv);
|
||||
return NULL;
|
||||
}
|
||||
}
|
||||
|
||||
return env;
|
||||
}
|
||||
|
||||
/**
|
||||
* getJNIEnv: A helper function to get the JNIEnv* for the given thread.
|
||||
* If no JVM exists, then one will be created. JVM command line arguments
|
||||
* are obtained from the LIBHDFS_OPTS environment variable.
|
||||
*
|
||||
* Implementation note: we rely on POSIX thread-local storage (tls).
|
||||
* This allows us to associate a destructor function with each thread, that
|
||||
* will detach the thread from the Java VM when the thread terminates. If we
|
||||
* failt to do this, it will cause a memory leak.
|
||||
*
|
||||
* However, POSIX TLS is not the most efficient way to do things. It requires a
|
||||
* key to be initialized before it can be used. Since we don't know if this key
|
||||
* is initialized at the start of this function, we have to lock a mutex first
|
||||
* and check. Luckily, most operating systems support the more efficient
|
||||
* __thread construct, which is initialized by the linker.
|
||||
*
|
||||
* @param: None.
|
||||
* @return The JNIEnv* corresponding to the thread.
|
||||
*/
|
||||
JNIEnv* getJNIEnv(void)
|
||||
{
|
||||
JNIEnv *env;
|
||||
struct hdfsTls *tls;
|
||||
int ret;
|
||||
|
||||
#ifdef HAVE_BETTER_TLS
|
||||
static __thread struct hdfsTls *quickTls = NULL;
|
||||
if (quickTls)
|
||||
return quickTls->env;
|
||||
#endif
|
||||
pthread_mutex_lock(&jvmMutex);
|
||||
if (!gTlsKeyInitialized) {
|
||||
ret = pthread_key_create(&gTlsKey, hdfsThreadDestructor);
|
||||
if (ret) {
|
||||
pthread_mutex_unlock(&jvmMutex);
|
||||
fprintf(stderr, "getJNIEnv: pthread_key_create failed with "
|
||||
"error %d\n", ret);
|
||||
return NULL;
|
||||
}
|
||||
gTlsKeyInitialized = 1;
|
||||
}
|
||||
tls = pthread_getspecific(gTlsKey);
|
||||
if (tls) {
|
||||
pthread_mutex_unlock(&jvmMutex);
|
||||
return tls->env;
|
||||
}
|
||||
|
||||
env = getGlobalJNIEnv();
|
||||
pthread_mutex_unlock(&jvmMutex);
|
||||
if (!env) {
|
||||
fprintf(stderr, "getJNIEnv: getGlobalJNIEnv failed\n");
|
||||
return NULL;
|
||||
}
|
||||
tls = calloc(1, sizeof(struct hdfsTls));
|
||||
if (!tls) {
|
||||
fprintf(stderr, "getJNIEnv: OOM allocating %zd bytes\n",
|
||||
sizeof(struct hdfsTls));
|
||||
return NULL;
|
||||
}
|
||||
tls->env = env;
|
||||
ret = pthread_setspecific(gTlsKey, tls);
|
||||
if (ret) {
|
||||
fprintf(stderr, "getJNIEnv: pthread_setspecific failed with "
|
||||
"error code %d\n", ret);
|
||||
hdfsThreadDestructor(tls);
|
||||
return NULL;
|
||||
}
|
||||
#ifdef HAVE_BETTER_TLS
|
||||
quickTls = tls;
|
||||
#endif
|
||||
return env;
|
||||
}
|
||||
|
|
@ -1,122 +0,0 @@
|
|||
/**
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
* regarding copyright ownership. The ASF licenses this file
|
||||
* to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
#ifndef LIBHDFS_JNI_HELPER_H
|
||||
#define LIBHDFS_JNI_HELPER_H
|
||||
|
||||
#include <jni.h>
|
||||
#include <stdio.h>
|
||||
|
||||
#include <stdlib.h>
|
||||
#include <stdarg.h>
|
||||
#include <search.h>
|
||||
#include <pthread.h>
|
||||
#include <errno.h>
|
||||
|
||||
#define PATH_SEPARATOR ':'
|
||||
|
||||
|
||||
/** Denote the method we want to invoke as STATIC or INSTANCE */
|
||||
typedef enum {
|
||||
STATIC,
|
||||
INSTANCE
|
||||
} MethType;
|
||||
|
||||
/**
|
||||
* Create a new malloc'ed C string from a Java string.
|
||||
*
|
||||
* @param env The JNI environment
|
||||
* @param jstr The Java string
|
||||
* @param out (out param) the malloc'ed C string
|
||||
*
|
||||
* @return NULL on success; the exception otherwise
|
||||
*/
|
||||
jthrowable newCStr(JNIEnv *env, jstring jstr, char **out);
|
||||
|
||||
/**
|
||||
* Create a new Java string from a C string.
|
||||
*
|
||||
* @param env The JNI environment
|
||||
* @param str The C string
|
||||
* @param out (out param) the java string
|
||||
*
|
||||
* @return NULL on success; the exception otherwise
|
||||
*/
|
||||
jthrowable newJavaStr(JNIEnv *env, const char *str, jstring *out);
|
||||
|
||||
/**
|
||||
* Helper function to destroy a local reference of java.lang.Object
|
||||
* @param env: The JNIEnv pointer.
|
||||
* @param jFile: The local reference of java.lang.Object object
|
||||
* @return None.
|
||||
*/
|
||||
void destroyLocalReference(JNIEnv *env, jobject jObject);
|
||||
|
||||
/** invokeMethod: Invoke a Static or Instance method.
|
||||
* className: Name of the class where the method can be found
|
||||
* methName: Name of the method
|
||||
* methSignature: the signature of the method "(arg-types)ret-type"
|
||||
* methType: The type of the method (STATIC or INSTANCE)
|
||||
* instObj: Required if the methType is INSTANCE. The object to invoke
|
||||
the method on.
|
||||
* env: The JNIEnv pointer
|
||||
* retval: The pointer to a union type which will contain the result of the
|
||||
method invocation, e.g. if the method returns an Object, retval will be
|
||||
set to that, if the method returns boolean, retval will be set to the
|
||||
value (JNI_TRUE or JNI_FALSE), etc.
|
||||
* exc: If the methods throws any exception, this will contain the reference
|
||||
* Arguments (the method arguments) must be passed after methSignature
|
||||
* RETURNS: -1 on error and 0 on success. If -1 is returned, exc will have
|
||||
a valid exception reference, and the result stored at retval is undefined.
|
||||
*/
|
||||
jthrowable invokeMethod(JNIEnv *env, jvalue *retval, MethType methType,
|
||||
jobject instObj, const char *className, const char *methName,
|
||||
const char *methSignature, ...);
|
||||
|
||||
jthrowable constructNewObjectOfClass(JNIEnv *env, jobject *out, const char *className,
|
||||
const char *ctorSignature, ...);
|
||||
|
||||
jthrowable methodIdFromClass(const char *className, const char *methName,
|
||||
const char *methSignature, MethType methType,
|
||||
JNIEnv *env, jmethodID *out);
|
||||
|
||||
jthrowable globalClassReference(const char *className, JNIEnv *env, jclass *out);
|
||||
|
||||
/** classNameOfObject: Get an object's class name.
|
||||
* @param jobj: The object.
|
||||
* @param env: The JNIEnv pointer.
|
||||
* @param name: (out param) On success, will contain a string containing the
|
||||
* class name. This string must be freed by the caller.
|
||||
* @return NULL on success, or the exception
|
||||
*/
|
||||
jthrowable classNameOfObject(jobject jobj, JNIEnv *env, char **name);
|
||||
|
||||
/** getJNIEnv: A helper function to get the JNIEnv* for the given thread.
|
||||
* If no JVM exists, then one will be created. JVM command line arguments
|
||||
* are obtained from the LIBHDFS_OPTS environment variable.
|
||||
* @param: None.
|
||||
* @return The JNIEnv* corresponding to the thread.
|
||||
* */
|
||||
JNIEnv* getJNIEnv(void);
|
||||
|
||||
#endif /*LIBHDFS_JNI_HELPER_H*/
|
||||
|
||||
/**
|
||||
* vim: ts=4: sw=4: et:
|
||||
*/
|
||||
|
|
@ -17,7 +17,7 @@
|
|||
*/
|
||||
|
||||
#include "expect.h"
|
||||
#include "webhdfs.h"
|
||||
#include "hdfs.h"
|
||||
|
||||
#include <errno.h>
|
||||
#include <semaphore.h>
|
||||
|
|
|
@ -16,7 +16,7 @@
|
|||
* limitations under the License.
|
||||
*/
|
||||
|
||||
#include "webhdfs.h"
|
||||
#include "hdfs.h"
|
||||
|
||||
#include <inttypes.h>
|
||||
#include <jni.h>
|
||||
|
|
|
@ -16,7 +16,7 @@
|
|||
* limitations under the License.
|
||||
*/
|
||||
|
||||
#include "webhdfs.h"
|
||||
#include "hdfs.h"
|
||||
|
||||
#include <stdio.h>
|
||||
#include <stdlib.h>
|
||||
|
|
|
@ -17,7 +17,7 @@
|
|||
*/
|
||||
|
||||
#include "expect.h"
|
||||
#include "webhdfs.h"
|
||||
#include "hdfs.h"
|
||||
|
||||
#include <errno.h>
|
||||
#include <semaphore.h>
|
||||
|
|
|
@ -16,7 +16,7 @@
|
|||
* limitations under the License.
|
||||
*/
|
||||
|
||||
#include "webhdfs.h"
|
||||
#include "hdfs.h"
|
||||
|
||||
#include <limits.h>
|
||||
#include <stdio.h>
|
||||
|
|
|
@ -1,8 +1,9 @@
|
|||
#include "hdfs.h"
|
||||
|
||||
#include <time.h>
|
||||
#include <stdio.h>
|
||||
#include <stdlib.h>
|
||||
#include <sys/time.h>
|
||||
#include "webhdfs.h"
|
||||
|
||||
#ifdef __MACH__
|
||||
#include <mach/clock.h>
|
||||
|
|
|
@ -1,694 +0,0 @@
|
|||
/**
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
* regarding copyright ownership. The ASF licenses this file
|
||||
* to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
#ifndef LIB_WEBHDFS_H
|
||||
#define LIB_WEBHDFS_H
|
||||
|
||||
#include <errno.h> /* for EINTERNAL, etc. */
|
||||
#include <fcntl.h> /* for O_RDONLY, O_WRONLY */
|
||||
#include <stdint.h> /* for uint64_t, etc. */
|
||||
#include <time.h> /* for time_t */
|
||||
#include <pthread.h>
|
||||
|
||||
#ifndef O_RDONLY
|
||||
#define O_RDONLY 1
|
||||
#endif
|
||||
|
||||
#ifndef O_WRONLY
|
||||
#define O_WRONLY 2
|
||||
#endif
|
||||
|
||||
#ifndef EINTERNAL
|
||||
#define EINTERNAL 255
|
||||
#endif
|
||||
|
||||
/** All APIs set errno to meaningful values */
|
||||
|
||||
#ifdef __cplusplus
|
||||
extern "C" {
|
||||
#endif
|
||||
/**
|
||||
* Some utility decls used in libhdfs.
|
||||
*/
|
||||
typedef int32_t tSize; /// size of data for read/write io ops
|
||||
typedef time_t tTime; /// time type in seconds
|
||||
typedef int64_t tOffset;/// offset within the file
|
||||
typedef uint16_t tPort; /// port
|
||||
|
||||
/**
|
||||
* The information required for accessing webhdfs,
|
||||
* including the network address of the namenode and the user name
|
||||
*/
|
||||
struct hdfsBuilder {
|
||||
int forceNewInstance;
|
||||
const char *nn;
|
||||
const char *nn_jni;
|
||||
tPort port;
|
||||
const char *kerbTicketCachePath;
|
||||
const char *userName;
|
||||
/*
|
||||
* This is a new attribute compared to libhdfs.
|
||||
* We maintain a local workingDir for constructing absolute path
|
||||
*/
|
||||
char *workingDir;
|
||||
};
|
||||
|
||||
typedef enum tObjectKind {
|
||||
kObjectKindFile = 'F',
|
||||
kObjectKindDirectory = 'D',
|
||||
} tObjectKind;
|
||||
|
||||
/**
|
||||
* For libhdfs based on JNI, this is used as
|
||||
* the C reflection of org.apache.org.hadoop.FileSystem .
|
||||
* In the current libwebhdfs based on webhdfs,
|
||||
* this is actually hdfsBuilder which contains
|
||||
* the network address of the namenode and the user name
|
||||
*/
|
||||
struct hdfs_internal;
|
||||
typedef struct hdfs_internal* hdfsFS;
|
||||
|
||||
/**
|
||||
* The C equivalent of org.apache.org.hadoop.FSData(Input|Output)Stream .
|
||||
*/
|
||||
enum hdfsStreamType
|
||||
{
|
||||
UNINITIALIZED = 0,
|
||||
INPUT = 1,
|
||||
OUTPUT = 2,
|
||||
};
|
||||
|
||||
/**
|
||||
* The 'file-handle' to a file in hdfs.
|
||||
*/
|
||||
struct hdfsFile_internal {
|
||||
void* file;
|
||||
enum hdfsStreamType type;
|
||||
int flags;
|
||||
tOffset offset;
|
||||
};
|
||||
typedef struct hdfsFile_internal* hdfsFile;
|
||||
|
||||
/**
|
||||
* hdfsFileInfo - Information about a file/directory.
|
||||
*/
|
||||
typedef struct {
|
||||
tObjectKind mKind; /* file or directory */
|
||||
char *mName; /* the name of the file */
|
||||
tTime mLastMod; /* the last modification time for the file in seconds */
|
||||
tOffset mSize; /* the size of the file in bytes */
|
||||
short mReplication; /* the count of replicas */
|
||||
tOffset mBlockSize; /* the block size for the file */
|
||||
char *mOwner; /* the owner of the file */
|
||||
char *mGroup; /* the group associated with the file */
|
||||
short mPermissions; /* the permissions associated with the file */
|
||||
tTime mLastAccess; /* the last access time for the file in seconds */
|
||||
} hdfsFileInfo;
|
||||
|
||||
/**
|
||||
* webhdfsBuffer - used for hold the data for read/write from/to http connection
|
||||
*/
|
||||
typedef struct {
|
||||
const char *wbuffer; /* the user's buffer for uploading */
|
||||
size_t remaining; /* length of content */
|
||||
size_t offset; /* offset for reading */
|
||||
int openFlag; /* check whether the hdfsOpenFile has been called before */
|
||||
int closeFlag; /* whether to close the http connection for writing */
|
||||
pthread_mutex_t writeMutex; // used for syschronization between the curl thread and the hdfsWrite thread
|
||||
pthread_cond_t newwrite_or_close; // transferring thread waits for this condition
|
||||
// when there is no more content for transferring in the buffer
|
||||
pthread_cond_t transfer_finish; // condition used to indicate finishing transferring (one buffer)
|
||||
} webhdfsBuffer;
|
||||
|
||||
struct webhdfsFileHandle {
|
||||
char *absPath;
|
||||
int bufferSize;
|
||||
short replication;
|
||||
tSize blockSize;
|
||||
char *datanode;
|
||||
webhdfsBuffer *uploadBuffer;
|
||||
pthread_t connThread;
|
||||
};
|
||||
|
||||
// Bit fields for hdfsFile_internal flags
|
||||
#define HDFS_FILE_SUPPORTS_DIRECT_READ (1<<0)
|
||||
|
||||
/**
|
||||
* Determine if a file is open for read.
|
||||
*
|
||||
* @param file The HDFS file
|
||||
* @return 1 if the file is open for read; 0 otherwise
|
||||
*/
|
||||
int hdfsFileIsOpenForRead(hdfsFile file);
|
||||
|
||||
/**
|
||||
* Determine if a file is open for write.
|
||||
*
|
||||
* @param file The HDFS file
|
||||
* @return 1 if the file is open for write; 0 otherwise
|
||||
*/
|
||||
int hdfsFileIsOpenForWrite(hdfsFile file);
|
||||
|
||||
/**
|
||||
* Disable the direct read optimization for a file in libhdfs.
|
||||
* This is mainly provided for unit testing purposes.
|
||||
* No longer useful in libwebhdfs since libwebhdfs is based on webhdfs.
|
||||
*
|
||||
* @param file The HDFS file
|
||||
*/
|
||||
void hdfsFileDisableDirectRead(hdfsFile file);
|
||||
|
||||
/**
|
||||
* hdfsConnectAsUser - Connect to a hdfs file system as a specific user
|
||||
* Connect to the hdfs.
|
||||
* @param nn The NameNode. See hdfsBuilderSetNameNode for details.
|
||||
* @param port The port on which the server is listening.
|
||||
* @param user the user name (this is hadoop domain user). Or NULL is equivelant to hhdfsConnect(host, port)
|
||||
* @return Returns a handle to the filesystem or NULL on error.
|
||||
* @deprecated Use hdfsBuilderConnect instead.
|
||||
*/
|
||||
hdfsFS hdfsConnectAsUser(const char* nn, tPort port, const char *user);
|
||||
|
||||
|
||||
/**
|
||||
* hdfsConnect - Connect to a hdfs file system.
|
||||
* Connect to the hdfs.
|
||||
* @param nn The NameNode. See hdfsBuilderSetNameNode for details.
|
||||
* @param port The port on which the server is listening.
|
||||
* @return Returns a handle to the filesystem or NULL on error.
|
||||
* @deprecated Use hdfsBuilderConnect instead.
|
||||
*/
|
||||
hdfsFS hdfsConnect(const char* nn, tPort port);
|
||||
|
||||
/**
|
||||
* hdfsConnect - Connect to an hdfs file system.
|
||||
*
|
||||
* The effect with hdfsConnectAsUser in libwebhdfs.
|
||||
*
|
||||
* @param nn The NameNode. See hdfsBuilderSetNameNode for details.
|
||||
* @param port The port on which the server is listening.
|
||||
* @param user The user name to use when connecting
|
||||
* @return Returns a handle to the filesystem or NULL on error.
|
||||
* @deprecated Use hdfsBuilderConnect instead.
|
||||
*/
|
||||
hdfsFS hdfsConnectAsUserNewInstance(const char* nn, tPort port, const char *user );
|
||||
|
||||
/**
|
||||
* hdfsConnect - Connect to an hdfs file system.
|
||||
*
|
||||
* The same effect with hdfsConnect in libwebhdfs.
|
||||
*
|
||||
* @param nn The NameNode. See hdfsBuilderSetNameNode for details.
|
||||
* @param port The port on which the server is listening.
|
||||
* @return Returns a handle to the filesystem or NULL on error.
|
||||
* @deprecated Use hdfsBuilderConnect instead.
|
||||
*/
|
||||
hdfsFS hdfsConnectNewInstance(const char* nn, tPort port);
|
||||
|
||||
/**
|
||||
* Connect to HDFS using the parameters defined by the builder.
|
||||
*
|
||||
* Every successful call to hdfsBuilderConnect should be matched with a call
|
||||
* to hdfsDisconnect, when the hdfsFS is no longer needed.
|
||||
*
|
||||
* @param bld The HDFS builder
|
||||
* @return Returns a handle to the filesystem, or NULL on error.
|
||||
*/
|
||||
hdfsFS hdfsBuilderConnect(struct hdfsBuilder *bld);
|
||||
|
||||
/**
|
||||
* Create an HDFS builder.
|
||||
*
|
||||
* @return The HDFS builder, or NULL on error.
|
||||
*/
|
||||
struct hdfsBuilder *hdfsNewBuilder(void);
|
||||
|
||||
/**
|
||||
* In libhdfs: force the builder to always create a new instance of the FileSystem,
|
||||
* rather than possibly finding one in the cache.
|
||||
*
|
||||
* @param bld The HDFS builder
|
||||
* @deprecated No longer usefule in libwebhdfs.
|
||||
*/
|
||||
void hdfsBuilderSetForceNewInstance(struct hdfsBuilder *bld);
|
||||
|
||||
/**
|
||||
* Set the HDFS NameNode to connect to.
|
||||
*
|
||||
* @param bld The HDFS builder
|
||||
* @param nn The NameNode to use.
|
||||
*
|
||||
* If the string given is 'default', the default NameNode
|
||||
* configuration will be used (from the XML configuration files)
|
||||
*
|
||||
* If NULL is given, a LocalFileSystem will be created.
|
||||
*
|
||||
* If the string starts with a protocol type such as file:// or
|
||||
* hdfs://, this protocol type will be used. If not, the
|
||||
* hdfs:// protocol type will be used.
|
||||
*
|
||||
* You may specify a NameNode port in the usual way by
|
||||
* passing a string of the format hdfs://<hostname>:<port>.
|
||||
* Alternately, you may set the port with
|
||||
* hdfsBuilderSetNameNodePort. However, you must not pass the
|
||||
* port in two different ways.
|
||||
*/
|
||||
void hdfsBuilderSetNameNode(struct hdfsBuilder *bld, const char *nn);
|
||||
|
||||
/**
|
||||
* Set the port of the HDFS NameNode to connect to.
|
||||
*
|
||||
* @param bld The HDFS builder
|
||||
* @param port The port.
|
||||
*/
|
||||
void hdfsBuilderSetNameNodePort(struct hdfsBuilder *bld, tPort port);
|
||||
|
||||
/**
|
||||
* Set the username to use when connecting to the HDFS cluster.
|
||||
*
|
||||
* @param bld The HDFS builder
|
||||
* @param userName The user name. The string will be shallow-copied.
|
||||
*/
|
||||
void hdfsBuilderSetUserName(struct hdfsBuilder *bld, const char *userName);
|
||||
|
||||
/**
|
||||
* Set the path to the Kerberos ticket cache to use when connecting to
|
||||
* the HDFS cluster.
|
||||
*
|
||||
* @param bld The HDFS builder
|
||||
* @param kerbTicketCachePath The Kerberos ticket cache path. The string
|
||||
* will be shallow-copied.
|
||||
*/
|
||||
void hdfsBuilderSetKerbTicketCachePath(struct hdfsBuilder *bld,
|
||||
const char *kerbTicketCachePath);
|
||||
|
||||
/**
|
||||
* Free an HDFS builder.
|
||||
*
|
||||
* @param bld The HDFS builder
|
||||
*/
|
||||
void hdfsFreeBuilder(struct hdfsBuilder *bld);
|
||||
|
||||
/**
|
||||
* Get a configuration string.
|
||||
*
|
||||
* @param key The key to find
|
||||
* @param val (out param) The value. This will be set to NULL if the
|
||||
* key isn't found. You must free this string with
|
||||
* hdfsConfStrFree.
|
||||
*
|
||||
* @return 0 on success; nonzero error code otherwise.
|
||||
* Failure to find the key is not an error.
|
||||
*/
|
||||
int hdfsConfGetStr(const char *key, char **val);
|
||||
|
||||
/**
|
||||
* Get a configuration integer.
|
||||
*
|
||||
* @param key The key to find
|
||||
* @param val (out param) The value. This will NOT be changed if the
|
||||
* key isn't found.
|
||||
*
|
||||
* @return 0 on success; nonzero error code otherwise.
|
||||
* Failure to find the key is not an error.
|
||||
*/
|
||||
int hdfsConfGetInt(const char *key, int32_t *val);
|
||||
|
||||
/**
|
||||
* Free a configuration string found with hdfsConfGetStr.
|
||||
*
|
||||
* @param val A configuration string obtained from hdfsConfGetStr
|
||||
*/
|
||||
void hdfsConfStrFree(char *val);
|
||||
|
||||
/**
|
||||
* hdfsDisconnect - Disconnect from the hdfs file system.
|
||||
* Disconnect from hdfs.
|
||||
*
|
||||
* In libwebhdfs, we simply free the hdfsFS,
|
||||
* so do not use it after hdfsCopy/hdfsMove/hdfsGetDefaultBlockSize which still use JNI for FileSystem connection.
|
||||
*
|
||||
* @param fs The configured filesystem handle.
|
||||
* @return Returns 0 on success, -1 on error.
|
||||
*/
|
||||
int hdfsDisconnect(hdfsFS fs);
|
||||
|
||||
|
||||
/**
|
||||
* hdfsOpenFile - Open a hdfs file in given mode.
|
||||
* In libwebhdfs we simply store corresponding information in a hdfsFile.
|
||||
*
|
||||
* @param fs The configured filesystem handle.
|
||||
* @param path The full path to the file.
|
||||
* @param flags - an | of bits/fcntl.h file flags - supported flags are O_RDONLY, O_WRONLY (meaning create or overwrite i.e., implies O_TRUNCAT),
|
||||
* O_WRONLY|O_APPEND. Other flags are generally ignored other than (O_RDWR || (O_EXCL & O_CREAT)) which return NULL and set errno equal ENOTSUP.
|
||||
* @param bufferSize Size of buffer for read/write - pass 0 if you want
|
||||
* to use the default configured values.
|
||||
* @param replication Block replication - pass 0 if you want to use
|
||||
* the default configured values.
|
||||
* @param blocksize Size of block - pass 0 if you want to use the
|
||||
* default configured values.
|
||||
* @return Returns the handle to the open file or NULL on error.
|
||||
*/
|
||||
hdfsFile hdfsOpenFile(hdfsFS fs, const char* path, int flags,
|
||||
int bufferSize, short replication, tSize blocksize);
|
||||
|
||||
|
||||
/**
|
||||
* hdfsCloseFile - Close an open file.
|
||||
* @param fs The configured filesystem handle.
|
||||
* @param file The file handle.
|
||||
* @return Returns 0 on success, -1 on error.
|
||||
*/
|
||||
int hdfsCloseFile(hdfsFS fs, hdfsFile file);
|
||||
|
||||
|
||||
/**
|
||||
* hdfsExists - Checks if a given path exsits on the filesystem
|
||||
* @param fs The configured filesystem handle.
|
||||
* @param path The path to look for
|
||||
* @return Returns 0 on success, -1 on error.
|
||||
*/
|
||||
int hdfsExists(hdfsFS fs, const char *path);
|
||||
|
||||
|
||||
/**
|
||||
* hdfsSeek - Seek to given offset in file.
|
||||
* This works only for files opened in read-only mode.
|
||||
* In libwebhdfs we store the offset in the local hdfsFile handle, thus
|
||||
* in this function we simply set the local offset.
|
||||
*
|
||||
* @param fs The configured filesystem handle.
|
||||
* @param file The file handle.
|
||||
* @param desiredPos Offset into the file to seek into.
|
||||
* @return Returns 0 on success, -1 on error.
|
||||
*/
|
||||
int hdfsSeek(hdfsFS fs, hdfsFile file, tOffset desiredPos);
|
||||
|
||||
|
||||
/**
|
||||
* hdfsTell - Get the current offset in the file, in bytes.
|
||||
* In libwebhdfs the current offset is stored in the local hdfsFile handle,
|
||||
* thus this function simply sets the local offset.
|
||||
* @param fs The configured filesystem handle.
|
||||
* @param file The file handle.
|
||||
* @return Current offset, -1 on error.
|
||||
*/
|
||||
tOffset hdfsTell(hdfsFS fs, hdfsFile file);
|
||||
|
||||
|
||||
/**
|
||||
* hdfsRead - Read data from an open file.
|
||||
* In libwebhdfs the reading starts from the current offset which is stored in the hdfsFile handle
|
||||
* @param fs The configured filesystem handle.
|
||||
* @param file The file handle.
|
||||
* @param buffer The buffer to copy read bytes into.
|
||||
* @param length The length of the buffer.
|
||||
* @return On success, a positive number indicating how many bytes
|
||||
* were read.
|
||||
* On end-of-file, 0.
|
||||
* On error, -1. Errno will be set to the error code.
|
||||
* Just like the POSIX read function, hdfsRead will return -1
|
||||
* and set errno to EINTR if data is temporarily unavailable,
|
||||
* but we are not yet at the end of the file.
|
||||
*/
|
||||
tSize hdfsRead(hdfsFS fs, hdfsFile file, void* buffer, tSize length);
|
||||
|
||||
/**
|
||||
* hdfsPread - Positional read of data from an open file.
|
||||
* @param fs The configured filesystem handle.
|
||||
* @param file The file handle.
|
||||
* @param position Position from which to read
|
||||
* @param buffer The buffer to copy read bytes into.
|
||||
* @param length The length of the buffer.
|
||||
* @return Returns the number of bytes actually read, possibly less than
|
||||
* than length;-1 on error.
|
||||
*/
|
||||
tSize hdfsPread(hdfsFS fs, hdfsFile file, tOffset position,
|
||||
void* buffer, tSize length);
|
||||
|
||||
|
||||
/**
|
||||
* hdfsWrite - Write data into an open file.
|
||||
* @param fs The configured filesystem handle.
|
||||
* @param file The file handle.
|
||||
* @param buffer The data.
|
||||
* @param length The no. of bytes to write.
|
||||
* @return Returns the number of bytes written, -1 on error.
|
||||
*/
|
||||
tSize hdfsWrite(hdfsFS fs, hdfsFile file, const void* buffer,
|
||||
tSize length);
|
||||
|
||||
|
||||
/**
|
||||
* hdfsWrite - Flush the data. No use for libwebhdfs.
|
||||
* @param fs The configured filesystem handle.
|
||||
* @param file The file handle.
|
||||
* @return Returns 0 on success, -1 on error.
|
||||
* @deprecated Not usefule in libwebhdfs.
|
||||
*/
|
||||
int hdfsFlush(hdfsFS fs, hdfsFile file);
|
||||
|
||||
|
||||
/**
|
||||
* hdfsHFlush - Flush out the data in client's user buffer. After the
|
||||
* return of this call, new readers will see the data.
|
||||
* @param fs configured filesystem handle
|
||||
* @param file file handle
|
||||
* @return 0 on success, -1 on error and sets errno
|
||||
* @deprecated Not usefule in libwebhdfs.
|
||||
*/
|
||||
int hdfsHFlush(hdfsFS fs, hdfsFile file);
|
||||
|
||||
|
||||
/**
|
||||
* hdfsAvailable - Number of bytes that can be read from this
|
||||
* input stream.
|
||||
* @param fs The configured filesystem handle.
|
||||
* @param file The file handle.
|
||||
* @return Returns available bytes; -1 on error.
|
||||
*/
|
||||
int hdfsAvailable(hdfsFS fs, hdfsFile file);
|
||||
|
||||
|
||||
/**
|
||||
* hdfsCopy - Copy file from one filesystem to another.
|
||||
* @param srcFS The handle to source filesystem.
|
||||
* @param src The path of source file.
|
||||
* @param dstFS The handle to destination filesystem.
|
||||
* @param dst The path of destination file.
|
||||
* @return Returns 0 on success, -1 on error.
|
||||
*/
|
||||
int hdfsCopy(hdfsFS srcFS, const char* src, hdfsFS dstFS, const char* dst);
|
||||
|
||||
|
||||
/**
|
||||
* hdfsMove - Move file from one filesystem to another.
|
||||
* @param srcFS The handle to source filesystem.
|
||||
* @param src The path of source file.
|
||||
* @param dstFS The handle to destination filesystem.
|
||||
* @param dst The path of destination file.
|
||||
* @return Returns 0 on success, -1 on error.
|
||||
*/
|
||||
int hdfsMove(hdfsFS srcFS, const char* src, hdfsFS dstFS, const char* dst);
|
||||
|
||||
|
||||
/**
|
||||
* hdfsDelete - Delete file.
|
||||
* @param fs The configured filesystem handle.
|
||||
* @param path The path of the file.
|
||||
* @param recursive if path is a directory and set to
|
||||
* non-zero, the directory is deleted else throws an exception. In
|
||||
* case of a file the recursive argument is irrelevant.
|
||||
* @return Returns 0 on success, -1 on error.
|
||||
*/
|
||||
int hdfsDelete(hdfsFS fs, const char* path, int recursive);
|
||||
|
||||
/**
|
||||
* hdfsRename - Rename file.
|
||||
* @param fs The configured filesystem handle.
|
||||
* @param oldPath The path of the source file.
|
||||
* @param newPath The path of the destination file.
|
||||
* @return Returns 0 on success, -1 on error.
|
||||
*/
|
||||
int hdfsRename(hdfsFS fs, const char* oldPath, const char* newPath);
|
||||
|
||||
|
||||
/**
|
||||
* hdfsGetWorkingDirectory - Get the current working directory for
|
||||
* the given filesystem. In libwebhdfs it is retrieved from local hdfsFS handle.
|
||||
* @param fs The configured filesystem handle.
|
||||
* @param buffer The user-buffer to copy path of cwd into.
|
||||
* @param bufferSize The length of user-buffer.
|
||||
* @return Returns buffer, NULL on error.
|
||||
*/
|
||||
char* hdfsGetWorkingDirectory(hdfsFS fs, char *buffer, size_t bufferSize);
|
||||
|
||||
|
||||
/**
|
||||
* hdfsSetWorkingDirectory - Set the working directory. All relative
|
||||
* paths will be resolved relative to it. In libwebhdfs the local hdfsFS is modified.
|
||||
* @param fs The configured filesystem handle.
|
||||
* @param path The path of the new 'cwd'.
|
||||
* @return Returns 0 on success, -1 on error.
|
||||
*/
|
||||
int hdfsSetWorkingDirectory(hdfsFS fs, const char* path);
|
||||
|
||||
|
||||
/**
|
||||
* hdfsCreateDirectory - Make the given file and all non-existent
|
||||
* parents into directories.
|
||||
* @param fs The configured filesystem handle.
|
||||
* @param path The path of the directory.
|
||||
* @return Returns 0 on success, -1 on error.
|
||||
*/
|
||||
int hdfsCreateDirectory(hdfsFS fs, const char* path);
|
||||
|
||||
|
||||
/**
|
||||
* hdfsSetReplication - Set the replication of the specified
|
||||
* file to the supplied value
|
||||
* @param fs The configured filesystem handle.
|
||||
* @param path The path of the file.
|
||||
* @return Returns 0 on success, -1 on error.
|
||||
*/
|
||||
int hdfsSetReplication(hdfsFS fs, const char* path, int16_t replication);
|
||||
|
||||
|
||||
/**
|
||||
* hdfsListDirectory - Get list of files/directories for a given
|
||||
* directory-path. hdfsFreeFileInfo should be called to deallocate memory.
|
||||
* @param fs The configured filesystem handle.
|
||||
* @param path The path of the directory.
|
||||
* @param numEntries Set to the number of files/directories in path.
|
||||
* @return Returns a dynamically-allocated array of hdfsFileInfo
|
||||
* objects; NULL on error.
|
||||
*/
|
||||
hdfsFileInfo *hdfsListDirectory(hdfsFS fs, const char* path,
|
||||
int *numEntries);
|
||||
|
||||
|
||||
/**
|
||||
* hdfsGetPathInfo - Get information about a path as a (dynamically
|
||||
* allocated) single hdfsFileInfo struct. hdfsFreeFileInfo should be
|
||||
* called when the pointer is no longer needed.
|
||||
* @param fs The configured filesystem handle.
|
||||
* @param path The path of the file.
|
||||
* @return Returns a dynamically-allocated hdfsFileInfo object;
|
||||
* NULL on error.
|
||||
*/
|
||||
hdfsFileInfo *hdfsGetPathInfo(hdfsFS fs, const char* path);
|
||||
|
||||
|
||||
/**
|
||||
* hdfsFreeFileInfo - Free up the hdfsFileInfo array (including fields)
|
||||
* @param hdfsFileInfo The array of dynamically-allocated hdfsFileInfo
|
||||
* objects.
|
||||
* @param numEntries The size of the array.
|
||||
*/
|
||||
void hdfsFreeFileInfo(hdfsFileInfo *hdfsFileInfo, int numEntries);
|
||||
|
||||
|
||||
/**
|
||||
* hdfsGetHosts - Get hostnames where a particular block (determined by
|
||||
* pos & blocksize) of a file is stored. The last element in the array
|
||||
* is NULL. Due to replication, a single block could be present on
|
||||
* multiple hosts.
|
||||
* @param fs The configured filesystem handle.
|
||||
* @param path The path of the file.
|
||||
* @param start The start of the block.
|
||||
* @param length The length of the block.
|
||||
* @return Returns a dynamically-allocated 2-d array of blocks-hosts;
|
||||
* NULL on error.
|
||||
*
|
||||
* Not supported yet but will be supported by libwebhdfs based on webhdfs.
|
||||
*/
|
||||
char*** hdfsGetHosts(hdfsFS fs, const char* path,
|
||||
tOffset start, tOffset length);
|
||||
|
||||
|
||||
/**
|
||||
* hdfsFreeHosts - Free up the structure returned by hdfsGetHosts
|
||||
* @param hdfsFileInfo The array of dynamically-allocated hdfsFileInfo
|
||||
* objects.
|
||||
* @param numEntries The size of the array.
|
||||
*/
|
||||
void hdfsFreeHosts(char ***blockHosts);
|
||||
|
||||
|
||||
/**
|
||||
* hdfsGetDefaultBlockSize - Get the optimum blocksize.
|
||||
* @param fs The configured filesystem handle.
|
||||
* @return Returns the blocksize; -1 on error.
|
||||
*/
|
||||
tOffset hdfsGetDefaultBlockSize(hdfsFS fs);
|
||||
|
||||
|
||||
/**
|
||||
* hdfsGetCapacity - Return the raw capacity of the filesystem.
|
||||
* @param fs The configured filesystem handle.
|
||||
* @return Returns the raw-capacity; -1 on error.
|
||||
*
|
||||
* Not supported yet but will be supported by libwebhdfs based on webhdfs.
|
||||
*/
|
||||
tOffset hdfsGetCapacity(hdfsFS fs);
|
||||
|
||||
|
||||
/**
|
||||
* hdfsGetUsed - Return the total raw size of all files in the filesystem.
|
||||
* @param fs The configured filesystem handle.
|
||||
* @return Returns the total-size; -1 on error.
|
||||
*
|
||||
* Not supported yet but will be supported by libwebhdfs based on webhdfs.
|
||||
*/
|
||||
tOffset hdfsGetUsed(hdfsFS fs);
|
||||
|
||||
/**
|
||||
* hdfsChown
|
||||
* @param fs The configured filesystem handle.
|
||||
* @param path the path to the file or directory
|
||||
* @param owner this is a string in Hadoop land. Set to null or "" if only setting group
|
||||
* @param group this is a string in Hadoop land. Set to null or "" if only setting user
|
||||
* @return 0 on success else -1
|
||||
*/
|
||||
int hdfsChown(hdfsFS fs, const char* path, const char *owner, const char *group);
|
||||
|
||||
/**
|
||||
* hdfsChmod
|
||||
* @param fs The configured filesystem handle.
|
||||
* @param path the path to the file or directory
|
||||
* @param mode the bitmask to set it to
|
||||
* @return 0 on success else -1
|
||||
*/
|
||||
int hdfsChmod(hdfsFS fs, const char* path, short mode);
|
||||
|
||||
/**
|
||||
* hdfsUtime
|
||||
* @param fs The configured filesystem handle.
|
||||
* @param path the path to the file or directory
|
||||
* @param mtime new modification time or -1 for no change
|
||||
* @param atime new access time or -1 for no change
|
||||
* @return 0 on success else -1
|
||||
*/
|
||||
int hdfsUtime(hdfsFS fs, const char* path, tTime mtime, tTime atime);
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
#endif
|
||||
|
||||
#endif /*LIB_WEBHDFS_H*/
|
|
@ -17,7 +17,7 @@
|
|||
|
||||
bin=`which $0`
|
||||
bin=`dirname ${bin}`
|
||||
bin=`cd "$bin"; pwd`
|
||||
bin=`cd "$bin" > /dev/null; pwd`
|
||||
|
||||
DEFAULT_LIBEXEC_DIR="$bin"/../libexec
|
||||
HADOOP_LIBEXEC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR}
|
||||
|
|
|
@ -19,7 +19,7 @@
|
|||
# See javadoc of package-info.java for org.apache.hadoop.metrics2 for details
|
||||
|
||||
*.sink.file.class=org.apache.hadoop.metrics2.sink.FileSink
|
||||
# default sampling period
|
||||
# default sampling period, in seconds
|
||||
*.period=10
|
||||
|
||||
# The namenode-metrics.out will contain metrics from all context
|
||||
|
|
|
@ -39,6 +39,8 @@ import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_READ_PREFETCH_SIZE
|
|||
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_RETRY_WINDOW_BASE;
|
||||
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_SOCKET_CACHE_CAPACITY_DEFAULT;
|
||||
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_SOCKET_CACHE_CAPACITY_KEY;
|
||||
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_SOCKET_CACHE_EXPIRY_MSEC_DEFAULT;
|
||||
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_SOCKET_CACHE_EXPIRY_MSEC_KEY;
|
||||
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_SOCKET_TIMEOUT_KEY;
|
||||
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_USE_LEGACY_BLOCKREADER;
|
||||
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_USE_LEGACY_BLOCKREADER_DEFAULT;
|
||||
|
@ -209,6 +211,7 @@ public class DFSClient implements java.io.Closeable {
|
|||
final int writePacketSize;
|
||||
final int socketTimeout;
|
||||
final int socketCacheCapacity;
|
||||
final long socketCacheExpiry;
|
||||
/** Wait time window (in msec) if BlockMissingException is caught */
|
||||
final int timeWindow;
|
||||
final int nCachedConnRetry;
|
||||
|
@ -257,6 +260,8 @@ public class DFSClient implements java.io.Closeable {
|
|||
taskId = conf.get("mapreduce.task.attempt.id", "NONMAPREDUCE");
|
||||
socketCacheCapacity = conf.getInt(DFS_CLIENT_SOCKET_CACHE_CAPACITY_KEY,
|
||||
DFS_CLIENT_SOCKET_CACHE_CAPACITY_DEFAULT);
|
||||
socketCacheExpiry = conf.getLong(DFS_CLIENT_SOCKET_CACHE_EXPIRY_MSEC_KEY,
|
||||
DFS_CLIENT_SOCKET_CACHE_EXPIRY_MSEC_DEFAULT);
|
||||
prefetchSize = conf.getLong(DFS_CLIENT_READ_PREFETCH_SIZE_KEY,
|
||||
10 * defaultBlockSize);
|
||||
timeWindow = conf
|
||||
|
@ -427,7 +432,7 @@ public class DFSClient implements java.io.Closeable {
|
|||
Joiner.on(',').join(localInterfaceAddrs) + "]");
|
||||
}
|
||||
|
||||
this.socketCache = new SocketCache(dfsClientConf.socketCacheCapacity);
|
||||
this.socketCache = SocketCache.getInstance(dfsClientConf.socketCacheCapacity, dfsClientConf.socketCacheExpiry);
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -641,7 +646,6 @@ public class DFSClient implements java.io.Closeable {
|
|||
void abort() {
|
||||
clientRunning = false;
|
||||
closeAllFilesBeingWritten(true);
|
||||
socketCache.clear();
|
||||
|
||||
try {
|
||||
// remove reference to this client and stop the renewer,
|
||||
|
@ -688,7 +692,6 @@ public class DFSClient implements java.io.Closeable {
|
|||
public synchronized void close() throws IOException {
|
||||
if(clientRunning) {
|
||||
closeAllFilesBeingWritten(false);
|
||||
socketCache.clear();
|
||||
clientRunning = false;
|
||||
getLeaseRenewer().closeClient(this);
|
||||
// close connections to the namenode
|
||||
|
|
|
@ -74,6 +74,8 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
|
|||
public static final String DFS_CLIENT_FAILOVER_CONNECTION_RETRIES_ON_SOCKET_TIMEOUTS_KEY = "dfs.client.failover.connection.retries.on.timeouts";
|
||||
public static final int DFS_CLIENT_FAILOVER_CONNECTION_RETRIES_ON_SOCKET_TIMEOUTS_DEFAULT = 0;
|
||||
|
||||
public static final String DFS_CLIENT_SOCKET_CACHE_EXPIRY_MSEC_KEY = "dfs.client.socketcache.expiryMsec";
|
||||
public static final long DFS_CLIENT_SOCKET_CACHE_EXPIRY_MSEC_DEFAULT = 2 * 60 * 1000;
|
||||
public static final String DFS_NAMENODE_BACKUP_ADDRESS_KEY = "dfs.namenode.backup.address";
|
||||
public static final String DFS_NAMENODE_BACKUP_ADDRESS_DEFAULT = "localhost:50100";
|
||||
public static final String DFS_NAMENODE_BACKUP_HTTP_ADDRESS_KEY = "dfs.namenode.backup.http-address";
|
||||
|
|
|
@ -254,6 +254,9 @@ public class HftpFileSystem extends FileSystem
|
|||
", assuming security is disabled");
|
||||
return null;
|
||||
}
|
||||
if (LOG.isDebugEnabled()) {
|
||||
LOG.debug("Exception getting delegation token", e);
|
||||
}
|
||||
throw e;
|
||||
}
|
||||
for (Token<? extends TokenIdentifier> t : c.getAllTokens()) {
|
||||
|
|
|
@ -26,33 +26,112 @@ import java.util.Iterator;
|
|||
import java.util.List;
|
||||
import java.util.Map.Entry;
|
||||
|
||||
import java.io.IOException;
|
||||
import com.google.common.base.Preconditions;
|
||||
import com.google.common.collect.LinkedListMultimap;
|
||||
import org.apache.commons.logging.Log;
|
||||
import org.apache.hadoop.HadoopIllegalArgumentException;
|
||||
import org.apache.commons.logging.LogFactory;
|
||||
import org.apache.hadoop.classification.InterfaceAudience;
|
||||
import org.apache.hadoop.hdfs.protocol.datatransfer.IOStreamPair;
|
||||
import org.apache.hadoop.io.IOUtils;
|
||||
import org.apache.hadoop.util.Daemon;
|
||||
import org.apache.hadoop.util.StringUtils;
|
||||
|
||||
/**
|
||||
* A cache of sockets.
|
||||
* A cache of input stream sockets to Data Node.
|
||||
*/
|
||||
class SocketCache {
|
||||
static final Log LOG = LogFactory.getLog(SocketCache.class);
|
||||
private static final Log LOG = LogFactory.getLog(SocketCache.class);
|
||||
|
||||
private final LinkedListMultimap<SocketAddress, SocketAndStreams> multimap;
|
||||
private final int capacity;
|
||||
|
||||
/**
|
||||
* Create a SocketCache with the given capacity.
|
||||
* @param capacity Max cache size.
|
||||
*/
|
||||
public SocketCache(int capacity) {
|
||||
multimap = LinkedListMultimap.create();
|
||||
this.capacity = capacity;
|
||||
if (capacity <= 0) {
|
||||
LOG.debug("SocketCache disabled in configuration.");
|
||||
@InterfaceAudience.Private
|
||||
static class SocketAndStreams implements Closeable {
|
||||
public final Socket sock;
|
||||
public final IOStreamPair ioStreams;
|
||||
long createTime;
|
||||
|
||||
public SocketAndStreams(Socket s, IOStreamPair ioStreams) {
|
||||
this.sock = s;
|
||||
this.ioStreams = ioStreams;
|
||||
this.createTime = System.currentTimeMillis();
|
||||
}
|
||||
|
||||
@Override
|
||||
public void close() {
|
||||
if (ioStreams != null) {
|
||||
IOUtils.closeStream(ioStreams.in);
|
||||
IOUtils.closeStream(ioStreams.out);
|
||||
}
|
||||
IOUtils.closeSocket(sock);
|
||||
}
|
||||
|
||||
public long getCreateTime() {
|
||||
return this.createTime;
|
||||
}
|
||||
}
|
||||
|
||||
private Daemon daemon;
|
||||
/** A map for per user per datanode. */
|
||||
private static LinkedListMultimap<SocketAddress, SocketAndStreams> multimap =
|
||||
LinkedListMultimap.create();
|
||||
private static int capacity;
|
||||
private static long expiryPeriod;
|
||||
private static SocketCache scInstance = new SocketCache();
|
||||
private static boolean isInitedOnce = false;
|
||||
|
||||
public static synchronized SocketCache getInstance(int c, long e) {
|
||||
// capacity is only initialized once
|
||||
if (isInitedOnce == false) {
|
||||
capacity = c;
|
||||
expiryPeriod = e;
|
||||
|
||||
if (capacity == 0 ) {
|
||||
LOG.info("SocketCache disabled.");
|
||||
}
|
||||
else if (expiryPeriod == 0) {
|
||||
throw new IllegalStateException("Cannot initialize expiryPeriod to " +
|
||||
expiryPeriod + "when cache is enabled.");
|
||||
}
|
||||
isInitedOnce = true;
|
||||
} else { //already initialized once
|
||||
if (capacity != c || expiryPeriod != e) {
|
||||
LOG.info("capacity and expiry periods already set to " + capacity +
|
||||
" and " + expiryPeriod + " respectively. Cannot set it to " + c +
|
||||
" and " + e);
|
||||
}
|
||||
}
|
||||
|
||||
return scInstance;
|
||||
}
|
||||
|
||||
private boolean isDaemonStarted() {
|
||||
return (daemon == null)? false: true;
|
||||
}
|
||||
|
||||
private synchronized void startExpiryDaemon() {
|
||||
// start daemon only if not already started
|
||||
if (isDaemonStarted() == true) {
|
||||
return;
|
||||
}
|
||||
|
||||
daemon = new Daemon(new Runnable() {
|
||||
@Override
|
||||
public void run() {
|
||||
try {
|
||||
SocketCache.this.run();
|
||||
} catch(InterruptedException e) {
|
||||
//noop
|
||||
} finally {
|
||||
SocketCache.this.clear();
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public String toString() {
|
||||
return String.valueOf(SocketCache.this);
|
||||
}
|
||||
});
|
||||
daemon.start();
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -61,16 +140,17 @@ class SocketCache {
|
|||
* @return A socket with unknown state, possibly closed underneath. Or null.
|
||||
*/
|
||||
public synchronized SocketAndStreams get(SocketAddress remote) {
|
||||
|
||||
if (capacity <= 0) { // disabled
|
||||
return null;
|
||||
}
|
||||
|
||||
List<SocketAndStreams> socklist = multimap.get(remote);
|
||||
if (socklist == null) {
|
||||
|
||||
List<SocketAndStreams> sockStreamList = multimap.get(remote);
|
||||
if (sockStreamList == null) {
|
||||
return null;
|
||||
}
|
||||
|
||||
Iterator<SocketAndStreams> iter = socklist.iterator();
|
||||
Iterator<SocketAndStreams> iter = sockStreamList.iterator();
|
||||
while (iter.hasNext()) {
|
||||
SocketAndStreams candidate = iter.next();
|
||||
iter.remove();
|
||||
|
@ -86,14 +166,16 @@ class SocketCache {
|
|||
* @param sock socket not used by anyone.
|
||||
*/
|
||||
public synchronized void put(Socket sock, IOStreamPair ioStreams) {
|
||||
|
||||
Preconditions.checkNotNull(sock);
|
||||
SocketAndStreams s = new SocketAndStreams(sock, ioStreams);
|
||||
if (capacity <= 0) {
|
||||
// Cache disabled.
|
||||
s.close();
|
||||
return;
|
||||
}
|
||||
|
||||
Preconditions.checkNotNull(sock);
|
||||
|
||||
startExpiryDaemon();
|
||||
|
||||
SocketAddress remoteAddr = sock.getRemoteSocketAddress();
|
||||
if (remoteAddr == null) {
|
||||
|
@ -106,13 +188,33 @@ class SocketCache {
|
|||
if (capacity == multimap.size()) {
|
||||
evictOldest();
|
||||
}
|
||||
multimap.put(remoteAddr, new SocketAndStreams(sock, ioStreams));
|
||||
multimap.put(remoteAddr, s);
|
||||
}
|
||||
|
||||
public synchronized int size() {
|
||||
return multimap.size();
|
||||
}
|
||||
|
||||
/**
|
||||
* Evict and close sockets older than expiry period from the cache.
|
||||
*/
|
||||
private synchronized void evictExpired(long expiryPeriod) {
|
||||
while (multimap.size() != 0) {
|
||||
Iterator<Entry<SocketAddress, SocketAndStreams>> iter =
|
||||
multimap.entries().iterator();
|
||||
Entry<SocketAddress, SocketAndStreams> entry = iter.next();
|
||||
// if oldest socket expired, remove it
|
||||
if (entry == null ||
|
||||
System.currentTimeMillis() - entry.getValue().getCreateTime() <
|
||||
expiryPeriod) {
|
||||
break;
|
||||
}
|
||||
iter.remove();
|
||||
SocketAndStreams s = entry.getValue();
|
||||
s.close();
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Evict the oldest entry in the cache.
|
||||
*/
|
||||
|
@ -120,7 +222,8 @@ class SocketCache {
|
|||
Iterator<Entry<SocketAddress, SocketAndStreams>> iter =
|
||||
multimap.entries().iterator();
|
||||
if (!iter.hasNext()) {
|
||||
throw new IllegalStateException("Cannot evict from empty cache!");
|
||||
throw new IllegalStateException("Cannot evict from empty cache! " +
|
||||
"capacity: " + capacity);
|
||||
}
|
||||
Entry<SocketAddress, SocketAndStreams> entry = iter.next();
|
||||
iter.remove();
|
||||
|
@ -128,39 +231,32 @@ class SocketCache {
|
|||
s.close();
|
||||
}
|
||||
|
||||
/**
|
||||
* Periodically check in the cache and expire the entries
|
||||
* older than expiryPeriod minutes
|
||||
*/
|
||||
private void run() throws InterruptedException {
|
||||
for(long lastExpiryTime = System.currentTimeMillis();
|
||||
!Thread.interrupted();
|
||||
Thread.sleep(expiryPeriod)) {
|
||||
final long elapsed = System.currentTimeMillis() - lastExpiryTime;
|
||||
if (elapsed >= expiryPeriod) {
|
||||
evictExpired(expiryPeriod);
|
||||
lastExpiryTime = System.currentTimeMillis();
|
||||
}
|
||||
}
|
||||
clear();
|
||||
throw new InterruptedException("Daemon Interrupted");
|
||||
}
|
||||
|
||||
/**
|
||||
* Empty the cache, and close all sockets.
|
||||
*/
|
||||
public synchronized void clear() {
|
||||
for (SocketAndStreams s : multimap.values()) {
|
||||
s.close();
|
||||
private synchronized void clear() {
|
||||
for (SocketAndStreams sockAndStream : multimap.values()) {
|
||||
sockAndStream.close();
|
||||
}
|
||||
multimap.clear();
|
||||
}
|
||||
|
||||
@Override
|
||||
protected void finalize() {
|
||||
clear();
|
||||
}
|
||||
|
||||
@InterfaceAudience.Private
|
||||
static class SocketAndStreams implements Closeable {
|
||||
public final Socket sock;
|
||||
public final IOStreamPair ioStreams;
|
||||
|
||||
public SocketAndStreams(Socket s, IOStreamPair ioStreams) {
|
||||
this.sock = s;
|
||||
this.ioStreams = ioStreams;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void close() {
|
||||
if (ioStreams != null) {
|
||||
IOUtils.closeStream(ioStreams.in);
|
||||
IOUtils.closeStream(ioStreams.out);
|
||||
}
|
||||
IOUtils.closeSocket(sock);
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
|
|
|
@ -612,7 +612,8 @@ public class DatanodeManager {
|
|||
+ " storage " + nodeReg.getStorageID());
|
||||
|
||||
DatanodeDescriptor nodeS = datanodeMap.get(nodeReg.getStorageID());
|
||||
DatanodeDescriptor nodeN = getDatanodeByHost(nodeReg.getXferAddr());
|
||||
DatanodeDescriptor nodeN = host2DatanodeMap.getDatanodeByXferAddr(
|
||||
nodeReg.getIpAddr(), nodeReg.getXferPort());
|
||||
|
||||
if (nodeN != null && nodeN != nodeS) {
|
||||
NameNode.LOG.info("BLOCK* NameSystem.registerDatanode: "
|
||||
|
|
|
@ -159,6 +159,35 @@ class Host2NodesMap {
|
|||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Find data node by its transfer address
|
||||
*
|
||||
* @return DatanodeDescriptor if found or null otherwise
|
||||
*/
|
||||
public DatanodeDescriptor getDatanodeByXferAddr(String ipAddr,
|
||||
int xferPort) {
|
||||
if (ipAddr==null) {
|
||||
return null;
|
||||
}
|
||||
|
||||
hostmapLock.readLock().lock();
|
||||
try {
|
||||
DatanodeDescriptor[] nodes = map.get(ipAddr);
|
||||
// no entry
|
||||
if (nodes== null) {
|
||||
return null;
|
||||
}
|
||||
for(DatanodeDescriptor containedNode:nodes) {
|
||||
if (xferPort == containedNode.getXferPort()) {
|
||||
return containedNode;
|
||||
}
|
||||
}
|
||||
return null;
|
||||
} finally {
|
||||
hostmapLock.readLock().unlock();
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public String toString() {
|
||||
final StringBuilder b = new StringBuilder(getClass().getSimpleName())
|
||||
|
|
|
@ -276,6 +276,9 @@ public class JspHelper {
|
|||
FIELD_PERCENT_REMAINING = 9,
|
||||
FIELD_ADMIN_STATE = 10,
|
||||
FIELD_DECOMMISSIONED = 11,
|
||||
FIELD_BLOCKPOOL_USED = 12,
|
||||
FIELD_PERBLOCKPOOL_USED = 13,
|
||||
FIELD_FAILED_VOLUMES = 14,
|
||||
SORT_ORDER_ASC = 1,
|
||||
SORT_ORDER_DSC = 2;
|
||||
|
||||
|
@ -303,6 +306,12 @@ public class JspHelper {
|
|||
sortField = FIELD_ADMIN_STATE;
|
||||
} else if (field.equals("decommissioned")) {
|
||||
sortField = FIELD_DECOMMISSIONED;
|
||||
} else if (field.equals("bpused")) {
|
||||
sortField = FIELD_BLOCKPOOL_USED;
|
||||
} else if (field.equals("pcbpused")) {
|
||||
sortField = FIELD_PERBLOCKPOOL_USED;
|
||||
} else if (field.equals("volfails")) {
|
||||
sortField = FIELD_FAILED_VOLUMES;
|
||||
} else {
|
||||
sortField = FIELD_NAME;
|
||||
}
|
||||
|
@ -361,6 +370,18 @@ public class JspHelper {
|
|||
case FIELD_NAME:
|
||||
ret = d1.getHostName().compareTo(d2.getHostName());
|
||||
break;
|
||||
case FIELD_BLOCKPOOL_USED:
|
||||
dlong = d1.getBlockPoolUsed() - d2.getBlockPoolUsed();
|
||||
ret = (dlong < 0) ? -1 : ((dlong > 0) ? 1 : 0);
|
||||
break;
|
||||
case FIELD_PERBLOCKPOOL_USED:
|
||||
ddbl = d1.getBlockPoolUsedPercent() - d2.getBlockPoolUsedPercent();
|
||||
ret = (ddbl < 0) ? -1 : ((ddbl > 0) ? 1 : 0);
|
||||
break;
|
||||
case FIELD_FAILED_VOLUMES:
|
||||
int dint = d1.getVolumeFailures() - d2.getVolumeFailures();
|
||||
ret = (dint < 0) ? -1 : ((dint > 0) ? 1 : 0);
|
||||
break;
|
||||
}
|
||||
return (sortOrder == SORT_ORDER_DSC) ? -ret : ret;
|
||||
}
|
||||
|
|
|
@ -4059,7 +4059,10 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
|
|||
return "Safe mode is OFF.";
|
||||
String leaveMsg = "";
|
||||
if (areResourcesLow()) {
|
||||
leaveMsg = "Resources are low on NN. Safe mode must be turned off manually";
|
||||
leaveMsg = "Resources are low on NN. "
|
||||
+ "Please add or free up more resources then turn off safe mode manually. "
|
||||
+ "NOTE: If you turn off safe mode before adding resources, "
|
||||
+ "the NN will immediately return to safe mode.";
|
||||
} else {
|
||||
leaveMsg = "Safe mode will be turned off automatically";
|
||||
}
|
||||
|
|
|
@ -21,6 +21,7 @@ import java.io.IOException;
|
|||
import java.io.PrintStream;
|
||||
import java.net.InetSocketAddress;
|
||||
import java.net.URI;
|
||||
import java.security.PrivilegedExceptionAction;
|
||||
import java.util.ArrayList;
|
||||
import java.util.Arrays;
|
||||
import java.util.Collection;
|
||||
|
@ -510,7 +511,7 @@ public class NameNode {
|
|||
stopHttpServer();
|
||||
}
|
||||
|
||||
private void startTrashEmptier(Configuration conf) throws IOException {
|
||||
private void startTrashEmptier(final Configuration conf) throws IOException {
|
||||
long trashInterval =
|
||||
conf.getLong(FS_TRASH_INTERVAL_KEY, FS_TRASH_INTERVAL_DEFAULT);
|
||||
if (trashInterval == 0) {
|
||||
|
@ -519,7 +520,18 @@ public class NameNode {
|
|||
throw new IOException("Cannot start tresh emptier with negative interval."
|
||||
+ " Set " + FS_TRASH_INTERVAL_KEY + " to a positive value.");
|
||||
}
|
||||
this.emptier = new Thread(new Trash(conf).getEmptier(), "Trash Emptier");
|
||||
|
||||
// This may be called from the transitionToActive code path, in which
|
||||
// case the current user is the administrator, not the NN. The trash
|
||||
// emptier needs to run as the NN. See HDFS-3972.
|
||||
FileSystem fs = SecurityUtil.doAsLoginUser(
|
||||
new PrivilegedExceptionAction<FileSystem>() {
|
||||
@Override
|
||||
public FileSystem run() throws IOException {
|
||||
return FileSystem.get(conf);
|
||||
}
|
||||
});
|
||||
this.emptier = new Thread(new Trash(fs, conf).getEmptier(), "Trash Emptier");
|
||||
this.emptier.setDaemon(true);
|
||||
this.emptier.start();
|
||||
}
|
||||
|
|
|
@ -107,6 +107,10 @@ public class NameNodeHttpServer {
|
|||
DFSConfigKeys.DFS_WEB_AUTHENTICATION_KERBEROS_PRINCIPAL_KEY,
|
||||
SecurityUtil.getServerPrincipal(principalInConf,
|
||||
bindAddress.getHostName()));
|
||||
} else if (UserGroupInformation.isSecurityEnabled()) {
|
||||
LOG.error("WebHDFS and security are enabled, but configuration property '" +
|
||||
DFSConfigKeys.DFS_WEB_AUTHENTICATION_KERBEROS_PRINCIPAL_KEY +
|
||||
"' is not set.");
|
||||
}
|
||||
String httpKeytab = conf.get(
|
||||
DFSConfigKeys.DFS_WEB_AUTHENTICATION_KERBEROS_KEYTAB_KEY);
|
||||
|
@ -117,6 +121,10 @@ public class NameNodeHttpServer {
|
|||
params.put(
|
||||
DFSConfigKeys.DFS_WEB_AUTHENTICATION_KERBEROS_KEYTAB_KEY,
|
||||
httpKeytab);
|
||||
} else if (UserGroupInformation.isSecurityEnabled()) {
|
||||
LOG.error("WebHDFS and security are enabled, but configuration property '" +
|
||||
DFSConfigKeys.DFS_WEB_AUTHENTICATION_KERBEROS_KEYTAB_KEY +
|
||||
"' is not set.");
|
||||
}
|
||||
return params;
|
||||
}
|
||||
|
|
|
@ -78,6 +78,7 @@ import org.apache.hadoop.util.StringUtils;
|
|||
import org.apache.hadoop.util.Time;
|
||||
|
||||
import com.google.common.annotations.VisibleForTesting;
|
||||
import com.google.common.base.Preconditions;
|
||||
import com.google.common.collect.ImmutableList;
|
||||
|
||||
/**********************************************************
|
||||
|
@ -122,6 +123,8 @@ public class SecondaryNameNode implements Runnable {
|
|||
private CheckpointConf checkpointConf;
|
||||
private FSNamesystem namesystem;
|
||||
|
||||
private Thread checkpointThread;
|
||||
|
||||
|
||||
@Override
|
||||
public String toString() {
|
||||
|
@ -277,6 +280,15 @@ public class SecondaryNameNode implements Runnable {
|
|||
*/
|
||||
public void shutdown() {
|
||||
shouldRun = false;
|
||||
if (checkpointThread != null) {
|
||||
checkpointThread.interrupt();
|
||||
try {
|
||||
checkpointThread.join(10000);
|
||||
} catch (InterruptedException e) {
|
||||
LOG.info("Interrupted waiting to join on checkpointer thread");
|
||||
Thread.currentThread().interrupt(); // maintain status
|
||||
}
|
||||
}
|
||||
try {
|
||||
if (infoServer != null) infoServer.stop();
|
||||
} catch (Exception e) {
|
||||
|
@ -586,12 +598,20 @@ public class SecondaryNameNode implements Runnable {
|
|||
terminate(ret);
|
||||
}
|
||||
|
||||
// Create a never ending deamon
|
||||
Daemon checkpointThread = new Daemon(secondary);
|
||||
checkpointThread.start();
|
||||
secondary.startCheckpointThread();
|
||||
}
|
||||
|
||||
|
||||
public void startCheckpointThread() {
|
||||
Preconditions.checkState(checkpointThread == null,
|
||||
"Should not already have a thread");
|
||||
Preconditions.checkState(shouldRun, "shouldRun should be true");
|
||||
|
||||
checkpointThread = new Daemon(this);
|
||||
checkpointThread.start();
|
||||
}
|
||||
|
||||
|
||||
/**
|
||||
* Container for parsed command-line options.
|
||||
*/
|
||||
|
|
|
@ -20,6 +20,7 @@ package org.apache.hadoop.hdfs.tools;
|
|||
import java.io.File;
|
||||
import java.io.IOException;
|
||||
import java.net.InetSocketAddress;
|
||||
import java.security.PrivilegedExceptionAction;
|
||||
import java.util.ArrayList;
|
||||
import java.util.Collections;
|
||||
import java.util.HashMap;
|
||||
|
@ -53,6 +54,7 @@ import org.apache.hadoop.ipc.RPC;
|
|||
import org.apache.hadoop.ipc.RemoteException;
|
||||
import org.apache.hadoop.net.NetUtils;
|
||||
import org.apache.hadoop.security.RefreshUserMappingsProtocol;
|
||||
import org.apache.hadoop.security.SecurityUtil;
|
||||
import org.apache.hadoop.security.UserGroupInformation;
|
||||
import org.apache.hadoop.security.authorize.RefreshAuthorizationPolicyProtocol;
|
||||
import org.apache.hadoop.util.StringUtils;
|
||||
|
@@ -80,7 +82,7 @@ public class DFSAdmin extends FsShell {
|
|||
super(fs.getConf());
|
||||
if (!(fs instanceof DistributedFileSystem)) {
|
||||
throw new IllegalArgumentException("FileSystem " + fs.getUri() +
|
||||
" is not a distributed file system");
|
||||
" is not an HDFS file system");
|
||||
}
|
||||
this.dfs = (DistributedFileSystem)fs;
|
||||
}
|
||||
|
@@ -284,7 +286,7 @@ public class DFSAdmin extends FsShell {
|
|||
FileSystem fs = getFS();
|
||||
if (!(fs instanceof DistributedFileSystem)) {
|
||||
throw new IllegalArgumentException("FileSystem " + fs.getUri() +
|
||||
" is not a distributed file system");
|
||||
" is not an HDFS file system");
|
||||
}
|
||||
return (DistributedFileSystem)fs;
|
||||
}
|
||||
|
@@ -511,11 +513,17 @@ public class DFSAdmin extends FsShell {
|
|||
* @return an exit code indicating success or failure.
|
||||
* @throws IOException
|
||||
*/
|
||||
public int fetchImage(String[] argv, int idx) throws IOException {
|
||||
String infoServer = DFSUtil.getInfoServer(
|
||||
public int fetchImage(final String[] argv, final int idx) throws IOException {
|
||||
final String infoServer = DFSUtil.getInfoServer(
|
||||
HAUtil.getAddressOfActive(getDFS()), getConf(), false);
|
||||
TransferFsImage.downloadMostRecentImageToDirectory(infoServer,
|
||||
new File(argv[idx]));
|
||||
SecurityUtil.doAsCurrentUser(new PrivilegedExceptionAction<Void>() {
|
||||
@Override
|
||||
public Void run() throws Exception {
|
||||
TransferFsImage.downloadMostRecentImageToDirectory(infoServer,
|
||||
new File(argv[idx]));
|
||||
return null;
|
||||
}
|
||||
});
|
||||
return 0;
|
||||
}
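The fetchImage hunk wraps the image download in a PrivilegedExceptionAction handed to SecurityUtil.doAsCurrentUser, so the HTTP transfer runs with the caller's login credentials. A hedged sketch of that wrapping, reduced to plain JDK types; the Downloader interface and the direct action.run() call are stand-ins invented for the example (the patch itself delegates to SecurityUtil):

// Sketch of the "run the download as a privileged action" shape; not the real DFSAdmin code.
import java.io.File;
import java.security.PrivilegedExceptionAction;

class PrivilegedDownloadSketch {
  interface Downloader {
    void download(String fromInfoServer, File toDir) throws Exception;
  }

  static void fetchImage(final Downloader d, final String infoServer, final File dir)
      throws Exception {
    PrivilegedExceptionAction<Void> action = new PrivilegedExceptionAction<Void>() {
      @Override
      public Void run() throws Exception {
        d.download(infoServer, dir);   // runs with the caller's (Kerberos) credentials
        return null;
      }
    };
    // In the patch this action is handed to SecurityUtil.doAsCurrentUser(...);
    // here we simply invoke it to keep the sketch self-contained.
    action.run();
  }
}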
|
||||
|
||||
|
|
|
@@ -67,6 +67,25 @@ static const struct ExceptionInfo gExceptionInfo[] = {
|
|||
|
||||
};
|
||||
|
||||
void getExceptionInfo(const char *excName, int noPrintFlags,
|
||||
int *excErrno, int *shouldPrint)
|
||||
{
|
||||
int i;
|
||||
|
||||
for (i = 0; i < EXCEPTION_INFO_LEN; i++) {
|
||||
if (strstr(gExceptionInfo[i].name, excName)) {
|
||||
break;
|
||||
}
|
||||
}
|
||||
if (i < EXCEPTION_INFO_LEN) {
|
||||
*shouldPrint = !(gExceptionInfo[i].noPrintFlag & noPrintFlags);
|
||||
*excErrno = gExceptionInfo[i].excErrno;
|
||||
} else {
|
||||
*shouldPrint = 1;
|
||||
*excErrno = EINTERNAL;
|
||||
}
|
||||
}
|
||||
|
||||
int printExceptionAndFreeV(JNIEnv *env, jthrowable exc, int noPrintFlags,
|
||||
const char *fmt, va_list ap)
|
||||
{
|
||||
|
|
|
@@ -64,6 +64,21 @@
#define NOPRINT_EXC_PARENT_NOT_DIRECTORY 0x08
#define NOPRINT_EXC_ILLEGAL_ARGUMENT 0x10
/**
* Get information about an exception.
*
* @param excName The Exception name.
* This is a Java class name in JNI format.
* @param noPrintFlags Flags which determine which exceptions we should NOT
* print.
* @param excErrno (out param) The POSIX error number associated with the
* exception.
* @param shouldPrint (out param) Nonzero if we should print this exception,
* based on the noPrintFlags and its name.
*/
void getExceptionInfo(const char *excName, int noPrintFlags,
int *excErrno, int *shouldPrint);
/**
* Print out information about an exception and free it.
*
|
||||
|
|
|
@@ -40,12 +40,35 @@
</description>
</property>
<property>
  <name>dfs.namenode.rpc-address</name>
  <value></value>
  <description>
    RPC address that handles all clients requests. In the case of HA/Federation where multiple namenodes exist,
    the name service id is added to the name e.g. dfs.namenode.rpc-address.ns1
    dfs.namenode.rpc-address.EXAMPLENAMESERVICE
    The value of this property will take the form of hdfs://nn-host1:rpc-port.
  </description>
</property>
<property>
  <name>dfs.namenode.servicerpc-address</name>
  <value></value>
  <description>
    RPC address for HDFS Services communication. BackupNode, Datanodes and all other services should be
    connecting to this address if it is configured. In the case of HA/Federation where multiple namenodes exist,
    the name service id is added to the name e.g. dfs.namenode.servicerpc-address.ns1
    dfs.namenode.rpc-address.EXAMPLENAMESERVICE
    The value of this property will take the form of hdfs://nn-host1:rpc-port.
    If the value of this property is unset the value of dfs.namenode.rpc-address will be used as the default.
  </description>
</property>
<property>
  <name>dfs.namenode.secondary.http-address</name>
  <value>0.0.0.0:50090</value>
  <description>
    The secondary namenode http server address and port.
    If the port is 0 then the server will start on a free port.
  </description>
</property>
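The two descriptions above encode a small lookup protocol: under HA/Federation the nameservice id is appended to the property name, and dfs.namenode.servicerpc-address falls back to dfs.namenode.rpc-address when unset. A hedged sketch of that resolution order, using java.util.Properties instead of Hadoop's Configuration; Hadoop's real suffixing rules are richer than this (per-namenode ids, defaults), so treat it purely as an illustration of the fallback described in the text:

// Sketch of suffixed-key resolution with fallback; not Hadoop's actual DFSUtil logic.
import java.util.Properties;

class RpcAddressLookupSketch {
  static String resolveServiceRpcAddress(Properties conf, String nameserviceId) {
    String suffix = (nameserviceId == null || nameserviceId.isEmpty())
        ? "" : "." + nameserviceId;
    // Prefer the dedicated service RPC address for this nameservice...
    String addr = conf.getProperty("dfs.namenode.servicerpc-address" + suffix);
    if (addr == null || addr.isEmpty()) {
      // ...and fall back to the client RPC address, as the description states.
      addr = conf.getProperty("dfs.namenode.rpc-address" + suffix);
    }
    return addr;
  }

  public static void main(String[] args) {
    Properties conf = new Properties();
    conf.setProperty("dfs.namenode.rpc-address.ns1", "nn-host1:8020");
    System.out.println(resolveServiceRpcAddress(conf, "ns1")); // prints nn-host1:8020
  }
}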
|
||||
|
||||
|
@@ -54,7 +77,6 @@
|
|||
<value>0.0.0.0:50010</value>
|
||||
<description>
|
||||
The datanode server address and port for data transfer.
|
||||
If the port is 0 then the server will start on a free port.
|
||||
</description>
|
||||
</property>
|
||||
|
||||
|
@@ -63,7 +85,6 @@
|
|||
<value>0.0.0.0:50075</value>
|
||||
<description>
|
||||
The datanode http server address and port.
|
||||
If the port is 0 then the server will start on a free port.
|
||||
</description>
|
||||
</property>
|
||||
|
||||
|
@@ -72,7 +93,6 @@
|
|||
<value>0.0.0.0:50020</value>
|
||||
<description>
|
||||
The datanode ipc server address and port.
|
||||
If the port is 0 then the server will start on a free port.
|
||||
</description>
|
||||
</property>
|
||||
|
||||
|
@@ -87,7 +107,6 @@
|
|||
<value>0.0.0.0:50070</value>
|
||||
<description>
|
||||
The address and the base port where the dfs namenode web ui will listen on.
|
||||
If the port is 0 then the server will start on a free port.
|
||||
</description>
|
||||
</property>
|
||||
|
||||
|
|
|
@@ -0,0 +1,51 @@
|
|||
/**
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
* regarding copyright ownership. The ASF licenses this file
|
||||
* to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
package org.apache.hadoop.fs;
|
||||
|
||||
import org.junit.Test;
|
||||
import static org.junit.Assert.*;
|
||||
|
||||
import org.apache.commons.logging.Log;
|
||||
import org.apache.commons.logging.LogFactory;
|
||||
import org.apache.hadoop.util.NativeCodeLoader;
|
||||
|
||||
public class TestHdfsNativeCodeLoader {
|
||||
static final Log LOG = LogFactory.getLog(TestHdfsNativeCodeLoader.class);
|
||||
|
||||
private static boolean requireTestJni() {
|
||||
String rtj = System.getProperty("require.test.libhadoop");
|
||||
if (rtj == null) return false;
|
||||
if (rtj.compareToIgnoreCase("false") == 0) return false;
|
||||
return true;
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testNativeCodeLoaded() {
|
||||
if (requireTestJni() == false) {
|
||||
LOG.info("TestNativeCodeLoader: libhadoop.so testing is not required.");
|
||||
return;
|
||||
}
|
||||
if (!NativeCodeLoader.isNativeCodeLoaded()) {
|
||||
String LD_LIBRARY_PATH = System.getenv().get("LD_LIBRARY_PATH");
|
||||
if (LD_LIBRARY_PATH == null) LD_LIBRARY_PATH = "";
|
||||
fail("TestNativeCodeLoader: libhadoop.so testing was required, but " +
|
||||
"libhadoop.so was not loaded. LD_LIBRARY_PATH = " + LD_LIBRARY_PATH);
|
||||
}
|
||||
LOG.info("TestHdfsNativeCodeLoader: libhadoop.so is loaded.");
|
||||
}
|
||||
}
|
|
@@ -211,27 +211,40 @@ public class DFSTestUtil {
|
|||
|
||||
public static void createFile(FileSystem fs, Path fileName, long fileLen,
|
||||
short replFactor, long seed) throws IOException {
|
||||
createFile(fs, fileName, 1024, fileLen, fs.getDefaultBlockSize(fileName),
|
||||
replFactor, seed);
|
||||
}
|
||||
|
||||
public static void createFile(FileSystem fs, Path fileName, int bufferLen,
|
||||
long fileLen, long blockSize, short replFactor, long seed)
|
||||
throws IOException {
|
||||
assert bufferLen > 0;
|
||||
if (!fs.mkdirs(fileName.getParent())) {
|
||||
throw new IOException("Mkdirs failed to create " +
|
||||
fileName.getParent().toString());
|
||||
}
|
||||
FSDataOutputStream out = null;
|
||||
try {
|
||||
out = fs.create(fileName, replFactor);
|
||||
byte[] toWrite = new byte[1024];
|
||||
Random rb = new Random(seed);
|
||||
long bytesToWrite = fileLen;
|
||||
while (bytesToWrite>0) {
|
||||
rb.nextBytes(toWrite);
|
||||
int bytesToWriteNext = (1024<bytesToWrite)?1024:(int)bytesToWrite;
|
||||
|
||||
out.write(toWrite, 0, bytesToWriteNext);
|
||||
bytesToWrite -= bytesToWriteNext;
|
||||
out = fs.create(fileName, true, fs.getConf()
|
||||
.getInt(CommonConfigurationKeys.IO_FILE_BUFFER_SIZE_KEY, 4096),
|
||||
replFactor, blockSize);
|
||||
if (fileLen > 0) {
|
||||
byte[] toWrite = new byte[bufferLen];
|
||||
Random rb = new Random(seed);
|
||||
long bytesToWrite = fileLen;
|
||||
while (bytesToWrite>0) {
|
||||
rb.nextBytes(toWrite);
|
||||
int bytesToWriteNext = (bufferLen < bytesToWrite) ? bufferLen
|
||||
: (int) bytesToWrite;
|
||||
|
||||
out.write(toWrite, 0, bytesToWriteNext);
|
||||
bytesToWrite -= bytesToWriteNext;
|
||||
}
|
||||
}
|
||||
out.close();
|
||||
out = null;
|
||||
} finally {
|
||||
IOUtils.closeStream(out);
|
||||
if (out != null) {
|
||||
out.close();
|
||||
}
|
||||
}
|
||||
}
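The reworked helper now takes an explicit buffer length and block size, honours the requested replication through fs.create, and tolerates zero-length files. A short usage sketch of the widened overload shown in the hunk; the path, sizes and seed are made up for the example:

// Illustrative use of the widened createFile overload above; values are invented.
import java.io.IOException;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSTestUtil;

class CreateFileExample {
  static void writeSmallTestFile(FileSystem fs) throws IOException {
    Path p = new Path("/test/createFileExample.dat");
    long fileLen = 4 * 1024;                   // total bytes to write
    DFSTestUtil.createFile(fs, p,
        1024,                                  // bufferLen: bytes written per iteration
        fileLen,
        fs.getDefaultBlockSize(p),             // blockSize
        (short) 3,                             // replication factor
        0xBEEFL);                              // PRNG seed for the file contents
  }
}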
|
||||
|
||||
|
|
|
@@ -624,14 +624,20 @@ public class MiniDFSCluster {
|
|||
}
|
||||
|
||||
federation = nnTopology.isFederated();
|
||||
createNameNodesAndSetConf(
|
||||
nnTopology, manageNameDfsDirs, manageNameDfsSharedDirs,
|
||||
enableManagedDfsDirsRedundancy,
|
||||
format, operation, clusterId, conf);
|
||||
|
||||
try {
|
||||
createNameNodesAndSetConf(
|
||||
nnTopology, manageNameDfsDirs, manageNameDfsSharedDirs,
|
||||
enableManagedDfsDirsRedundancy,
|
||||
format, operation, clusterId, conf);
|
||||
} catch (IOException ioe) {
|
||||
LOG.error("IOE creating namenodes. Permissions dump:\n" +
|
||||
createPermissionsDiagnosisString(data_dir));
|
||||
throw ioe;
|
||||
}
|
||||
if (format) {
|
||||
if (data_dir.exists() && !FileUtil.fullyDelete(data_dir)) {
|
||||
throw new IOException("Cannot remove data directory: " + data_dir);
|
||||
throw new IOException("Cannot remove data directory: " + data_dir +
|
||||
createPermissionsDiagnosisString(data_dir));
|
||||
}
|
||||
}
|
||||
|
||||
|
@@ -647,6 +653,27 @@ public class MiniDFSCluster {
ProxyUsers.refreshSuperUserGroupsConfiguration(conf);
}
/**
* @return a debug string which can help diagnose an error of why
* a given directory might have a permissions error in the context
* of a test case
*/
private String createPermissionsDiagnosisString(File path) {
StringBuilder sb = new StringBuilder();
while (path != null) {
sb.append("path '" + path + "': ").append("\n");
sb.append("\tabsolute:").append(path.getAbsolutePath()).append("\n");
sb.append("\tpermissions: ");
sb.append(path.isDirectory() ? "d": "-");
sb.append(path.canRead() ? "r" : "-");
sb.append(path.canWrite() ? "w" : "-");
sb.append(path.canExecute() ? "x" : "-");
sb.append("\n");
path = path.getParentFile();
}
return sb.toString();
}
private void createNameNodesAndSetConf(MiniDFSNNTopology nnTopology,
boolean manageNameDfsDirs, boolean manageNameDfsSharedDirs,
boolean enableManagedDfsDirsRedundancy, boolean format,
|
||||
|
|
|
@@ -25,6 +25,7 @@ import static org.mockito.Mockito.spy;
|
|||
import java.io.IOException;
|
||||
import java.net.InetSocketAddress;
|
||||
import java.net.Socket;
|
||||
import java.security.PrivilegedExceptionAction;
|
||||
|
||||
import org.apache.commons.logging.Log;
|
||||
import org.apache.commons.logging.LogFactory;
|
||||
|
@@ -54,10 +55,12 @@ public class TestConnCache {
|
|||
|
||||
static final int BLOCK_SIZE = 4096;
|
||||
static final int FILE_SIZE = 3 * BLOCK_SIZE;
|
||||
|
||||
final static int CACHE_SIZE = 4;
|
||||
final static long CACHE_EXPIRY_MS = 200;
|
||||
static Configuration conf = null;
|
||||
static MiniDFSCluster cluster = null;
|
||||
static FileSystem fs = null;
|
||||
static SocketCache cache;
|
||||
|
||||
static final Path testFile = new Path("/testConnCache.dat");
|
||||
static byte authenticData[] = null;
|
||||
|
@@ -93,6 +96,9 @@ public class TestConnCache {
|
|||
public static void setupCluster() throws Exception {
|
||||
final int REPLICATION_FACTOR = 1;
|
||||
|
||||
/* create a socket cache. There is only one socket cache per jvm */
|
||||
cache = SocketCache.getInstance(CACHE_SIZE, CACHE_EXPIRY_MS);
|
||||
|
||||
util = new BlockReaderTestUtil(REPLICATION_FACTOR);
|
||||
cluster = util.getCluster();
|
||||
conf = util.getConf();
|
||||
|
@@ -142,10 +148,7 @@ public class TestConnCache {
|
|||
* Test the SocketCache itself.
|
||||
*/
|
||||
@Test
|
||||
public void testSocketCache() throws IOException {
|
||||
final int CACHE_SIZE = 4;
|
||||
SocketCache cache = new SocketCache(CACHE_SIZE);
|
||||
|
||||
public void testSocketCache() throws Exception {
|
||||
// Make a client
|
||||
InetSocketAddress nnAddr =
|
||||
new InetSocketAddress("localhost", cluster.getNameNodePort());
|
||||
|
@@ -159,6 +162,7 @@ public class TestConnCache {
|
|||
DataNode dn = util.getDataNode(block);
|
||||
InetSocketAddress dnAddr = dn.getXferAddress();
|
||||
|
||||
|
||||
// Make some sockets to the DN
|
||||
Socket[] dnSockets = new Socket[CACHE_SIZE];
|
||||
for (int i = 0; i < dnSockets.length; ++i) {
|
||||
|
@@ -166,6 +170,7 @@ public class TestConnCache {
|
|||
dnAddr.getAddress(), dnAddr.getPort());
|
||||
}
|
||||
|
||||
|
||||
// Insert a socket to the NN
|
||||
Socket nnSock = new Socket(nnAddr.getAddress(), nnAddr.getPort());
|
||||
cache.put(nnSock, null);
|
||||
|
@@ -179,7 +184,7 @@ public class TestConnCache {
|
|||
|
||||
assertEquals("NN socket evicted", null, cache.get(nnAddr));
|
||||
assertTrue("Evicted socket closed", nnSock.isClosed());
|
||||
|
||||
|
||||
// Lookup the DN socks
|
||||
for (Socket dnSock : dnSockets) {
|
||||
assertEquals("Retrieve cached sockets", dnSock, cache.get(dnAddr).sock);
|
||||
|
@@ -189,6 +194,51 @@ public class TestConnCache {
assertEquals("Cache is empty", 0, cache.size());
}
/**
* Test the SocketCache expiry.
* Verify that socket cache entries expire after the set
* expiry time.
*/
@Test
public void testSocketCacheExpiry() throws Exception {
// Make a client
InetSocketAddress nnAddr =
new InetSocketAddress("localhost", cluster.getNameNodePort());
DFSClient client = new DFSClient(nnAddr, conf);
// Find out the DN addr
LocatedBlock block =
client.getNamenode().getBlockLocations(
testFile.toString(), 0, FILE_SIZE)
.getLocatedBlocks().get(0);
DataNode dn = util.getDataNode(block);
InetSocketAddress dnAddr = dn.getXferAddress();
// Make some sockets to the DN and put in cache
Socket[] dnSockets = new Socket[CACHE_SIZE];
for (int i = 0; i < dnSockets.length; ++i) {
dnSockets[i] = client.socketFactory.createSocket(
dnAddr.getAddress(), dnAddr.getPort());
cache.put(dnSockets[i], null);
}
// Client side still has the sockets cached
assertEquals(CACHE_SIZE, client.socketCache.size());
//sleep for a second and see if it expired
Thread.sleep(CACHE_EXPIRY_MS + 1000);
// Client side has no sockets cached
assertEquals(0, client.socketCache.size());
//sleep for another second and see if
//the daemon thread runs fine on empty cache
Thread.sleep(CACHE_EXPIRY_MS + 1000);
}
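testSocketCacheExpiry above depends on two behaviours: cached entries are evicted once they are older than CACHE_EXPIRY_MS, and the eviction logic keeps working after the cache has drained to empty. A minimal sketch of time-based eviction against plain JDK collections; it evicts lazily on access rather than from a daemon thread, and makes no claim about how SocketCache is actually implemented:

// Time-based eviction sketch; SocketCache's real internals may differ.
import java.util.Iterator;
import java.util.LinkedHashMap;
import java.util.Map;

class ExpiringCacheSketch<K, V> {
  private final long expiryMs;
  private final Map<K, Entry<V>> entries = new LinkedHashMap<K, Entry<V>>();

  private static final class Entry<V> {
    final V value;
    final long insertedAt = System.currentTimeMillis();
    Entry(V value) { this.value = value; }
  }

  ExpiringCacheSketch(long expiryMs) { this.expiryMs = expiryMs; }

  synchronized void put(K key, V value) { entries.put(key, new Entry<V>(value)); }

  synchronized V get(K key) {
    evictExpired();
    Entry<V> e = entries.get(key);
    return e == null ? null : e.value;
  }

  synchronized int size() {
    evictExpired();
    return entries.size();
  }

  // Safe to call on an empty cache, which is what the second sleep in the test checks.
  private void evictExpired() {
    long now = System.currentTimeMillis();
    for (Iterator<Map.Entry<K, Entry<V>>> it = entries.entrySet().iterator(); it.hasNext();) {
      if (now - it.next().getValue().insertedAt > expiryMs) {
        it.remove();
      }
    }
  }
}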
|
||||
|
||||
|
||||
/**
|
||||
* Read a file served entirely from one DN. Seek around and read from
|
||||
* different offsets. And verify that they all use the same socket.
|
||||
|
@@ -229,33 +279,6 @@ public class TestConnCache {
|
|||
|
||||
in.close();
|
||||
}
|
||||
|
||||
/**
|
||||
* Test that the socket cache can be disabled by setting the capacity to
|
||||
* 0. Regression test for HDFS-3365.
|
||||
*/
|
||||
@Test
|
||||
public void testDisableCache() throws IOException {
|
||||
LOG.info("Starting testDisableCache()");
|
||||
|
||||
// Reading with the normally configured filesystem should
|
||||
// cache a socket.
|
||||
DFSTestUtil.readFile(fs, testFile);
|
||||
assertEquals(1, ((DistributedFileSystem)fs).dfs.socketCache.size());
|
||||
|
||||
// Configure a new instance with no caching, ensure that it doesn't
|
||||
// cache anything
|
||||
Configuration confWithoutCache = new Configuration(fs.getConf());
|
||||
confWithoutCache.setInt(
|
||||
DFSConfigKeys.DFS_CLIENT_SOCKET_CACHE_CAPACITY_KEY, 0);
|
||||
FileSystem fsWithoutCache = FileSystem.newInstance(confWithoutCache);
|
||||
try {
|
||||
DFSTestUtil.readFile(fsWithoutCache, testFile);
|
||||
assertEquals(0, ((DistributedFileSystem)fsWithoutCache).dfs.socketCache.size());
|
||||
} finally {
|
||||
fsWithoutCache.close();
|
||||
}
|
||||
}
|
||||
|
||||
@AfterClass
|
||||
public static void teardownCluster() throws Exception {
|
||||
|
|
|
@@ -37,7 +37,6 @@ import org.apache.commons.logging.Log;
|
|||
import org.apache.commons.logging.LogFactory;
|
||||
import org.apache.hadoop.conf.Configuration;
|
||||
import org.apache.hadoop.fs.FSDataInputStream;
|
||||
import org.apache.hadoop.fs.FSDataOutputStream;
|
||||
import org.apache.hadoop.fs.FileSystem;
|
||||
import org.apache.hadoop.fs.Path;
|
||||
import org.apache.hadoop.hdfs.protocol.DatanodeID;
|
||||
|
@@ -141,13 +140,6 @@ public class TestDataTransferProtocol {
|
|||
}
|
||||
}
|
||||
|
||||
void createFile(FileSystem fs, Path path, int fileLen) throws IOException {
|
||||
byte [] arr = new byte[fileLen];
|
||||
FSDataOutputStream out = fs.create(path);
|
||||
out.write(arr);
|
||||
out.close();
|
||||
}
|
||||
|
||||
void readFile(FileSystem fs, Path path, int fileLen) throws IOException {
|
||||
byte [] arr = new byte[fileLen];
|
||||
FSDataInputStream in = fs.open(path);
|
||||
|
@@ -357,7 +349,9 @@
|
|||
|
||||
int fileLen = Math.min(conf.getInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 4096), 4096);
|
||||
|
||||
createFile(fileSys, file, fileLen);
|
||||
DFSTestUtil.createFile(fileSys, file, fileLen, fileLen,
|
||||
fileSys.getDefaultBlockSize(file),
|
||||
fileSys.getDefaultReplication(file), 0L);
|
||||
|
||||
// get the first blockid for the file
|
||||
final ExtendedBlock firstBlock = DFSTestUtil.getFirstBlock(fileSys, file);
|
||||
|
|
|
@@ -27,6 +27,7 @@ import java.net.InetSocketAddress;
|
|||
import org.apache.commons.logging.Log;
|
||||
import org.apache.commons.logging.LogFactory;
|
||||
import org.apache.hadoop.conf.Configuration;
|
||||
import org.apache.hadoop.hdfs.protocol.DatanodeID;
|
||||
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
|
||||
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
|
||||
import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
|
||||
|
@@ -91,6 +92,58 @@ public class TestDatanodeRegistration {
|
|||
}
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testChangeStorageID() throws Exception {
|
||||
final String DN_IP_ADDR = "127.0.0.1";
|
||||
final String DN_HOSTNAME = "localhost";
|
||||
final int DN_XFER_PORT = 12345;
|
||||
final int DN_INFO_PORT = 12346;
|
||||
final int DN_IPC_PORT = 12347;
|
||||
Configuration conf = new HdfsConfiguration();
|
||||
MiniDFSCluster cluster = null;
|
||||
try {
|
||||
cluster = new MiniDFSCluster.Builder(conf)
|
||||
.numDataNodes(0)
|
||||
.build();
|
||||
InetSocketAddress addr = new InetSocketAddress(
|
||||
"localhost",
|
||||
cluster.getNameNodePort());
|
||||
DFSClient client = new DFSClient(addr, conf);
|
||||
NamenodeProtocols rpcServer = cluster.getNameNodeRpc();
|
||||
|
||||
// register a datanode
|
||||
DatanodeID dnId = new DatanodeID(DN_IP_ADDR, DN_HOSTNAME,
|
||||
"fake-storage-id", DN_XFER_PORT, DN_INFO_PORT, DN_IPC_PORT);
|
||||
long nnCTime = cluster.getNamesystem().getFSImage().getStorage()
|
||||
.getCTime();
|
||||
StorageInfo mockStorageInfo = mock(StorageInfo.class);
|
||||
doReturn(nnCTime).when(mockStorageInfo).getCTime();
|
||||
doReturn(HdfsConstants.LAYOUT_VERSION).when(mockStorageInfo)
|
||||
.getLayoutVersion();
|
||||
DatanodeRegistration dnReg = new DatanodeRegistration(dnId,
|
||||
mockStorageInfo, null, VersionInfo.getVersion());
|
||||
rpcServer.registerDatanode(dnReg);
|
||||
|
||||
DatanodeInfo[] report = client.datanodeReport(DatanodeReportType.ALL);
|
||||
assertEquals("Expected a registered datanode", 1, report.length);
|
||||
|
||||
// register the same datanode again with a different storage ID
|
||||
dnId = new DatanodeID(DN_IP_ADDR, DN_HOSTNAME,
|
||||
"changed-fake-storage-id", DN_XFER_PORT, DN_INFO_PORT, DN_IPC_PORT);
|
||||
dnReg = new DatanodeRegistration(dnId,
|
||||
mockStorageInfo, null, VersionInfo.getVersion());
|
||||
rpcServer.registerDatanode(dnReg);
|
||||
|
||||
report = client.datanodeReport(DatanodeReportType.ALL);
|
||||
assertEquals("Datanode with changed storage ID not recognized",
|
||||
1, report.length);
|
||||
} finally {
|
||||
if (cluster != null) {
|
||||
cluster.shutdown();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testRegistrationWithDifferentSoftwareVersions() throws Exception {
|
||||
Configuration conf = new HdfsConfiguration();
|
||||
|
|
|
@@ -120,12 +120,9 @@ public class TestDistributedFileSystem {
|
|||
DFSTestUtil.readFile(fileSys, p);
|
||||
|
||||
DFSClient client = ((DistributedFileSystem)fileSys).dfs;
|
||||
SocketCache cache = client.socketCache;
|
||||
assertEquals(1, cache.size());
|
||||
|
||||
fileSys.close();
|
||||
|
||||
assertEquals(0, cache.size());
|
||||
} finally {
|
||||
if (cluster != null) {cluster.shutdown();}
|
||||
}
|
||||
|
|
|
@@ -79,7 +79,8 @@ public class TestFileStatus {
|
|||
hftpfs = cluster.getHftpFileSystem(0);
|
||||
dfsClient = new DFSClient(NameNode.getAddress(conf), conf);
|
||||
file1 = new Path("filestatus.dat");
|
||||
writeFile(fs, file1, 1, fileSize, blockSize);
|
||||
DFSTestUtil.createFile(fs, file1, fileSize, fileSize, blockSize, (short) 1,
|
||||
seed);
|
||||
}
|
||||
|
||||
@AfterClass
|
||||
|
@@ -87,18 +88,6 @@ public class TestFileStatus {
|
|||
fs.close();
|
||||
cluster.shutdown();
|
||||
}
|
||||
|
||||
private static void writeFile(FileSystem fileSys, Path name, int repl,
|
||||
int fileSize, int blockSize) throws IOException {
|
||||
// Create and write a file that contains three blocks of data
|
||||
FSDataOutputStream stm = fileSys.create(name, true,
|
||||
HdfsConstants.IO_FILE_BUFFER_SIZE, (short)repl, (long)blockSize);
|
||||
byte[] buffer = new byte[fileSize];
|
||||
Random rand = new Random(seed);
|
||||
rand.nextBytes(buffer);
|
||||
stm.write(buffer);
|
||||
stm.close();
|
||||
}
|
||||
|
||||
private void checkFile(FileSystem fileSys, Path name, int repl)
|
||||
throws IOException, InterruptedException, TimeoutException {
|
||||
|
@@ -218,7 +207,8 @@ public class TestFileStatus {
|
|||
|
||||
// create another file that is smaller than a block.
|
||||
Path file2 = new Path(dir, "filestatus2.dat");
|
||||
writeFile(fs, file2, 1, blockSize/4, blockSize);
|
||||
DFSTestUtil.createFile(fs, file2, blockSize/4, blockSize/4, blockSize,
|
||||
(short) 1, seed);
|
||||
checkFile(fs, file2, 1);
|
||||
|
||||
// verify file attributes
|
||||
|
@@ -230,7 +220,8 @@ public class TestFileStatus {
|
|||
|
||||
// Create another file in the same directory
|
||||
Path file3 = new Path(dir, "filestatus3.dat");
|
||||
writeFile(fs, file3, 1, blockSize/4, blockSize);
|
||||
DFSTestUtil.createFile(fs, file3, blockSize/4, blockSize/4, blockSize,
|
||||
(short) 1, seed);
|
||||
checkFile(fs, file3, 1);
|
||||
file3 = fs.makeQualified(file3);
|
||||
|
||||
|
|
|
@@ -110,9 +110,7 @@ public class TestGetBlocks {
|
|||
// do the writing but do not close the FSDataOutputStream
|
||||
// in order to mimic the ongoing writing
|
||||
final Path fileName = new Path("/file1");
|
||||
stm = fileSys.create(
|
||||
fileName,
|
||||
true,
|
||||
stm = fileSys.create(fileName, true,
|
||||
fileSys.getConf().getInt(
|
||||
CommonConfigurationKeys.IO_FILE_BUFFER_SIZE_KEY, 4096),
|
||||
(short) 3, blockSize);
|
||||
|
@@ -180,29 +178,15 @@ public class TestGetBlocks {
|
|||
|
||||
final short REPLICATION_FACTOR = (short) 2;
|
||||
final int DEFAULT_BLOCK_SIZE = 1024;
|
||||
final Random r = new Random();
|
||||
|
||||
CONF.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, DEFAULT_BLOCK_SIZE);
|
||||
MiniDFSCluster cluster = new MiniDFSCluster.Builder(CONF).numDataNodes(
|
||||
REPLICATION_FACTOR).build();
|
||||
try {
|
||||
cluster.waitActive();
|
||||
|
||||
// create a file with two blocks
|
||||
FileSystem fs = cluster.getFileSystem();
|
||||
FSDataOutputStream out = fs.create(new Path("/tmp.txt"),
|
||||
REPLICATION_FACTOR);
|
||||
byte[] data = new byte[1024];
|
||||
long fileLen = 2 * DEFAULT_BLOCK_SIZE;
|
||||
long bytesToWrite = fileLen;
|
||||
while (bytesToWrite > 0) {
|
||||
r.nextBytes(data);
|
||||
int bytesToWriteNext = (1024 < bytesToWrite) ? 1024
|
||||
: (int) bytesToWrite;
|
||||
out.write(data, 0, bytesToWriteNext);
|
||||
bytesToWrite -= bytesToWriteNext;
|
||||
}
|
||||
out.close();
|
||||
DFSTestUtil.createFile(cluster.getFileSystem(), new Path("/tmp.txt"),
|
||||
fileLen, REPLICATION_FACTOR, 0L);
|
||||
|
||||
// get blocks & data nodes
|
||||
List<LocatedBlock> locatedBlocks;
|
||||
|
|
|
@@ -41,6 +41,8 @@ public class TestHftpDelegationToken {
|
|||
|
||||
@Test
|
||||
public void testHdfsDelegationToken() throws Exception {
|
||||
SecurityUtilTestHelper.setTokenServiceUseIp(true);
|
||||
|
||||
final Configuration conf = new Configuration();
|
||||
conf.set(HADOOP_SECURITY_AUTHENTICATION, "kerberos");
|
||||
UserGroupInformation.setConfiguration(conf);
|
||||
|
@@ -265,4 +267,4 @@ public class TestHftpDelegationToken {
|
|||
@Override
|
||||
protected void initDelegationToken() throws IOException {}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
@@ -102,9 +102,15 @@ public class TestHftpFileSystem {
|
|||
|
||||
@AfterClass
|
||||
public static void tearDown() throws IOException {
|
||||
hdfs.close();
|
||||
hftpFs.close();
|
||||
cluster.shutdown();
|
||||
if (hdfs != null) {
|
||||
hdfs.close();
|
||||
}
|
||||
if (hftpFs != null) {
|
||||
hftpFs.close();
|
||||
}
|
||||
if (cluster != null) {
|
||||
cluster.shutdown();
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
|
|
|
@@ -53,19 +53,23 @@ public class TestHftpURLTimeouts {
|
|||
boolean timedout = false;
|
||||
|
||||
HftpFileSystem fs = (HftpFileSystem)FileSystem.get(uri, conf);
|
||||
HttpURLConnection conn = fs.openConnection("/", "");
|
||||
timedout = false;
|
||||
try {
|
||||
// this will consume the only slot in the backlog
|
||||
conn.getInputStream();
|
||||
} catch (SocketTimeoutException ste) {
|
||||
timedout = true;
|
||||
assertEquals("Read timed out", ste.getMessage());
|
||||
HttpURLConnection conn = fs.openConnection("/", "");
|
||||
timedout = false;
|
||||
try {
|
||||
// this will consume the only slot in the backlog
|
||||
conn.getInputStream();
|
||||
} catch (SocketTimeoutException ste) {
|
||||
timedout = true;
|
||||
assertEquals("Read timed out", ste.getMessage());
|
||||
} finally {
|
||||
if (conn != null) conn.disconnect();
|
||||
}
|
||||
assertTrue("read timedout", timedout);
|
||||
assertTrue("connect timedout", checkConnectTimeout(fs, false));
|
||||
} finally {
|
||||
if (conn != null) conn.disconnect();
|
||||
fs.close();
|
||||
}
|
||||
assertTrue("read timedout", timedout);
|
||||
assertTrue("connect timedout", checkConnectTimeout(fs, false));
|
||||
}
|
||||
|
||||
@Test
|
||||
|
@@ -79,20 +83,24 @@ public class TestHftpURLTimeouts {
|
|||
boolean timedout = false;
|
||||
|
||||
HsftpFileSystem fs = (HsftpFileSystem)FileSystem.get(uri, conf);
|
||||
HttpURLConnection conn = null;
|
||||
timedout = false;
|
||||
try {
|
||||
// this will consume the only slot in the backlog
|
||||
conn = fs.openConnection("/", "");
|
||||
} catch (SocketTimeoutException ste) {
|
||||
// SSL expects a negotiation, so it will timeout on read, unlike hftp
|
||||
timedout = true;
|
||||
assertEquals("Read timed out", ste.getMessage());
|
||||
HttpURLConnection conn = null;
|
||||
timedout = false;
|
||||
try {
|
||||
// this will consume the only slot in the backlog
|
||||
conn = fs.openConnection("/", "");
|
||||
} catch (SocketTimeoutException ste) {
|
||||
// SSL expects a negotiation, so it will timeout on read, unlike hftp
|
||||
timedout = true;
|
||||
assertEquals("Read timed out", ste.getMessage());
|
||||
} finally {
|
||||
if (conn != null) conn.disconnect();
|
||||
}
|
||||
assertTrue("ssl read connect timedout", timedout);
|
||||
assertTrue("connect timedout", checkConnectTimeout(fs, true));
|
||||
} finally {
|
||||
if (conn != null) conn.disconnect();
|
||||
fs.close();
|
||||
}
|
||||
assertTrue("ssl read connect timedout", timedout);
|
||||
assertTrue("connect timedout", checkConnectTimeout(fs, true));
|
||||
}
|
||||
|
||||
private boolean checkConnectTimeout(HftpFileSystem fs, boolean ignoreReadTimeout)
|
||||
|
|
|
@@ -52,22 +52,6 @@ public class TestInjectionForSimulatedStorage {
|
|||
private static final Log LOG = LogFactory.getLog(
|
||||
"org.apache.hadoop.hdfs.TestInjectionForSimulatedStorage");
|
||||
|
||||
|
||||
private void writeFile(FileSystem fileSys, Path name, int repl)
|
||||
throws IOException {
|
||||
// create and write a file that contains three blocks of data
|
||||
FSDataOutputStream stm = fileSys.create(name, true, fileSys.getConf()
|
||||
.getInt(CommonConfigurationKeys.IO_FILE_BUFFER_SIZE_KEY, 4096),
|
||||
(short) repl, blockSize);
|
||||
byte[] buffer = new byte[filesize];
|
||||
for (int i=0; i<buffer.length; i++) {
|
||||
buffer[i] = '1';
|
||||
}
|
||||
stm.write(buffer);
|
||||
stm.close();
|
||||
}
|
||||
|
||||
// Waits for all of the blocks to have expected replication
|
||||
|
||||
// Waits for all of the blocks to have expected replication
|
||||
private void waitForBlockReplication(String filename,
|
||||
|
@@ -149,7 +133,8 @@ public class TestInjectionForSimulatedStorage {
|
|||
cluster.getNameNodePort()),
|
||||
conf);
|
||||
|
||||
writeFile(cluster.getFileSystem(), testPath, numDataNodes);
|
||||
DFSTestUtil.createFile(cluster.getFileSystem(), testPath, filesize,
|
||||
filesize, blockSize, (short) numDataNodes, 0L);
|
||||
waitForBlockReplication(testFile, dfsClient.getNamenode(), numDataNodes, 20);
|
||||
Iterable<Block>[] blocksList = cluster.getAllBlockReports(bpid);
|
||||
|
||||
|
|
|
@@ -50,19 +50,6 @@ public class TestModTime {
|
|||
Random myrand = new Random();
|
||||
Path hostsFile;
|
||||
Path excludeFile;
|
||||
|
||||
private void writeFile(FileSystem fileSys, Path name, int repl)
|
||||
throws IOException {
|
||||
// create and write a file that contains three blocks of data
|
||||
FSDataOutputStream stm = fileSys.create(name, true, fileSys.getConf()
|
||||
.getInt(CommonConfigurationKeys.IO_FILE_BUFFER_SIZE_KEY, 4096),
|
||||
(short) repl, blockSize);
|
||||
byte[] buffer = new byte[fileSize];
|
||||
Random rand = new Random(seed);
|
||||
rand.nextBytes(buffer);
|
||||
stm.write(buffer);
|
||||
stm.close();
|
||||
}
|
||||
|
||||
private void cleanupFile(FileSystem fileSys, Path name) throws IOException {
|
||||
assertTrue(fileSys.exists(name));
|
||||
|
@@ -105,7 +92,8 @@ public class TestModTime {
|
|||
System.out.println("Creating testdir1 and testdir1/test1.dat.");
|
||||
Path dir1 = new Path("testdir1");
|
||||
Path file1 = new Path(dir1, "test1.dat");
|
||||
writeFile(fileSys, file1, replicas);
|
||||
DFSTestUtil.createFile(fileSys, file1, fileSize, fileSize, blockSize,
|
||||
(short) replicas, seed);
|
||||
FileStatus stat = fileSys.getFileStatus(file1);
|
||||
long mtime1 = stat.getModificationTime();
|
||||
assertTrue(mtime1 != 0);
|
||||
|
@@ -120,7 +108,8 @@ public class TestModTime {
|
|||
//
|
||||
System.out.println("Creating testdir1/test2.dat.");
|
||||
Path file2 = new Path(dir1, "test2.dat");
|
||||
writeFile(fileSys, file2, replicas);
|
||||
DFSTestUtil.createFile(fileSys, file2, fileSize, fileSize, blockSize,
|
||||
(short) replicas, seed);
|
||||
stat = fileSys.getFileStatus(file2);
|
||||
|
||||
//
|
||||
|
|