Merge branch 'trunk' into HDFS-7240
Conflicts:
	hadoop-hdfs-project/hadoop-hdfs/dev-support/findbugsExcludeFile.xml
	hadoop-hdfs-project/hadoop-hdfs/pom.xml
	hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs
	hadoop-project/pom.xml
	hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestAMRMClient.java
commit d0bd0f6233
@@ -765,6 +765,7 @@ hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/static/jquery-1.10.2.min.js
hadoop-tools/hadoop-sls/src/main/html/js/thirdparty/jquery.js
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/jquery
Apache HBase - Server which contains JQuery minified javascript library version 1.8.3
Microsoft JDBC Driver for SQLServer - version 6.2.1.jre7
--------------------------------------------------------------------------------

Copyright 2005, 2012, 2013 jQuery Foundation and other contributors, https://jquery.org/

@@ -1,4 +1,4 @@
#!/bin/bash
#!/usr/bin/env bash

# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file

@@ -1,4 +1,4 @@
#!/bin/bash
#!/usr/bin/env bash
##
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file

@@ -1,4 +1,4 @@
#!/bin/bash
#!/usr/bin/env bash
##
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file

@@ -61,6 +61,7 @@
<include>stop-yarn.sh</include>
<include>start-yarn.cmd</include>
<include>stop-yarn.cmd</include>
<include>FederationStateStore**/**</include>
</includes>
<fileMode>0755</fileMode>
</fileSet>

@@ -46,7 +46,6 @@
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-enforcer-plugin</artifactId>
<version>1.4</version>
<dependencies>
<dependency>
<groupId>org.codehaus.mojo</groupId>

@@ -50,7 +50,6 @@
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-enforcer-plugin</artifactId>
<version>1.4</version>
<dependencies>
<dependency>
<groupId>org.codehaus.mojo</groupId>

@@ -126,6 +126,10 @@
<groupId>javax.xml.bind</groupId>
<artifactId>jaxb-api</artifactId>
</exclusion>
<exclusion>
<groupId>xml-apis</groupId>
<artifactId>xml-apis</artifactId>
</exclusion>
<exclusion>
<groupId>org.apache.avro</groupId>
<artifactId>avro</artifactId>
@@ -624,6 +628,13 @@
<exclude>**/*.class</exclude>
</excludes>
</filter>
<filter>
<artifact>org.apache.hadoop:hadoop-mapreduce-client-jobclient:*</artifact>
<excludes>
<exclude>testjar/*</exclude>
<exclude>testshell/*</exclude>
</excludes>
</filter>
</filters>
<relocations>
<relocation>
@@ -646,6 +657,7 @@
<exclude>org/junit/*</exclude>
<exclude>org/junit/**/*</exclude>
<!-- Not the org/ packages that are a part of the jdk -->

<exclude>org/ietf/jgss/*</exclude>
<exclude>org/omg/**/*</exclude>
<exclude>org/w3c/dom/*</exclude>
@@ -654,6 +666,13 @@
<exclude>org/xml/sax/**/*</exclude>
</excludes>
</relocation>
<relocation>
<pattern>contribs/</pattern>
<shadedPattern>${shaded.dependency.prefix}.contribs.</shadedPattern>
<excludes>
<exclude>**/pom.xml</exclude>
</excludes>
</relocation>
<relocation>
<pattern>com/</pattern>
<shadedPattern>${shaded.dependency.prefix}.com.</shadedPattern>
@@ -691,6 +710,13 @@
<exclude>io/serializations</exclude>
</excludes>
</relocation>
<relocation>
<pattern>javassist/</pattern>
<shadedPattern>${shaded.dependency.prefix}.javassist.</shadedPattern>
<excludes>
<exclude>**/pom.xml</exclude>
</excludes>
</relocation>
<relocation>
<pattern>javax/el/</pattern>
<shadedPattern>${shaded.dependency.prefix}.javax.el.</shadedPattern>
@@ -712,6 +738,13 @@
<exclude>**/pom.xml</exclude>
</excludes>
</relocation>
<relocation>
<pattern>jersey/</pattern>
<shadedPattern>${shaded.dependency.prefix}.jersey.</shadedPattern>
<excludes>
<exclude>**/pom.xml</exclude>
</excludes>
</relocation>
<relocation>
<pattern>net/</pattern>
<shadedPattern>${shaded.dependency.prefix}.net.</shadedPattern>

@@ -174,13 +174,6 @@
<exclude>org/apache/jasper/compiler/Localizer.class</exclude>
</excludes>
</filter>
<!-- We only have xerces as a dependency for XML output for the fsimage edits, we don't need anything specific to it for javax xml support -->
<filter>
<artifact>xerces:xercesImpl</artifact>
<excludes>
<exclude>META-INF/services/*</exclude>
</excludes>
</filter>
<!-- We rely on jersey for our web interfaces. We want to use its java services stuff only internal to jersey -->
<filter>
<artifact>com.sun.jersey:*</artifact>

@@ -323,6 +323,10 @@
<!-- protobuf generated code -->
<Class name="~org\.apache\.hadoop\.tracing\.TraceAdminPB.*"/>
</Match>
<Match>
<!-- protobuf generated code -->
<Class name="~org\.apache\.hadoop\.fs\.FSProto.*"/>
</Match>

<!--
Manually checked, misses child thread manually syncing on parent's intrinsic lock.

@@ -179,7 +179,7 @@
<dependency>
<groupId>org.slf4j</groupId>
<artifactId>slf4j-log4j12</artifactId>
<scope>runtime</scope>
<scope>compile</scope>
</dependency>
<dependency>
<groupId>org.mockito</groupId>
@@ -393,6 +393,7 @@
<include>RefreshUserMappingsProtocol.proto</include>
<include>RefreshCallQueueProtocol.proto</include>
<include>GenericRefreshProtocol.proto</include>
<include>FSProtos.proto</include>
</includes>
</source>
</configuration>

@@ -30,20 +30,20 @@ function hadoop_usage
hadoop_add_option "hosts filename" "list of hosts to use in slave mode"
hadoop_add_option "workers" "turn on worker mode"

hadoop_add_subcommand "checknative" "check native Hadoop and compression libraries availability"
hadoop_add_subcommand "classpath" "prints the class path needed to get the Hadoop jar and the required libraries"
hadoop_add_subcommand "conftest" "validate configuration XML files"
hadoop_add_subcommand "credential" "interact with credential providers"
hadoop_add_subcommand "daemonlog" "get/set the log level for each daemon"
hadoop_add_subcommand "dtutil" "operations related to delegation tokens"
hadoop_add_subcommand "envvars" "display computed Hadoop environment variables"
hadoop_add_subcommand "fs" "run a generic filesystem user client"
hadoop_add_subcommand "jar <jar>" "run a jar file. NOTE: please use \"yarn jar\" to launch YARN applications, not this command."
hadoop_add_subcommand "jnipath" "prints the java.library.path"
hadoop_add_subcommand "kerbname" "show auth_to_local principal conversion"
hadoop_add_subcommand "key" "manage keys via the KeyProvider"
hadoop_add_subcommand "trace" "view and modify Hadoop tracing settings"
hadoop_add_subcommand "version" "print the version"
hadoop_add_subcommand "checknative" client "check native Hadoop and compression libraries availability"
hadoop_add_subcommand "classpath" client "prints the class path needed to get the Hadoop jar and the required libraries"
hadoop_add_subcommand "conftest" client "validate configuration XML files"
hadoop_add_subcommand "credential" client "interact with credential providers"
hadoop_add_subcommand "daemonlog" admin "get/set the log level for each daemon"
hadoop_add_subcommand "dtutil" client "operations related to delegation tokens"
hadoop_add_subcommand "envvars" client "display computed Hadoop environment variables"
hadoop_add_subcommand "fs" client "run a generic filesystem user client"
hadoop_add_subcommand "jar <jar>" client "run a jar file. NOTE: please use \"yarn jar\" to launch YARN applications, not this command."
hadoop_add_subcommand "jnipath" client "prints the java.library.path"
hadoop_add_subcommand "kerbname" client "show auth_to_local principal conversion"
hadoop_add_subcommand "key" client "manage keys via the KeyProvider"
hadoop_add_subcommand "trace" client "view and modify Hadoop tracing settings"
hadoop_add_subcommand "version" client "print the version"
hadoop_generate_usage "${HADOOP_SHELL_EXECNAME}" true
}

@@ -18,6 +18,7 @@
# be done outside of a function
declare -a HADOOP_SUBCMD_USAGE
declare -a HADOOP_OPTION_USAGE
declare -a HADOOP_SUBCMD_USAGE_TYPES

## @description Print a message to stderr
## @audience public
@@ -115,6 +116,89 @@ function hadoop_verify_entry
[[ ${!1} =~ \ ${2}\ ]]
}

## @description Check if an array has a given value
## @audience public
## @stability stable
## @replaceable yes
## @param element
## @param array
## @returns 0 = yes
## @returns 1 = no
function hadoop_array_contains
{
declare element=$1
shift
declare val

if [[ "$#" -eq 0 ]]; then
return 1
fi

for val in "${@}"; do
if [[ "${val}" == "${element}" ]]; then
return 0
fi
done
return 1
}

## @description Add the `appendstring` if `checkstring` is not
## @description present in the given array
## @audience public
## @stability stable
## @replaceable yes
## @param envvar
## @param appendstring
function hadoop_add_array_param
{
declare arrname=$1
declare add=$2

declare arrref="${arrname}[@]"
declare array=("${!arrref}")

if ! hadoop_array_contains "${add}" "${array[@]}"; then
#shellcheck disable=SC1083,SC2086
eval ${arrname}=\(\"\${array[@]}\" \"${add}\" \)
hadoop_debug "$1 accepted $2"
else
hadoop_debug "$1 declined $2"
fi
}

## @description Sort an array (must not contain regexps)
## @description present in the given array
## @audience public
## @stability stable
## @replaceable yes
## @param arrayvar
function hadoop_sort_array
{
declare arrname=$1
declare arrref="${arrname}[@]"
declare array=("${!arrref}")
declare oifs

declare globstatus
declare -a sa

globstatus=$(set -o | grep noglob | awk '{print $NF}')

set -f
oifs=${IFS}

# shellcheck disable=SC2034
IFS=$'\n' sa=($(sort <<<"${array[*]}"))

# shellcheck disable=SC1083
eval "${arrname}"=\(\"\${sa[@]}\"\)

IFS=${oifs}
if [[ "${globstatus}" = off ]]; then
set +f
fi
}

## @description Check if we are running with priv
## @description by default, this implementation looks for
## @description EUID=0. For OSes that have true priv
@@ -220,13 +304,20 @@ function hadoop_uservar_su
## @stability evolving
## @replaceable no
## @param subcommand
## @param subcommandtype
## @param subcommanddesc
function hadoop_add_subcommand
{
local subcmd=$1
local text=$2
declare subcmd=$1
declare subtype=$2
declare text=$3

HADOOP_SUBCMD_USAGE[${HADOOP_SUBCMD_USAGE_COUNTER}]="${subcmd}@${text}"
hadoop_debug "${subcmd} as a ${subtype}"

hadoop_add_array_param HADOOP_SUBCMD_USAGE_TYPES "${subtype}"

# done in this order so that sort works later
HADOOP_SUBCMD_USAGE[${HADOOP_SUBCMD_USAGE_COUNTER}]="${subcmd}@${subtype}@${text}"
((HADOOP_SUBCMD_USAGE_COUNTER=HADOOP_SUBCMD_USAGE_COUNTER+1))
}

@@ -253,17 +344,22 @@ function hadoop_reset_usage
{
HADOOP_SUBCMD_USAGE=()
HADOOP_OPTION_USAGE=()
HADOOP_SUBCMD_USAGE_TYPES=()
HADOOP_SUBCMD_USAGE_COUNTER=0
HADOOP_OPTION_USAGE_COUNTER=0
}

## @description Print a screen-size aware two-column output
## @description if reqtype is not null, only print those requested
## @audience private
## @stability evolving
## @replaceable no
## @param reqtype
## @param array
function hadoop_generic_columnprinter
{
declare reqtype=$1
shift
declare -a input=("$@")
declare -i i=0
declare -i counter=0
@@ -275,11 +371,13 @@ function hadoop_generic_columnprinter
declare -i foldsize
declare -a tmpa
declare numcols
declare brup

if [[ -n "${COLUMNS}" ]]; then
numcols=${COLUMNS}
else
numcols=$(tput cols) 2>/dev/null
COLUMNS=${numcols}
fi

if [[ -z "${numcols}"
@@ -292,7 +390,8 @@ function hadoop_generic_columnprinter
while read -r line; do
tmpa[${counter}]=${line}
((counter=counter+1))
option=$(echo "${line}" | cut -f1 -d'@')
IFS='@' read -ra brup <<< "${line}"
option="${brup[0]}"
if [[ ${#option} -gt ${maxoptsize} ]]; then
maxoptsize=${#option}
fi
@@ -304,8 +403,22 @@ function hadoop_generic_columnprinter
((foldsize=numcols-maxoptsize))

until [[ $i -eq ${#tmpa[@]} ]]; do
option=$(echo "${tmpa[$i]}" | cut -f1 -d'@')
giventext=$(echo "${tmpa[$i]}" | cut -f2 -d'@')
IFS='@' read -ra brup <<< "${tmpa[$i]}"

option="${brup[0]}"
cmdtype="${brup[1]}"
giventext="${brup[2]}"

if [[ -n "${reqtype}" ]]; then
if [[ "${cmdtype}" != "${reqtype}" ]]; then
((i=i+1))
continue
fi
fi

if [[ -z "${giventext}" ]]; then
giventext=${cmdtype}
fi

while read -r line; do
printf "%-${maxoptsize}s %-s\n" "${option}" "${line}"
@@ -325,13 +438,14 @@ function hadoop_generic_columnprinter
## @param [text to use in place of SUBCOMMAND]
function hadoop_generate_usage
{
local cmd=$1
local takesclass=$2
local subcmdtext=${3:-"SUBCOMMAND"}
local haveoptions
local optstring
local havesubs
local subcmdstring
declare cmd=$1
declare takesclass=$2
declare subcmdtext=${3:-"SUBCOMMAND"}
declare haveoptions
declare optstring
declare havesubs
declare subcmdstring
declare cmdtype

cmd=${cmd##*/}

@@ -358,7 +472,7 @@ function hadoop_generate_usage
echo " OPTIONS is none or any of:"
echo ""

hadoop_generic_columnprinter "${HADOOP_OPTION_USAGE[@]}"
hadoop_generic_columnprinter "" "${HADOOP_OPTION_USAGE[@]}"
fi

if [[ "${havesubs}" = true ]]; then
@@ -366,7 +480,18 @@
echo " ${subcmdtext} is one of:"
echo ""

hadoop_generic_columnprinter "${HADOOP_SUBCMD_USAGE[@]}"
if [[ "${#HADOOP_SUBCMD_USAGE_TYPES[@]}" -gt 0 ]]; then

hadoop_sort_array HADOOP_SUBCMD_USAGE_TYPES
for subtype in "${HADOOP_SUBCMD_USAGE_TYPES[@]}"; do
#shellcheck disable=SC2086
cmdtype="$(tr '[:lower:]' '[:upper:]' <<< ${subtype:0:1})${subtype:1}"
printf "\n %s Commands:\n\n" "${cmdtype}"
hadoop_generic_columnprinter "${subtype}" "${HADOOP_SUBCMD_USAGE[@]}"
done
else
hadoop_generic_columnprinter "" "${HADOOP_SUBCMD_USAGE[@]}"
fi
echo ""
echo "${subcmdtext} may print help when invoked w/o parameters or with -h."
fi
@@ -1873,11 +1998,9 @@ function hadoop_start_secure_daemon_wrapper
(( counter++ ))
done

# this is for the daemon pid creation
#shellcheck disable=SC2086
echo $! > "${jsvcpidfile}" 2>/dev/null
if [[ $? -gt 0 ]]; then
hadoop_error "ERROR: Cannot write ${daemonname} pid ${daemonpidfile}."
if ! echo $! > "${jsvcpidfile}"; then
hadoop_error "ERROR: Cannot write ${daemonname} pid ${jsvcpidfile}."
fi

sleep 1
@@ -2455,29 +2578,6 @@ function hadoop_parse_args
hadoop_debug "hadoop_parse: asking caller to skip ${HADOOP_PARSE_COUNTER}"
}

## @description XML-escapes the characters (&'"<>) in the given parameter.
## @audience private
## @stability evolving
## @replaceable yes
## @param string
## @return XML-escaped string
function hadoop_xml_escape
{
sed -e 's/&/\&amp;/g' -e 's/"/\\\&quot;/g' \
-e "s/'/\\\\\&apos;/g" -e 's/</\\\&lt;/g' -e 's/>/\\\&gt;/g' <<< "$1"
}

## @description sed-escapes the characters (\/&) in the given parameter.
## @audience private
## @stability evolving
## @replaceable yes
## @param string
## @return sed-escaped string
function hadoop_sed_escape
{
sed -e 's/[\/&]/\\&/g' <<< "$1"
}

## @description Handle subcommands from main program entries
## @audience private
## @stability evolving

@@ -18,6 +18,8 @@

package org.apache.hadoop.conf;

import com.ctc.wstx.io.StreamBootstrapper;
import com.ctc.wstx.io.SystemId;
import com.ctc.wstx.stax.WstxInputFactory;
import com.fasterxml.jackson.core.JsonFactory;
import com.fasterxml.jackson.core.JsonGenerator;
@@ -94,7 +96,6 @@ import org.apache.hadoop.security.alias.CredentialProviderFactory;
import org.apache.hadoop.util.ReflectionUtils;
import org.apache.hadoop.util.StringInterner;
import org.apache.hadoop.util.StringUtils;
import org.codehaus.stax2.XMLInputFactory2;
import org.codehaus.stax2.XMLStreamReader2;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -285,7 +286,8 @@ public class Configuration implements Iterable<Map.Entry<String,String>>,
* Specify exact input factory to avoid time finding correct one.
* Factory is reusable across un-synchronized threads once initialized
*/
private static final XMLInputFactory2 XML_INPUT_FACTORY = new WstxInputFactory();
private static final WstxInputFactory XML_INPUT_FACTORY =
new WstxInputFactory();

/**
* Class to keep the information about the keys which replace the deprecated
@@ -2647,15 +2649,18 @@ public class Configuration implements Iterable<Map.Entry<String,String>>,
return parse(connection.getInputStream(), url.toString());
}

private XMLStreamReader parse(InputStream is,
String systemId) throws IOException, XMLStreamException {
private XMLStreamReader parse(InputStream is, String systemIdStr)
throws IOException, XMLStreamException {
if (!quietmode) {
LOG.debug("parsing input stream " + is);
}
if (is == null) {
return null;
}
return XML_INPUT_FACTORY.createXMLStreamReader(systemId, is);
SystemId systemId = SystemId.construct(systemIdStr);
return XML_INPUT_FACTORY.createSR(XML_INPUT_FACTORY.createPrivateConfig(),
systemId, StreamBootstrapper.getInstance(null, systemId, is), false,
true);
}

private void loadResources(Properties properties,
@@ -2911,9 +2916,12 @@ public class Configuration implements Iterable<Map.Entry<String,String>>,
if(source != null) {
updatingResource.put(attr, source);
}
} else if (!value.equals(properties.getProperty(attr))) {
LOG.warn(name+":an attempt to override final parameter: "+attr
+"; Ignoring.");
} else {
// This is a final parameter so check for overrides.
checkForOverride(this.properties, name, attr, value);
if (this.properties != properties) {
checkForOverride(properties, name, attr, value);
}
}
}
if (finalParameter && attr != null) {
@@ -2921,6 +2929,18 @@ public class Configuration implements Iterable<Map.Entry<String,String>>,
}
}

/**
* Print a warning if a property with a given name already exists with a
* different value
*/
private void checkForOverride(Properties properties, String name, String attr, String value) {
String propertyValue = properties.getProperty(attr);
if (propertyValue != null && !propertyValue.equals(value)) {
LOG.warn(name + ":an attempt to override final parameter: " + attr
+ "; Ignoring.");
}
}

/**
* Write out the non-default properties in this configuration to the given
* {@link OutputStream} using UTF-8 encoding.
@@ -3126,7 +3146,8 @@ public class Configuration implements Iterable<Map.Entry<String,String>>,
JsonGenerator dumpGenerator = dumpFactory.createGenerator(out);
dumpGenerator.writeStartObject();
dumpGenerator.writeFieldName("property");
appendJSONProperty(dumpGenerator, config, propertyName);
appendJSONProperty(dumpGenerator, config, propertyName,
new ConfigRedactor(config));
dumpGenerator.writeEndObject();
dumpGenerator.flush();
}
@@ -3166,11 +3187,11 @@ public class Configuration implements Iterable<Map.Entry<String,String>>,
dumpGenerator.writeFieldName("properties");
dumpGenerator.writeStartArray();
dumpGenerator.flush();
ConfigRedactor redactor = new ConfigRedactor(config);
synchronized (config) {
for (Map.Entry<Object,Object> item: config.getProps().entrySet()) {
appendJSONProperty(dumpGenerator,
config,
item.getKey().toString());
appendJSONProperty(dumpGenerator, config, item.getKey().toString(),
redactor);
}
}
dumpGenerator.writeEndArray();
@@ -3188,12 +3209,14 @@ public class Configuration implements Iterable<Map.Entry<String,String>>,
* @throws IOException
*/
private static void appendJSONProperty(JsonGenerator jsonGen,
Configuration config, String name) throws IOException {
Configuration config, String name, ConfigRedactor redactor)
throws IOException {
// skip writing if given property name is empty or null
if(!Strings.isNullOrEmpty(name) && jsonGen != null) {
jsonGen.writeStartObject();
jsonGen.writeStringField("key", name);
jsonGen.writeStringField("value", config.get(name));
jsonGen.writeStringField("value",
redactor.redact(name, config.get(name)));
jsonGen.writeBooleanField("isFinal",
config.finalParameters.contains(name));
String[] resources = config.updatingResource.get(name);

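The Configuration hunks above route every dumped value through a ConfigRedactor so that sensitive properties are masked before they reach the JSON output. A minimal sketch of that redaction step, assuming only the ConfigRedactor(Configuration) constructor and redact(key, value) call used in the hunk; the property name below is purely illustrative:

    import org.apache.hadoop.conf.ConfigRedactor;
    import org.apache.hadoop.conf.Configuration;

    public class RedactSketch {
      public static void main(String[] args) {
        Configuration conf = new Configuration();
        // ConfigRedactor masks values whose keys match the configured
        // sensitive-key patterns when configuration is dumped.
        ConfigRedactor redactor = new ConfigRedactor(conf);
        String key = "fs.example.password";          // illustrative key, not a real default
        String value = conf.get(key, "secret");
        // Same call the patched appendJSONProperty() makes per property.
        System.out.println(key + " = " + redactor.redact(key, value));
      }
    }
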
@@ -292,7 +292,9 @@ public class LoadBalancingKMSClientProvider extends KeyProvider implements
}
}

public EncryptedKeyVersion reencryptEncryptedKey(EncryptedKeyVersion ekv)
@Override
public EncryptedKeyVersion reencryptEncryptedKey(
final EncryptedKeyVersion ekv)
throws IOException, GeneralSecurityException {
try {
return doOp(new ProviderCallable<EncryptedKeyVersion>() {

@@ -377,4 +377,25 @@ public class CommonConfigurationKeys extends CommonConfigurationKeysPublic {

// HDFS client HTrace configuration.
public static final String FS_CLIENT_HTRACE_PREFIX = "fs.client.htrace.";

// Global ZooKeeper configuration keys
public static final String ZK_PREFIX = "hadoop.zk.";
/** ACL for the ZooKeeper ensemble. */
public static final String ZK_ACL = ZK_PREFIX + "acl";
public static final String ZK_ACL_DEFAULT = "world:anyone:rwcda";
/** Authentication for the ZooKeeper ensemble. */
public static final String ZK_AUTH = ZK_PREFIX + "auth";

/** Address of the ZooKeeper ensemble. */
public static final String ZK_ADDRESS = ZK_PREFIX + "address";
/** Maximum number of retries for a ZooKeeper operation. */
public static final String ZK_NUM_RETRIES = ZK_PREFIX + "num-retries";
public static final int ZK_NUM_RETRIES_DEFAULT = 1000;
/** Timeout for a ZooKeeper operation in ZooKeeper in milliseconds. */
public static final String ZK_TIMEOUT_MS = ZK_PREFIX + "timeout-ms";
public static final int ZK_TIMEOUT_MS_DEFAULT = 10000;
/** How often to retry a ZooKeeper operation in milliseconds. */
public static final String ZK_RETRY_INTERVAL_MS =
ZK_PREFIX + "retry-interval-ms";
public static final int ZK_RETRY_INTERVAL_MS_DEFAULT = 1000;
}

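The new hadoop.zk.* keys above centralize the ZooKeeper client settings. A short sketch of reading them with their declared defaults, assuming only the constants defined in this hunk:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.CommonConfigurationKeys;

    public class ZkConfSketch {
      public static void main(String[] args) {
        Configuration conf = new Configuration();
        // hadoop.zk.address has no default; the quorum string must be supplied.
        String quorum = conf.get(CommonConfigurationKeys.ZK_ADDRESS);
        int retries = conf.getInt(CommonConfigurationKeys.ZK_NUM_RETRIES,
            CommonConfigurationKeys.ZK_NUM_RETRIES_DEFAULT);
        int timeoutMs = conf.getInt(CommonConfigurationKeys.ZK_TIMEOUT_MS,
            CommonConfigurationKeys.ZK_TIMEOUT_MS_DEFAULT);
        System.out.println(quorum + " retries=" + retries + " timeoutMs=" + timeoutMs);
      }
    }
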
@@ -608,6 +608,8 @@ public class CommonConfigurationKeysPublic {
*/
public static final String HADOOP_TOKEN_FILES =
"hadoop.token.files";
public static final String HADOOP_HTTP_AUTHENTICATION_TYPE =
"hadoop.http.authentication.type";

/**
* @see

@@ -39,9 +39,6 @@ import com.google.common.annotations.VisibleForTesting;
@InterfaceStability.Evolving
public class DF extends Shell {

/** Default DF refresh interval. */
public static final long DF_INTERVAL_DEFAULT = 3 * 1000;

private final String dirPath;
private final File dirFile;
private String filesystem;
@@ -50,7 +47,8 @@ public class DF extends Shell {
private ArrayList<String> output;

public DF(File path, Configuration conf) throws IOException {
this(path, conf.getLong(CommonConfigurationKeys.FS_DF_INTERVAL_KEY, DF.DF_INTERVAL_DEFAULT));
this(path, conf.getLong(CommonConfigurationKeys.FS_DF_INTERVAL_KEY,
CommonConfigurationKeysPublic.FS_DF_INTERVAL_DEFAULT));
}

public DF(File path, long dfInterval) throws IOException {
@@ -214,6 +212,7 @@ public class DF extends Shell {
if (args.length > 0)
path = args[0];

System.out.println(new DF(new File(path), DF_INTERVAL_DEFAULT).toString());
System.out.println(new DF(new File(path),
CommonConfigurationKeysPublic.FS_DF_INTERVAL_DEFAULT).toString());
}
}

@@ -17,17 +17,21 @@
*/
package org.apache.hadoop.fs;

import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Preconditions;
import org.apache.hadoop.HadoopIllegalArgumentException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Options.ChecksumOpt;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.util.Progressable;

import javax.annotation.Nonnull;
import java.io.IOException;
import java.util.Collections;
import java.util.EnumSet;
import java.util.HashSet;
import java.util.Set;

import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_DEFAULT;
import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_KEY;
@@ -43,9 +47,45 @@ import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IO_FILE_BUFFER_
* Progressable)}.
*
* To create missing parent directory, use {@link #recursive()}.
*
* To be more generic, {@link #opt(String, int)} and {@link #must(String, int)}
* variants provide implementation-agnostic way to customize the builder.
* Each FS-specific builder implementation can interpret the FS-specific
* options accordingly, for example:
*
* <code>
*
* // Don't
* if (fs instanceof FooFileSystem) {
* FooFileSystem fs = (FooFileSystem) fs;
* OutputStream out = dfs.createFile(path)
* .optionA()
* .optionB("value")
* .cache()
* .build()
* } else if (fs instanceof BarFileSystem) {
* ...
* }
*
* // Do
* OutputStream out = fs.createFile(path)
* .permission(perm)
* .bufferSize(bufSize)
* .opt("foofs:option.a", true)
* .opt("foofs:option.b", "value")
* .opt("barfs:cache", true)
* .must("foofs:cache", true)
* .must("barfs:cache-size", 256 * 1024 * 1024)
* .build();
* </code>
*
* If the option is not related to the file system, the option will be ignored.
* If the option is must, but not supported by the file system, a
* {@link IllegalArgumentException} will be thrown.
*
*/
@InterfaceAudience.Private
@InterfaceStability.Unstable
@InterfaceAudience.Public
@InterfaceStability.Evolving
public abstract class FSDataOutputStreamBuilder
<S extends FSDataOutputStream, B extends FSDataOutputStreamBuilder<S, B>> {
private final FileSystem fs;
@@ -60,6 +100,16 @@ public abstract class FSDataOutputStreamBuilder
private Progressable progress = null;
private ChecksumOpt checksumOpt = null;

/**
* Contains optional and mandatory parameters.
*
* It does not load default configurations from default files.
*/
private final Configuration options = new Configuration(false);

/** Keep track of the keys for mandatory options. */
private final Set<String> mandatoryKeys = new HashSet<>();

/**
* Return the concrete implementation of the builder instance.
*/
@@ -215,11 +265,154 @@ public abstract class FSDataOutputStreamBuilder
return getThisBuilder();
}

/**
* Set optional Builder parameter.
*/
public B opt(@Nonnull final String key, @Nonnull final String value) {
mandatoryKeys.remove(key);
options.set(key, value);
return getThisBuilder();
}

/**
* Set optional boolean parameter for the Builder.
*
* @see #opt(String, String)
*/
public B opt(@Nonnull final String key, boolean value) {
mandatoryKeys.remove(key);
options.setBoolean(key, value);
return getThisBuilder();
}

/**
* Set optional int parameter for the Builder.
*
* @see #opt(String, String)
*/
public B opt(@Nonnull final String key, int value) {
mandatoryKeys.remove(key);
options.setInt(key, value);
return getThisBuilder();
}

/**
* Set optional float parameter for the Builder.
*
* @see #opt(String, String)
*/
public B opt(@Nonnull final String key, float value) {
mandatoryKeys.remove(key);
options.setFloat(key, value);
return getThisBuilder();
}

/**
* Set optional double parameter for the Builder.
*
* @see #opt(String, String)
*/
public B opt(@Nonnull final String key, double value) {
mandatoryKeys.remove(key);
options.setDouble(key, value);
return getThisBuilder();
}

/**
* Set an array of string values as optional parameter for the Builder.
*
* @see #opt(String, String)
*/
public B opt(@Nonnull final String key, @Nonnull final String... values) {
mandatoryKeys.remove(key);
options.setStrings(key, values);
return getThisBuilder();
}

/**
* Set mandatory option to the Builder.
*
* If the option is not supported or unavailable on the {@link FileSystem},
* the client should expect {@link #build()} throws IllegalArgumentException.
*/
public B must(@Nonnull final String key, @Nonnull final String value) {
mandatoryKeys.add(key);
options.set(key, value);
return getThisBuilder();
}

/**
* Set mandatory boolean option.
*
* @see #must(String, String)
*/
public B must(@Nonnull final String key, boolean value) {
mandatoryKeys.add(key);
options.setBoolean(key, value);
return getThisBuilder();
}

/**
* Set mandatory int option.
*
* @see #must(String, String)
*/
public B must(@Nonnull final String key, int value) {
mandatoryKeys.add(key);
options.setInt(key, value);
return getThisBuilder();
}

/**
* Set mandatory float option.
*
* @see #must(String, String)
*/
public B must(@Nonnull final String key, float value) {
mandatoryKeys.add(key);
options.setFloat(key, value);
return getThisBuilder();
}

/**
* Set mandatory double option.
*
* @see #must(String, String)
*/
public B must(@Nonnull final String key, double value) {
mandatoryKeys.add(key);
options.setDouble(key, value);
return getThisBuilder();
}

/**
* Set a string array as mandatory option.
*
* @see #must(String, String)
*/
public B must(@Nonnull final String key, @Nonnull final String... values) {
mandatoryKeys.add(key);
options.setStrings(key, values);
return getThisBuilder();
}

protected Configuration getOptions() {
return options;
}

/**
* Get all the keys that are set as mandatory keys.
*/
@VisibleForTesting
protected Set<String> getMandatoryKeys() {
return Collections.unmodifiableSet(mandatoryKeys);
}

/**
* Create the FSDataOutputStream to write on the file system.
*
* @throws HadoopIllegalArgumentException if the parameters are not valid.
* @throws IllegalArgumentException if the parameters are not valid.
* @throws IOException on errors when file system creates or appends the file.
*/
public abstract S build() throws IOException;
public abstract S build() throws IllegalArgumentException, IOException;
}

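The opt()/must() overloads added above let callers pass FS-specific settings without casting to a concrete FileSystem. A brief usage sketch in the spirit of the Javadoc example in this hunk; the option keys are illustrative placeholders, not real configuration keys:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class BuilderOptSketch {
      public static void main(String[] args) throws Exception {
        FileSystem fs = FileSystem.get(new Configuration());
        // opt() keys the target FileSystem does not recognize are simply ignored;
        // must() marks keys the FileSystem has to honor or build() should fail.
        try (FSDataOutputStream out = fs.createFile(new Path("/tmp/builder-demo.txt"))
            .opt("foofs:option.a", true)          // illustrative FS-specific key
            .opt("foofs:option.b", "value")       // illustrative FS-specific key
            .build()) {
          out.writeUTF("hello");
        }
      }
    }
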
@@ -17,6 +17,8 @@
*/
package org.apache.hadoop.fs;

import java.io.Serializable;

import org.apache.commons.codec.binary.Hex;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.crypto.CipherSuite;
@@ -30,7 +32,9 @@ import static com.google.common.base.Preconditions.checkNotNull;
* an encrypted file.
*/
@InterfaceAudience.Private
public class FileEncryptionInfo {
public class FileEncryptionInfo implements Serializable {

private static final long serialVersionUID = 0x156abe03;

private final CipherSuite cipherSuite;
private final CryptoProtocolVersion version;

@@ -23,18 +23,22 @@ import java.io.IOException;
import java.io.InvalidObjectException;
import java.io.ObjectInputValidation;
import java.io.Serializable;
import java.util.Collections;
import java.util.EnumSet;
import java.util.Set;

import org.apache.hadoop.fs.FSProtos.FileStatusProto;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.fs.protocolPB.PBHelper;
import org.apache.hadoop.io.Writable;

/** Interface that represents the client side information for a file.
*/
@InterfaceAudience.Public
@InterfaceStability.Stable
public class FileStatus implements Writable, Comparable<FileStatus>,
public class FileStatus implements Writable, Comparable<Object>,
Serializable, ObjectInputValidation {

private static final long serialVersionUID = 0x13caeae8;
@@ -50,7 +54,31 @@ public class FileStatus implements Writable, Comparable<FileStatus>,
private String owner;
private String group;
private Path symlink;

private Set<AttrFlags> attr;

private enum AttrFlags {
HAS_ACL,
HAS_CRYPT,
HAS_EC,
};
private static final Set<AttrFlags> NONE = Collections.<AttrFlags>emptySet();
private static Set<AttrFlags> flags(boolean acl, boolean crypt, boolean ec) {
if (!(acl || crypt || ec)) {
return NONE;
}
EnumSet<AttrFlags> ret = EnumSet.noneOf(AttrFlags.class);
if (acl) {
ret.add(AttrFlags.HAS_ACL);
}
if (crypt) {
ret.add(AttrFlags.HAS_CRYPT);
}
if (ec) {
ret.add(AttrFlags.HAS_EC);
}
return ret;
}

public FileStatus() { this(0, false, 0, 0, 0, 0, null, null, null, null); }

//We should deprecate this soon?
@@ -79,6 +107,15 @@ public class FileStatus implements Writable, Comparable<FileStatus>,
FsPermission permission, String owner, String group,
Path symlink,
Path path) {
this(length, isdir, block_replication, blocksize, modification_time,
access_time, permission, owner, group, symlink, path,
false, false, false);
}

public FileStatus(long length, boolean isdir, int block_replication,
long blocksize, long modification_time, long access_time,
FsPermission permission, String owner, String group, Path symlink,
Path path, boolean hasAcl, boolean isEncrypted, boolean isErasureCoded) {
this.length = length;
this.isdir = isdir;
this.block_replication = (short)block_replication;
@@ -89,7 +126,7 @@ public class FileStatus implements Writable, Comparable<FileStatus>,
this.permission = permission;
} else if (isdir) {
this.permission = FsPermission.getDirDefault();
} else if (symlink!=null) {
} else if (symlink != null) {
this.permission = FsPermission.getDefault();
} else {
this.permission = FsPermission.getFileDefault();
@@ -98,6 +135,8 @@ public class FileStatus implements Writable, Comparable<FileStatus>,
this.group = (group == null) ? "" : group;
this.symlink = symlink;
this.path = path;
attr = flags(hasAcl, isEncrypted, isErasureCoded);

// The variables isdir and symlink indicate the type:
// 1. isdir implies directory, in which case symlink must be null.
// 2. !isdir implies a file or symlink, symlink != null implies a
@@ -133,7 +172,7 @@ public class FileStatus implements Writable, Comparable<FileStatus>,
* @return true if this is a file
*/
public boolean isFile() {
return !isdir && !isSymlink();
return !isDirectory() && !isSymlink();
}

/**
@@ -143,20 +182,20 @@ public class FileStatus implements Writable, Comparable<FileStatus>,
public boolean isDirectory() {
return isdir;
}

/**
* Old interface, instead use the explicit {@link FileStatus#isFile()},
* {@link FileStatus#isDirectory()}, and {@link FileStatus#isSymlink()}
* Old interface, instead use the explicit {@link FileStatus#isFile()},
* {@link FileStatus#isDirectory()}, and {@link FileStatus#isSymlink()}
* @return true if this is a directory.
* @deprecated Use {@link FileStatus#isFile()},
* {@link FileStatus#isDirectory()}, and {@link FileStatus#isSymlink()}
* @deprecated Use {@link FileStatus#isFile()},
* {@link FileStatus#isDirectory()}, and {@link FileStatus#isSymlink()}
* instead.
*/
@Deprecated
public boolean isDir() {
return isdir;
public final boolean isDir() {
return isDirectory();
}

/**
* Is this a symbolic link?
* @return true if this is a symbolic link
@@ -213,7 +252,7 @@ public class FileStatus implements Writable, Comparable<FileStatus>,
* @return true if the underlying file or directory has ACLs set.
*/
public boolean hasAcl() {
return permission.getAclBit();
return attr.contains(AttrFlags.HAS_ACL);
}

/**
@@ -222,7 +261,7 @@ public class FileStatus implements Writable, Comparable<FileStatus>,
* @return true if the underlying file is encrypted.
*/
public boolean isEncrypted() {
return permission.getEncryptedBit();
return attr.contains(AttrFlags.HAS_CRYPT);
}

/**
@@ -231,7 +270,7 @@ public class FileStatus implements Writable, Comparable<FileStatus>,
* @return true if the underlying file or directory is erasure coded.
*/
public boolean isErasureCoded() {
return permission.getErasureCodedBit();
return attr.contains(AttrFlags.HAS_EC);
}

/**
@@ -304,47 +343,6 @@ public class FileStatus implements Writable, Comparable<FileStatus>,
public void setSymlink(final Path p) {
symlink = p;
}

//////////////////////////////////////////////////
// Writable
//////////////////////////////////////////////////
@Override
public void write(DataOutput out) throws IOException {
Text.writeString(out, getPath().toString(), Text.DEFAULT_MAX_LEN);
out.writeLong(getLen());
out.writeBoolean(isDirectory());
out.writeShort(getReplication());
out.writeLong(getBlockSize());
out.writeLong(getModificationTime());
out.writeLong(getAccessTime());
getPermission().write(out);
Text.writeString(out, getOwner(), Text.DEFAULT_MAX_LEN);
Text.writeString(out, getGroup(), Text.DEFAULT_MAX_LEN);
out.writeBoolean(isSymlink());
if (isSymlink()) {
Text.writeString(out, getSymlink().toString(), Text.DEFAULT_MAX_LEN);
}
}

@Override
public void readFields(DataInput in) throws IOException {
String strPath = Text.readString(in, Text.DEFAULT_MAX_LEN);
this.path = new Path(strPath);
this.length = in.readLong();
this.isdir = in.readBoolean();
this.block_replication = in.readShort();
blocksize = in.readLong();
modification_time = in.readLong();
access_time = in.readLong();
permission.readFields(in);
owner = Text.readString(in, Text.DEFAULT_MAX_LEN);
group = Text.readString(in, Text.DEFAULT_MAX_LEN);
if (in.readBoolean()) {
this.symlink = new Path(Text.readString(in, Text.DEFAULT_MAX_LEN));
} else {
this.symlink = null;
}
}

/**
* Compare this FileStatus to another FileStatus
@@ -352,26 +350,37 @@ public class FileStatus implements Writable, Comparable<FileStatus>,
* @return a negative integer, zero, or a positive integer as this object
* is less than, equal to, or greater than the specified object.
*/
@Override
public int compareTo(FileStatus o) {
return this.getPath().compareTo(o.getPath());
}

/**
* Compare this FileStatus to another FileStatus.
* This method was added back by HADOOP-14683 to keep binary compatibility.
*
* @param o the FileStatus to be compared.
* @return a negative integer, zero, or a positive integer as this object
* is less than, equal to, or greater than the specified object.
* @throws ClassCastException if the specified object is not FileStatus
*/
@Override
public int compareTo(Object o) {
FileStatus other = (FileStatus) o;
return compareTo(other);
}

/** Compare if this object is equal to another object
* @param o the object to be compared.
* @return true if two file status has the same path name; false if not.
*/
@Override
public boolean equals(Object o) {
if (o == null) {
if (!(o instanceof FileStatus)) {
return false;
}
if (this == o) {
return true;
}
if (!(o instanceof FileStatus)) {
return false;
}
FileStatus other = (FileStatus)o;
return this.getPath().equals(other.getPath());
}
@@ -406,7 +415,11 @@ public class FileStatus implements Writable, Comparable<FileStatus>,
sb.append("; permission=" + permission);
sb.append("; isSymlink=" + isSymlink());
if(isSymlink()) {
sb.append("; symlink=" + symlink);
try {
sb.append("; symlink=" + getSymlink());
} catch (IOException e) {
throw new RuntimeException("Unexpected exception", e);
}
}
sb.append("; hasAcl=" + hasAcl());
sb.append("; isEncrypted=" + isEncrypted());
@@ -415,6 +428,54 @@ public class FileStatus implements Writable, Comparable<FileStatus>,
return sb.toString();
}

/**
* Read instance encoded as protobuf from stream.
* @param in Input stream
* @see PBHelper#convert(FileStatus)
* @deprecated Use the {@link PBHelper} and protobuf serialization directly.
*/
@Override
@Deprecated
public void readFields(DataInput in) throws IOException {
int size = in.readInt();
if (size < 0) {
throw new IOException("Can't read FileStatusProto with negative " +
"size of " + size);
}
byte[] buf = new byte[size];
in.readFully(buf);
FileStatusProto proto = FileStatusProto.parseFrom(buf);
FileStatus other = PBHelper.convert(proto);
isdir = other.isDirectory();
length = other.getLen();
block_replication = other.getReplication();
blocksize = other.getBlockSize();
modification_time = other.getModificationTime();
access_time = other.getAccessTime();
setPermission(other.getPermission());
setOwner(other.getOwner());
setGroup(other.getGroup());
setSymlink((other.isSymlink() ? other.getSymlink() : null));
setPath(other.getPath());
attr = flags(other.hasAcl(), other.isEncrypted(), other.isErasureCoded());
assert (isDirectory() && getSymlink() == null) || !isDirectory();
}

/**
* Write instance encoded as protobuf to stream.
* @param out Output stream
* @see PBHelper#convert(FileStatus)
* @deprecated Use the {@link PBHelper} and protobuf serialization directly.
*/
@Override
@Deprecated
public void write(DataOutput out) throws IOException {
FileStatusProto proto = PBHelper.convert(this);
int size = proto.getSerializedSize();
out.writeInt(size);
out.write(proto.toByteArray());
}

@Override
public void validateObject() throws InvalidObjectException {
if (null == path) {

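With the change above, FileStatus keeps its Writable methods but encodes them as a length-prefixed FSProtos.FileStatusProto payload, and the class also becomes java.io.Serializable. A small round-trip sketch that uses only the deprecated write()/readFields() methods shown in this hunk; the path and attributes are made up for illustration:

    import java.io.ByteArrayInputStream;
    import java.io.ByteArrayOutputStream;
    import java.io.DataInputStream;
    import java.io.DataOutputStream;

    import org.apache.hadoop.fs.FileStatus;
    import org.apache.hadoop.fs.Path;

    public class FileStatusRoundTrip {
      public static void main(String[] args) throws Exception {
        FileStatus original = new FileStatus(1024L, false, 3, 128L << 20,
            0L, 0L, null, "alice", "users", null, new Path("/tmp/a.txt"));

        // write() now serializes the protobuf form (size prefix plus bytes).
        ByteArrayOutputStream bytes = new ByteArrayOutputStream();
        original.write(new DataOutputStream(bytes));

        // readFields() parses the same protobuf form back into a FileStatus.
        FileStatus copy = new FileStatus();
        copy.readFields(new DataInputStream(
            new ByteArrayInputStream(bytes.toByteArray())));

        System.out.println(copy.getPath() + " len=" + copy.getLen());
      }
    }
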
@@ -4153,9 +4153,21 @@ public abstract class FileSystem extends Configured implements Closeable {

@Override
public FSDataOutputStream build() throws IOException {
return getFS().create(getPath(), getPermission(), getFlags(),
getBufferSize(), getReplication(), getBlockSize(), getProgress(),
getChecksumOpt());
if (getFlags().contains(CreateFlag.CREATE) ||
getFlags().contains(CreateFlag.OVERWRITE)) {
if (isRecursive()) {
return getFS().create(getPath(), getPermission(), getFlags(),
getBufferSize(), getReplication(), getBlockSize(), getProgress(),
getChecksumOpt());
} else {
return getFS().createNonRecursive(getPath(), getPermission(),
getFlags(), getBufferSize(), getReplication(), getBlockSize(),
getProgress());
}
} else if (getFlags().contains(CreateFlag.APPEND)) {
return getFS().append(getPath(), getBufferSize(), getProgress());
}
throw new IOException("Must specify either create, overwrite or append");
}

@Override
@@ -4174,8 +4186,7 @@ public abstract class FileSystem extends Configured implements Closeable {
* HADOOP-14384. Temporarily reduce the visibility of method before the
* builder interface becomes stable.
*/
@InterfaceAudience.Private
protected FSDataOutputStreamBuilder createFile(Path path) {
public FSDataOutputStreamBuilder createFile(Path path) {
return new FileSystemDataOutputStreamBuilder(this, path)
.create().overwrite(true);
}
@@ -4185,8 +4196,7 @@ public abstract class FileSystem extends Configured implements Closeable {
* @param path file path.
* @return a {@link FSDataOutputStreamBuilder} to build file append request.
*/
@InterfaceAudience.Private
protected FSDataOutputStreamBuilder appendFile(Path path) {
public FSDataOutputStreamBuilder appendFile(Path path) {
return new FileSystemDataOutputStreamBuilder(this, path).append();
}
}

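createFile() and appendFile() above change from protected to public, so the builder can be reached straight from a FileSystem reference. A minimal sketch, assuming a writable local path; the file name is only an example:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class CreateFileSketch {
      public static void main(String[] args) throws Exception {
        FileSystem fs = FileSystem.getLocal(new Configuration());
        Path p = new Path("/tmp/createfile-demo.txt");
        // createFile() pre-sets CREATE and overwrite(true); recursive() also
        // creates missing parent directories before the stream is built.
        try (FSDataOutputStream out = fs.createFile(p).recursive().build()) {
          out.writeBytes("created through the builder\n");
        }
      }
    }
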
@@ -306,8 +306,8 @@ public class FileUtil {
Path dst)
throws IOException {
if (srcFS == dstFS) {
String srcq = src.makeQualified(srcFS).toString() + Path.SEPARATOR;
String dstq = dst.makeQualified(dstFS).toString() + Path.SEPARATOR;
String srcq = srcFS.makeQualified(src).toString() + Path.SEPARATOR;
String dstq = dstFS.makeQualified(dst).toString() + Path.SEPARATOR;
if (dstq.startsWith(srcq)) {
if (srcq.length() == dstq.length()) {
throw new IOException("Cannot copy " + src + " to itself.");

@@ -23,6 +23,10 @@ import java.net.URISyntaxException;
import java.net.URL;
import java.net.URLConnection;

import com.google.common.base.Preconditions;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
@@ -33,6 +37,8 @@ import org.apache.hadoop.conf.Configuration;
@InterfaceAudience.Private
@InterfaceStability.Unstable
class FsUrlConnection extends URLConnection {
private static final Logger LOG =
LoggerFactory.getLogger(FsUrlConnection.class);

private Configuration conf;

@@ -40,12 +46,16 @@ class FsUrlConnection extends URLConnection {

FsUrlConnection(Configuration conf, URL url) {
super(url);
Preconditions.checkArgument(conf != null, "null conf argument");
Preconditions.checkArgument(url != null, "null url argument");
this.conf = conf;
}

@Override
public void connect() throws IOException {
Preconditions.checkState(is == null, "Already connected");
try {
LOG.debug("Connecting to {}", url);
FileSystem fs = FileSystem.get(url.toURI(), conf);
is = fs.open(new Path(url.getPath()));
} catch (URISyntaxException e) {

@@ -22,6 +22,9 @@ import java.net.URLStreamHandlerFactory;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
@@ -41,6 +44,18 @@ import org.apache.hadoop.conf.Configuration;
public class FsUrlStreamHandlerFactory implements
URLStreamHandlerFactory {

private static final Logger LOG =
LoggerFactory.getLogger(FsUrlStreamHandlerFactory.class);

/**
* These are the protocols with MUST NOT be exported, as doing so
* would conflict with the standard URL handlers registered by
* the JVM. Many things will break.
*/
public static final String[] UNEXPORTED_PROTOCOLS = {
"http", "https"
};

// The configuration holds supported FS implementation class names.
private Configuration conf;

@@ -64,14 +79,20 @@ public class FsUrlStreamHandlerFactory implements
throw new RuntimeException(io);
}
this.handler = new FsUrlStreamHandler(this.conf);
for (String protocol : UNEXPORTED_PROTOCOLS) {
protocols.put(protocol, false);
}
}

@Override
public java.net.URLStreamHandler createURLStreamHandler(String protocol) {
LOG.debug("Creating handler for protocol {}", protocol);
if (!protocols.containsKey(protocol)) {
boolean known = true;
try {
FileSystem.getFileSystemClass(protocol, conf);
Class<? extends FileSystem> impl
= FileSystem.getFileSystemClass(protocol, conf);
LOG.debug("Found implementation of {}: {}", protocol, impl);
}
catch (IOException ex) {
known = false;
@@ -79,9 +100,12 @@ public class FsUrlStreamHandlerFactory implements
protocols.put(protocol, known);
}
if (protocols.get(protocol)) {
LOG.debug("Using handler for protocol {}", protocol);
return handler;
} else {
// FileSystem does not know the protocol, let the VM handle this
LOG.debug("Unknown protocol {}, delegating to default implementation",
protocol);
return null;
}
}

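The factory above now refuses to shadow the JVM's own http/https handlers and logs its protocol lookups; registering it is unchanged. A brief sketch, assuming the Configuration-taking constructor shown in this file (the namenode address is an illustrative placeholder, and the JVM allows this registration only once per process):

    import java.net.URL;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FsUrlStreamHandlerFactory;

    public class UrlFactorySketch {
      public static void main(String[] args) throws Exception {
        // After registration, hdfs:// URLs resolve through Hadoop while
        // http/https stay with the built-in JVM handlers.
        URL.setURLStreamHandlerFactory(
            new FsUrlStreamHandlerFactory(new Configuration()));
        URL url = new URL("hdfs://namenode:8020/user/alice/data.txt");  // illustrative URI
        System.out.println(url.openConnection().getClass().getName());
      }
    }
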
@@ -30,6 +30,9 @@ import org.apache.hadoop.fs.permission.FsPermission;
@InterfaceAudience.Public
@InterfaceStability.Evolving
public class LocatedFileStatus extends FileStatus {

private static final long serialVersionUID = 0x17339920;

private BlockLocation[] locations;

@@ -42,14 +45,18 @@ public class LocatedFileStatus extends FileStatus {
* @param stat a file status
* @param locations a file's block locations
*/
public LocatedFileStatus(FileStatus stat, BlockLocation[] locations)
throws IOException {
public LocatedFileStatus(FileStatus stat, BlockLocation[] locations) {
this(stat.getLen(), stat.isDirectory(), stat.getReplication(),
stat.getBlockSize(), stat.getModificationTime(),
stat.getAccessTime(), stat.getPermission(), stat.getOwner(),
stat.getGroup(), null, stat.getPath(), locations);
stat.getAccessTime(), stat.getPermission(),
stat.getOwner(), stat.getGroup(), null, stat.getPath(),
stat.hasAcl(), stat.isEncrypted(), stat.isErasureCoded(), locations);
if (stat.isSymlink()) {
setSymlink(stat.getSymlink());
try {
setSymlink(stat.getSymlink());
} catch (IOException e) {
throw new RuntimeException("Unexpected exception", e);
}
}
}

@ -69,24 +76,55 @@ public class LocatedFileStatus extends FileStatus {
|
|||
* @param path the path's qualified name
|
||||
* @param locations a file's block locations
|
||||
*/
|
||||
@Deprecated
|
||||
public LocatedFileStatus(long length, boolean isdir,
|
||||
int block_replication,
|
||||
long blocksize, long modification_time, long access_time,
|
||||
FsPermission permission, String owner, String group,
|
||||
Path symlink,
|
||||
Path path,
|
||||
BlockLocation[] locations) {
|
||||
super(length, isdir, block_replication, blocksize, modification_time,
|
||||
access_time, permission, owner, group, symlink, path);
|
||||
this.locations = locations;
|
||||
Path symlink, Path path, BlockLocation[] locations) {
|
||||
this(length, isdir, block_replication, blocksize, modification_time,
|
||||
access_time, permission, owner, group, symlink, path,
|
||||
permission.getAclBit(), permission.getEncryptedBit(),
|
||||
permission.getErasureCodedBit(), locations);
|
||||
}
|
||||
|
||||
|
||||
/**
|
||||
* Constructor.
|
||||
*
|
||||
* @param length a file's length
|
||||
* @param isdir if the path is a directory
|
||||
* @param block_replication the file's replication factor
|
||||
* @param blocksize a file's block size
|
||||
* @param modification_time a file's modification time
|
||||
* @param access_time a file's access time
|
||||
* @param permission a file's permission
|
||||
* @param owner a file's owner
|
||||
* @param group a file's group
|
||||
* @param symlink symlink if the path is a symbolic link
|
||||
* @param path the path's qualified name
|
||||
* @param hasAcl entity has associated ACLs
|
||||
* @param isEncrypted entity is encrypted
|
||||
* @param isErasureCoded entity is erasure coded
|
||||
* @param locations a file's block locations
|
||||
*/
|
||||
public LocatedFileStatus(long length, boolean isdir,
|
||||
int block_replication, long blocksize, long modification_time,
|
||||
long access_time, FsPermission permission, String owner, String group,
|
||||
Path symlink, Path path,
|
||||
boolean hasAcl, boolean isEncrypted, boolean isErasureCoded,
|
||||
BlockLocation[] locations) {
|
||||
super(length, isdir, block_replication, blocksize, modification_time,
|
||||
access_time, permission, owner, group, symlink, path,
|
||||
hasAcl, isEncrypted, isErasureCoded);
|
||||
this.locations = locations;
|
||||
}
|
||||
|
||||
/**
|
||||
* Get the file's block locations
|
||||
* @return the file's block locations
|
||||
*/
|
||||
public BlockLocation[] getBlockLocations() {
|
||||
return locations;
|
||||
return locations;
|
||||
}
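Since the two-argument constructor above no longer declares `IOException` (a failing `getSymlink()` is rethrown as a `RuntimeException`), call sites shrink to something like the hedged sketch below; the path is invented and the snippet is illustrative only.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.BlockLocation;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.LocatedFileStatus;
import org.apache.hadoop.fs.Path;

public class LocatedStatusExample {
  public static void main(String[] args) throws Exception {
    FileSystem fs = FileSystem.get(new Configuration());
    Path p = new Path("/tmp/sample.txt");                 // placeholder path

    FileStatus stat = fs.getFileStatus(p);
    BlockLocation[] locs = fs.getFileBlockLocations(stat, 0, stat.getLen());

    // No try/catch needed any more: the constructor no longer throws IOException.
    LocatedFileStatus located = new LocatedFileStatus(stat, locs);
    System.out.println(located.getPath() + " -> "
        + located.getBlockLocations().length + " block location(s)");
  }
}
```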
|
||||
|
||||
/**
@ -505,7 +505,7 @@ public class FTPFileSystem extends FileSystem {
|
|||
long modTime = -1; // Modification time of root dir not known.
|
||||
Path root = new Path("/");
|
||||
return new FileStatus(length, isDir, blockReplication, blockSize,
|
||||
modTime, root.makeQualified(this));
|
||||
modTime, this.makeQualified(root));
|
||||
}
|
||||
String pathName = parentPath.toUri().getPath();
|
||||
FTPFile[] ftpFiles = client.listFiles(pathName);
|
||||
|
@ -546,7 +546,7 @@ public class FTPFileSystem extends FileSystem {
|
|||
String group = ftpFile.getGroup();
|
||||
Path filePath = new Path(parentPath, ftpFile.getName());
|
||||
return new FileStatus(length, isDir, blockReplication, blockSize, modTime,
|
||||
accessTime, permission, user, group, filePath.makeQualified(this));
|
||||
accessTime, permission, user, group, this.makeQualified(filePath));
|
||||
}
|
||||
|
||||
@Override
@ -133,11 +133,13 @@ public class FsPermission implements Writable, Serializable,
|
|||
}
|
||||
|
||||
@Override
|
||||
@Deprecated
|
||||
public void write(DataOutput out) throws IOException {
|
||||
out.writeShort(toShort());
|
||||
}
|
||||
|
||||
@Override
|
||||
@Deprecated
|
||||
public void readFields(DataInput in) throws IOException {
|
||||
fromShort(in.readShort());
|
||||
}
|
||||
|
@ -161,7 +163,7 @@ public class FsPermission implements Writable, Serializable,
|
|||
*/
|
||||
public static FsPermission read(DataInput in) throws IOException {
|
||||
FsPermission p = new FsPermission();
|
||||
p.readFields(in);
|
||||
p.fromShort(in.readShort());
|
||||
return p;
|
||||
}
|
||||
|
||||
|
@ -184,6 +186,7 @@ public class FsPermission implements Writable, Serializable,
|
|||
*
|
||||
* @return short extended short representation of this permission
|
||||
*/
|
||||
@Deprecated
|
||||
public short toExtendedShort() {
|
||||
return toShort();
|
||||
}
|
||||
|
@ -299,7 +302,10 @@ public class FsPermission implements Writable, Serializable,
|
|||
* Returns true if there is also an ACL (access control list).
|
||||
*
|
||||
* @return boolean true if there is also an ACL (access control list).
|
||||
* @deprecated Get acl bit from the {@link org.apache.hadoop.fs.FileStatus}
|
||||
* object.
|
||||
*/
|
||||
@Deprecated
|
||||
public boolean getAclBit() {
|
||||
// File system subclasses that support the ACL bit would override this.
|
||||
return false;
|
||||
|
@ -307,14 +313,20 @@ public class FsPermission implements Writable, Serializable,
|
|||
|
||||
/**
|
||||
* Returns true if the file is encrypted or directory is in an encryption zone
|
||||
* @deprecated Get encryption bit from the
|
||||
* {@link org.apache.hadoop.fs.FileStatus} object.
|
||||
*/
|
||||
@Deprecated
|
||||
public boolean getEncryptedBit() {
|
||||
return false;
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns true if the file or directory is erasure coded.
|
||||
* @deprecated Get ec bit from the {@link org.apache.hadoop.fs.FileStatus}
|
||||
* object.
|
||||
*/
|
||||
@Deprecated
|
||||
public boolean getErasureCodedBit() {
|
||||
return false;
|
||||
}
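The deprecations above pair with the `FileStatus` attribute accessors used elsewhere in this commit (`hasAcl()`, `isEncrypted()`, `isErasureCoded()`); a minimal migration sketch:

```java
import org.apache.hadoop.fs.FileStatus;

public class AclBitMigration {
  // Before: the flag was read off FsPermission (now deprecated).
  @SuppressWarnings("deprecation")
  static boolean hasAclOld(FileStatus stat) {
    return stat.getPermission().getAclBit();
  }

  // After: FileStatus carries the attribute directly
  // (likewise isEncrypted() and isErasureCoded()).
  static boolean hasAclNew(FileStatus stat) {
    return stat.hasAcl();
  }
}
```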
@ -0,0 +1,131 @@
|
|||
/**
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
* regarding copyright ownership. The ASF licenses this file
|
||||
* to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
package org.apache.hadoop.fs.protocolPB;
|
||||
|
||||
import org.apache.hadoop.fs.FileStatus;
|
||||
import org.apache.hadoop.fs.Path;
|
||||
import org.apache.hadoop.fs.permission.FsPermission;
|
||||
|
||||
import java.io.IOException;
|
||||
|
||||
import static org.apache.hadoop.fs.FSProtos.*;
|
||||
|
||||
/**
|
||||
* Utility methods aiding conversion of fs data structures.
|
||||
*/
|
||||
public final class PBHelper {
|
||||
|
||||
private PBHelper() {
|
||||
// prevent construction
|
||||
}
|
||||
|
||||
public static FsPermission convert(FsPermissionProto proto)
|
||||
throws IOException {
|
||||
return new FsPermission((short)proto.getPerm());
|
||||
}
|
||||
|
||||
public static FsPermissionProto convert(FsPermission p) throws IOException {
|
||||
FsPermissionProto.Builder bld = FsPermissionProto.newBuilder();
|
||||
bld.setPerm(p.toShort());
|
||||
return bld.build();
|
||||
}
|
||||
|
||||
public static FileStatus convert(FileStatusProto proto) throws IOException {
|
||||
final Path path;
|
||||
final long length;
|
||||
final boolean isdir;
|
||||
final short blockReplication;
|
||||
final long blocksize;
|
||||
final long mtime;
|
||||
final long atime;
|
||||
final String owner;
|
||||
final String group;
|
||||
final FsPermission permission;
|
||||
final Path symlink;
|
||||
switch (proto.getFileType()) {
|
||||
case FT_DIR:
|
||||
isdir = true;
|
||||
symlink = null;
|
||||
blocksize = 0;
|
||||
length = 0;
|
||||
blockReplication = 0;
|
||||
break;
|
||||
case FT_SYMLINK:
|
||||
isdir = false;
|
||||
symlink = new Path(proto.getSymlink());
|
||||
blocksize = 0;
|
||||
length = 0;
|
||||
blockReplication = 0;
|
||||
break;
|
||||
case FT_FILE:
|
||||
isdir = false;
|
||||
symlink = null;
|
||||
blocksize = proto.getBlockSize();
|
||||
length = proto.getLength();
|
||||
int brep = proto.getBlockReplication();
|
||||
if ((brep & 0xffff0000) != 0) {
|
||||
throw new IOException(String.format("Block replication 0x%08x " +
|
||||
"doesn't fit in 16 bits.", brep));
|
||||
}
|
||||
blockReplication = (short)brep;
|
||||
break;
|
||||
default:
|
||||
throw new IllegalStateException("Unknown type: " + proto.getFileType());
|
||||
}
|
||||
path = new Path(proto.getPath());
|
||||
mtime = proto.getModificationTime();
|
||||
atime = proto.getAccessTime();
|
||||
permission = convert(proto.getPermission());
|
||||
owner = proto.getOwner();
|
||||
group = proto.getGroup();
|
||||
int flags = proto.getFlags();
|
||||
return new FileStatus(length, isdir, blockReplication, blocksize,
|
||||
mtime, atime, permission, owner, group, symlink, path,
|
||||
(flags & FileStatusProto.Flags.HAS_ACL_VALUE) != 0,
|
||||
(flags & FileStatusProto.Flags.HAS_CRYPT_VALUE) != 0,
|
||||
(flags & FileStatusProto.Flags.HAS_EC_VALUE) != 0);
|
||||
}
|
||||
|
||||
public static FileStatusProto convert(FileStatus stat) throws IOException {
|
||||
FileStatusProto.Builder bld = FileStatusProto.newBuilder();
|
||||
bld.setPath(stat.getPath().toString());
|
||||
if (stat.isDirectory()) {
|
||||
bld.setFileType(FileStatusProto.FileType.FT_DIR);
|
||||
} else if (stat.isSymlink()) {
|
||||
bld.setFileType(FileStatusProto.FileType.FT_SYMLINK)
|
||||
.setSymlink(stat.getSymlink().toString());
|
||||
} else {
|
||||
bld.setFileType(FileStatusProto.FileType.FT_FILE)
|
||||
.setLength(stat.getLen())
|
||||
.setBlockReplication(stat.getReplication())
|
||||
.setBlockSize(stat.getBlockSize());
|
||||
}
|
||||
bld.setAccessTime(stat.getAccessTime())
|
||||
.setModificationTime(stat.getModificationTime())
|
||||
.setOwner(stat.getOwner())
|
||||
.setGroup(stat.getGroup())
|
||||
.setPermission(convert(stat.getPermission()));
|
||||
int flags = 0;
|
||||
flags |= stat.hasAcl() ? FileStatusProto.Flags.HAS_ACL_VALUE : 0;
|
||||
flags |= stat.isEncrypted() ? FileStatusProto.Flags.HAS_CRYPT_VALUE : 0;
|
||||
flags |= stat.isErasureCoded() ? FileStatusProto.Flags.HAS_EC_VALUE : 0;
|
||||
bld.setFlags(flags);
|
||||
return bld.build();
|
||||
}
|
||||
|
||||
}
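A hedged round-trip sketch for the helper above, showing a `FileStatus` being encoded to the new `FileStatusProto` and decoded again (the `FSProtos` outer class name comes from the .proto options later in this commit; the path is a placeholder):

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSProtos.FileStatusProto;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.protocolPB.PBHelper;

public class FileStatusPBExample {
  public static void main(String[] args) throws Exception {
    FileSystem fs = FileSystem.getLocal(new Configuration());
    FileStatus original = fs.getFileStatus(new Path("/tmp"));  // placeholder

    // Encode to the wire form and decode back again.
    FileStatusProto proto = PBHelper.convert(original);
    FileStatus restored = PBHelper.convert(proto);

    System.out.println(restored.getPath()
        + " acl=" + restored.hasAcl()
        + " encrypted=" + restored.isEncrypted()
        + " ec=" + restored.isErasureCoded());
  }
}
```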
@ -0,0 +1,18 @@
|
|||
/**
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
* regarding copyright ownership. The ASF licenses this file
|
||||
* to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
package org.apache.hadoop.fs.protocolPB;
@ -326,8 +326,10 @@ public class SFTPFileSystem extends FileSystem {
|
|||
String parentDir = parent.toUri().getPath();
|
||||
boolean succeeded = true;
|
||||
try {
|
||||
final String previousCwd = client.pwd();
|
||||
client.cd(parentDir);
|
||||
client.mkdir(pathName);
|
||||
client.cd(previousCwd);
|
||||
} catch (SftpException e) {
|
||||
throw new IOException(String.format(E_MAKE_DIR_FORPATH, pathName,
|
||||
parentDir));
@ -474,8 +476,10 @@ public class SFTPFileSystem extends FileSystem {
|
|||
}
|
||||
boolean renamed = true;
|
||||
try {
|
||||
final String previousCwd = channel.pwd();
|
||||
channel.cd("/");
|
||||
channel.rename(src.toUri().getPath(), dst.toUri().getPath());
|
||||
channel.cd(previousCwd);
|
||||
} catch (SftpException e) {
|
||||
renamed = false;
|
||||
}
@ -558,8 +562,10 @@ public class SFTPFileSystem extends FileSystem {
|
|||
}
|
||||
OutputStream os;
|
||||
try {
|
||||
final String previousCwd = client.pwd();
|
||||
client.cd(parent.toUri().getPath());
|
||||
os = client.put(f.getName());
|
||||
client.cd(previousCwd);
|
||||
} catch (SftpException e) {
|
||||
throw new IOException(e);
|
||||
}
@ -86,9 +86,9 @@ class AclCommands extends FsCommand {
|
|||
(perm.getOtherAction().implies(FsAction.EXECUTE) ? "t" : "T"));
|
||||
}
|
||||
|
||||
AclStatus aclStatus = null;
|
||||
List<AclEntry> entries = null;
|
||||
if (perm.getAclBit()) {
|
||||
final AclStatus aclStatus;
|
||||
final List<AclEntry> entries;
|
||||
if (item.stat.hasAcl()) {
|
||||
aclStatus = item.fs.getAclStatus(item.path);
|
||||
entries = aclStatus.getEntries();
|
||||
} else {
@ -444,8 +444,8 @@ abstract class CommandWithDestination extends FsCommand {
|
|||
src.stat.getPermission());
|
||||
}
|
||||
if (shouldPreserve(FileAttribute.ACL)) {
|
||||
FsPermission perm = src.stat.getPermission();
|
||||
if (perm.getAclBit()) {
|
||||
if (src.stat.hasAcl()) {
|
||||
FsPermission perm = src.stat.getPermission();
|
||||
List<AclEntry> srcEntries =
|
||||
src.fs.getAclStatus(src.path).getEntries();
|
||||
List<AclEntry> srcFullEntries =
@ -255,7 +255,7 @@ class Ls extends FsCommand {
|
|||
ContentSummary contentSummary = item.fs.getContentSummary(item.path);
|
||||
String line = String.format(lineFormat,
|
||||
(stat.isDirectory() ? "d" : "-"),
|
||||
stat.getPermission() + (stat.getPermission().getAclBit() ? "+" : " "),
|
||||
stat.getPermission() + (stat.hasAcl() ? "+" : " "),
|
||||
(stat.isFile() ? stat.getReplication() : "-"),
|
||||
stat.getOwner(),
|
||||
stat.getGroup(),
|
||||
|
@ -269,7 +269,7 @@ class Ls extends FsCommand {
|
|||
} else {
|
||||
String line = String.format(lineFormat,
|
||||
(stat.isDirectory() ? "d" : "-"),
|
||||
stat.getPermission() + (stat.getPermission().getAclBit() ? "+" : " "),
|
||||
stat.getPermission() + (stat.hasAcl() ? "+" : " "),
|
||||
(stat.isFile() ? stat.getReplication() : "-"),
|
||||
stat.getOwner(),
|
||||
stat.getGroup(),
@ -40,8 +40,10 @@ import org.apache.hadoop.fs.FileStatus;
|
|||
* %o: Block size<br>
|
||||
* %r: replication<br>
|
||||
* %u: User name of owner<br>
|
||||
* %y: UTC date as "yyyy-MM-dd HH:mm:ss"<br>
|
||||
* %Y: Milliseconds since January 1, 1970 UTC<br>
|
||||
* %x: atime UTC date as "yyyy-MM-dd HH:mm:ss"<br>
|
||||
* %X: atime Milliseconds since January 1, 1970 UTC<br>
|
||||
* %y: mtime UTC date as "yyyy-MM-dd HH:mm:ss"<br>
|
||||
* %Y: mtime Milliseconds since January 1, 1970 UTC<br>
|
||||
* If the format is not specified, %y is used by default.
|
||||
*/
|
||||
@InterfaceAudience.Private
|
||||
|
@ -62,9 +64,10 @@ class Stat extends FsCommand {
|
|||
"octal (%a) and symbolic (%A), filesize in" + NEWLINE +
|
||||
"bytes (%b), type (%F), group name of owner (%g)," + NEWLINE +
|
||||
"name (%n), block size (%o), replication (%r), user name" + NEWLINE +
|
||||
"of owner (%u), modification date (%y, %Y)." + NEWLINE +
|
||||
"%y shows UTC date as \"yyyy-MM-dd HH:mm:ss\" and" + NEWLINE +
|
||||
"%Y shows milliseconds since January 1, 1970 UTC." + NEWLINE +
|
||||
"of owner (%u), access date (%x, %X)." + NEWLINE +
|
||||
"modification date (%y, %Y)." + NEWLINE +
|
||||
"%x and %y show UTC date as \"yyyy-MM-dd HH:mm:ss\" and" + NEWLINE +
|
||||
"%X and %Y show milliseconds since January 1, 1970 UTC." + NEWLINE +
|
||||
"If the format is not specified, %y is used by default." + NEWLINE;
|
||||
|
||||
protected final SimpleDateFormat timeFmt;
|
||||
|
@ -127,6 +130,12 @@ class Stat extends FsCommand {
|
|||
case 'u':
|
||||
buf.append(stat.getOwner());
|
||||
break;
|
||||
case 'x':
|
||||
buf.append(timeFmt.format(new Date(stat.getAccessTime())));
|
||||
break;
|
||||
case 'X':
|
||||
buf.append(stat.getAccessTime());
|
||||
break;
|
||||
case 'y':
|
||||
buf.append(timeFmt.format(new Date(stat.getModificationTime())));
|
||||
break;
@ -61,13 +61,7 @@ class ViewFsFileStatus extends FileStatus {
|
|||
public boolean isDirectory() {
|
||||
return myFs.isDirectory();
|
||||
}
|
||||
|
||||
@Override
|
||||
@SuppressWarnings("deprecation")
|
||||
public boolean isDir() {
|
||||
return myFs.isDirectory();
|
||||
}
|
||||
|
||||
|
||||
@Override
|
||||
public boolean isSymlink() {
|
||||
return myFs.isSymlink();
@ -49,12 +49,6 @@ class ViewFsLocatedFileStatus extends LocatedFileStatus {
|
|||
return myFs.isDirectory();
|
||||
}
|
||||
|
||||
@Override
|
||||
@SuppressWarnings("deprecation")
|
||||
public boolean isDir() {
|
||||
return myFs.isDirectory();
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean isSymlink() {
|
||||
return myFs.isSymlink();
@ -128,6 +128,10 @@ public final class HttpServer2 implements FilterContainer {
|
|||
public static final String HTTP_MAX_RESPONSE_HEADER_SIZE_KEY =
|
||||
"hadoop.http.max.response.header.size";
|
||||
public static final int HTTP_MAX_RESPONSE_HEADER_SIZE_DEFAULT = 65536;
|
||||
|
||||
public static final String HTTP_SOCKET_BACKLOG_SIZE_KEY =
|
||||
"hadoop.http.socket.backlog.size";
|
||||
public static final int HTTP_SOCKET_BACKLOG_SIZE_DEFAULT = 128;
|
||||
public static final String HTTP_MAX_THREADS_KEY = "hadoop.http.max.threads";
|
||||
public static final String HTTP_TEMP_DIR_KEY = "hadoop.http.temp.dir";
|
||||
|
||||
|
@ -433,6 +437,9 @@ public final class HttpServer2 implements FilterContainer {
|
|||
httpConfig.setResponseHeaderSize(responseHeaderSize);
|
||||
httpConfig.setSendServerVersion(false);
|
||||
|
||||
int backlogSize = conf.getInt(HTTP_SOCKET_BACKLOG_SIZE_KEY,
|
||||
HTTP_SOCKET_BACKLOG_SIZE_DEFAULT);
|
||||
|
||||
for (URI ep : endpoints) {
|
||||
final ServerConnector connector;
|
||||
String scheme = ep.getScheme();
|
||||
|
@ -448,6 +455,7 @@ public final class HttpServer2 implements FilterContainer {
|
|||
}
|
||||
connector.setHost(ep.getHost());
|
||||
connector.setPort(ep.getPort() == -1 ? 0 : ep.getPort());
|
||||
connector.setAcceptQueueSize(backlogSize);
|
||||
server.addListener(connector);
|
||||
}
|
||||
server.loadListeners();
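A small configuration sketch for the new backlog knob introduced above; the value 256 is only an example, and the key is applied through `connector.setAcceptQueueSize(...)` when the builder creates its connectors.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.http.HttpServer2;

public class BacklogConfigExample {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // hadoop.http.socket.backlog.size, default 128 per the new constant.
    conf.setInt(HttpServer2.HTTP_SOCKET_BACKLOG_SIZE_KEY, 256);
    System.out.println("accept queue = "
        + conf.getInt(HttpServer2.HTTP_SOCKET_BACKLOG_SIZE_KEY,
                      HttpServer2.HTTP_SOCKET_BACKLOG_SIZE_DEFAULT));
  }
}
```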
|
||||
|
@ -640,7 +648,6 @@ public final class HttpServer2 implements FilterContainer {
|
|||
|
||||
private static void configureChannelConnector(ServerConnector c) {
|
||||
c.setIdleTimeout(10000);
|
||||
c.setAcceptQueueSize(128);
|
||||
if(Shell.WINDOWS) {
|
||||
// result of setting the SO_REUSEADDR flag is different on Windows
|
||||
// http://msdn.microsoft.com/en-us/library/ms740621(v=vs.85).aspx
@ -293,7 +293,7 @@ public class IOUtils {
|
|||
*/
|
||||
public static void closeStream(java.io.Closeable stream) {
|
||||
if (stream != null) {
|
||||
cleanup(null, stream);
|
||||
cleanupWithLogger(null, stream);
|
||||
}
|
||||
}
@ -1883,7 +1883,7 @@ public class SequenceFile {
|
|||
@Deprecated
|
||||
public Reader(FileSystem fs, Path file,
|
||||
Configuration conf) throws IOException {
|
||||
this(conf, file(file.makeQualified(fs)));
|
||||
this(conf, file(fs.makeQualified(file)));
|
||||
}
|
||||
|
||||
/**
@ -176,7 +176,7 @@ public final class CodecRegistry {
|
|||
* @return a map of all codec names, and their corresponding code list
|
||||
* separated by ','.
|
||||
*/
|
||||
public HashMap<String, String> getCodec2CoderCompactMap() {
|
||||
public Map<String, String> getCodec2CoderCompactMap() {
|
||||
return coderNameCompactMap;
|
||||
}
|
||||
}
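Callers of the method above should now bind to the `Map` interface rather than `HashMap`. A hedged sketch, assuming the registry's existing singleton accessor `getInstance()`:

```java
import java.util.Map;

import org.apache.hadoop.io.erasurecode.CodecRegistry;

public class CodecMapExample {
  public static void main(String[] args) {
    Map<String, String> codecToCoders =
        CodecRegistry.getInstance().getCodec2CoderCompactMap();
    // e.g. "rs" -> "rs_native,rs_java" (actual entries depend on the build).
    codecToCoders.forEach((codec, coders) ->
        System.out.println(codec + " -> " + coders));
  }
}
```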
@ -17,6 +17,7 @@
|
|||
*/
|
||||
package org.apache.hadoop.io.erasurecode;
|
||||
|
||||
import java.io.Serializable;
|
||||
import java.util.Collections;
|
||||
import java.util.HashMap;
|
||||
import java.util.Map;
|
||||
|
@ -31,7 +32,10 @@ import org.apache.hadoop.classification.InterfaceStability;
|
|||
*/
|
||||
@InterfaceAudience.Public
|
||||
@InterfaceStability.Evolving
|
||||
public final class ECSchema {
|
||||
public final class ECSchema implements Serializable {
|
||||
|
||||
private static final long serialVersionUID = 0x10953aa0;
|
||||
|
||||
public static final String NUM_DATA_UNITS_KEY = "numDataUnits";
|
||||
public static final String NUM_PARITY_UNITS_KEY = "numParityUnits";
|
||||
public static final String CODEC_NAME_KEY = "codec";
@ -30,6 +30,7 @@ public final class ErasureCodeConstants {
|
|||
public static final String RS_LEGACY_CODEC_NAME = "rs-legacy";
|
||||
public static final String XOR_CODEC_NAME = "xor";
|
||||
public static final String HHXOR_CODEC_NAME = "hhxor";
|
||||
public static final String REPLICATION_CODEC_NAME = "replication";
|
||||
|
||||
public static final ECSchema RS_6_3_SCHEMA = new ECSchema(
|
||||
RS_CODEC_NAME, 6, 3);
|
||||
|
@ -45,4 +46,11 @@ public final class ErasureCodeConstants {
|
|||
|
||||
public static final ECSchema RS_10_4_SCHEMA = new ECSchema(
|
||||
RS_CODEC_NAME, 10, 4);
|
||||
|
||||
public static final ECSchema REPLICATION_1_2_SCHEMA = new ECSchema(
|
||||
REPLICATION_CODEC_NAME, 1, 2);
|
||||
|
||||
public static final byte USER_DEFINED_POLICY_START_ID = (byte) 64;
|
||||
public static final byte REPLICATION_POLICY_ID = (byte) 63;
|
||||
public static final String REPLICATION_POLICY_NAME = REPLICATION_CODEC_NAME;
|
||||
}
@ -295,6 +295,17 @@ public class RetryInvocationHandler<T> implements RpcInvocationHandler {
|
|||
|
||||
return new RetryInfo(maxRetryDelay, max, expectedFailoverCount, ex);
|
||||
}
|
||||
|
||||
@Override
|
||||
public String toString() {
|
||||
return "RetryInfo{" +
|
||||
"retryTime=" + retryTime +
|
||||
", delay=" + delay +
|
||||
", action=" + action +
|
||||
", expectedFailoverCount=" + expectedFailoverCount +
|
||||
", failException=" + failException +
|
||||
'}';
|
||||
}
|
||||
}
|
||||
|
||||
private final ProxyDescriptor<T> proxyDescriptor;
@ -74,7 +74,9 @@ public class CompositeGroupsMapping
|
|||
try {
|
||||
groups = provider.getGroups(user);
|
||||
} catch (Exception e) {
|
||||
//LOG.warn("Exception trying to get groups for user " + user, e);
|
||||
LOG.warn("Unable to get groups for user {} via {} because: {}",
|
||||
user, provider.getClass().getSimpleName(), e.toString());
|
||||
LOG.debug("Stacktrace: ", e);
|
||||
}
|
||||
if (groups != null && ! groups.isEmpty()) {
|
||||
groupSet.addAll(groups);
@ -23,6 +23,9 @@ import java.util.List;
|
|||
|
||||
import org.apache.hadoop.classification.InterfaceAudience;
|
||||
import org.apache.hadoop.classification.InterfaceStability;
|
||||
import org.slf4j.Logger;
|
||||
import org.slf4j.LoggerFactory;
|
||||
import org.slf4j.impl.Log4jLoggerAdapter;
|
||||
|
||||
/**
|
||||
* Contains utility methods for dealing with Java Generics.
|
||||
|
@ -72,4 +75,16 @@ public class GenericsUtil {
|
|||
return toArray(getClass(list.get(0)), list);
|
||||
}
|
||||
|
||||
/**
|
||||
* Determine whether the log of <code>clazz</code> is Log4j implementation.
|
||||
* @param clazz a class to be determined
|
||||
* @return true if the log of <code>clazz</code> is Log4j implementation.
|
||||
*/
|
||||
public static boolean isLog4jLogger(Class<?> clazz) {
|
||||
if (clazz == null) {
|
||||
return false;
|
||||
}
|
||||
Logger log = LoggerFactory.getLogger(clazz);
|
||||
return log instanceof Log4jLoggerAdapter;
|
||||
}
|
||||
}
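Usage of the new helper above is a one-liner; a minimal sketch:

```java
import org.apache.hadoop.util.GenericsUtil;

public class LoggerCheckExample {
  public static void main(String[] args) {
    // True only when the SLF4J binding for this class is the Log4j adapter,
    // so callers can skip Log4j-specific setup under other bindings.
    if (GenericsUtil.isLog4jLogger(LoggerCheckExample.class)) {
      System.out.println("Log4j backs the SLF4J logger for this class");
    } else {
      System.out.println("A non-Log4j SLF4J binding is in use");
    }
  }
}
```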
@ -0,0 +1,340 @@
|
|||
/**
|
||||
* Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
* contributor license agreements. See the NOTICE file distributed with this
|
||||
* work for additional information regarding copyright ownership. The ASF
|
||||
* licenses this file to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
* <p>
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
* <p>
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
* License for the specific language governing permissions and limitations under
|
||||
* the License.
|
||||
*/
|
||||
|
||||
package org.apache.hadoop.util.curator;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.nio.charset.Charset;
|
||||
import java.util.ArrayList;
|
||||
import java.util.Collections;
|
||||
import java.util.List;
|
||||
|
||||
import org.apache.curator.framework.AuthInfo;
|
||||
import org.apache.curator.framework.CuratorFramework;
|
||||
import org.apache.curator.framework.CuratorFrameworkFactory;
|
||||
import org.apache.curator.retry.RetryNTimes;
|
||||
import org.apache.hadoop.classification.InterfaceAudience;
|
||||
import org.apache.hadoop.conf.Configuration;
|
||||
import org.apache.hadoop.fs.CommonConfigurationKeys;
|
||||
import org.apache.hadoop.util.ZKUtil;
|
||||
import org.apache.zookeeper.CreateMode;
|
||||
import org.apache.zookeeper.data.ACL;
|
||||
import org.apache.zookeeper.data.Stat;
|
||||
import org.slf4j.Logger;
|
||||
import org.slf4j.LoggerFactory;
|
||||
|
||||
import com.google.common.base.Preconditions;
|
||||
|
||||
/**
|
||||
* Helper class that provides utility methods specific to ZK operations.
|
||||
*/
|
||||
@InterfaceAudience.Private
|
||||
public final class ZKCuratorManager {
|
||||
|
||||
private static final Logger LOG =
|
||||
LoggerFactory.getLogger(ZKCuratorManager.class);
|
||||
|
||||
/** Configuration for the ZooKeeper connection. */
|
||||
private final Configuration conf;
|
||||
|
||||
/** Curator for ZooKeeper. */
|
||||
private CuratorFramework curator;
|
||||
|
||||
|
||||
public ZKCuratorManager(Configuration config) throws IOException {
|
||||
this.conf = config;
|
||||
}
|
||||
|
||||
/**
|
||||
* Get the curator framework managing the ZooKeeper connection.
|
||||
* @return Curator framework.
|
||||
*/
|
||||
public CuratorFramework getCurator() {
|
||||
return curator;
|
||||
}
|
||||
|
||||
/**
|
||||
* Close the connection with ZooKeeper.
|
||||
*/
|
||||
public void close() {
|
||||
if (curator != null) {
|
||||
curator.close();
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Utility method to fetch the ZK ACLs from the configuration.
|
||||
* @throws java.io.IOException if the Zookeeper ACLs configuration file
|
||||
* cannot be read
|
||||
*/
|
||||
public static List<ACL> getZKAcls(Configuration conf) throws IOException {
|
||||
// Parse authentication from configuration.
|
||||
String zkAclConf = conf.get(CommonConfigurationKeys.ZK_ACL,
|
||||
CommonConfigurationKeys.ZK_ACL_DEFAULT);
|
||||
try {
|
||||
zkAclConf = ZKUtil.resolveConfIndirection(zkAclConf);
|
||||
return ZKUtil.parseACLs(zkAclConf);
|
||||
} catch (IOException | ZKUtil.BadAclFormatException e) {
|
||||
LOG.error("Couldn't read ACLs based on {}",
|
||||
CommonConfigurationKeys.ZK_ACL);
|
||||
throw e;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Utility method to fetch ZK auth info from the configuration.
|
||||
* @throws java.io.IOException if the Zookeeper ACLs configuration file
|
||||
* cannot be read
|
||||
*/
|
||||
public static List<ZKUtil.ZKAuthInfo> getZKAuths(Configuration conf)
|
||||
throws IOException {
|
||||
String zkAuthConf = conf.get(CommonConfigurationKeys.ZK_AUTH);
|
||||
try {
|
||||
zkAuthConf = ZKUtil.resolveConfIndirection(zkAuthConf);
|
||||
if (zkAuthConf != null) {
|
||||
return ZKUtil.parseAuth(zkAuthConf);
|
||||
} else {
|
||||
return Collections.emptyList();
|
||||
}
|
||||
} catch (IOException | ZKUtil.BadAuthFormatException e) {
|
||||
LOG.error("Couldn't read Auth based on {}",
|
||||
CommonConfigurationKeys.ZK_AUTH);
|
||||
throw e;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Start the connection to the ZooKeeper ensemble.
|
||||
* @param conf Configuration for the connection.
|
||||
* @throws IOException If the connection cannot be started.
|
||||
*/
|
||||
public void start() throws IOException {
|
||||
this.start(new ArrayList<>());
|
||||
}
|
||||
|
||||
/**
|
||||
* Start the connection to the ZooKeeper ensemble.
|
||||
* @param conf Configuration for the connection.
|
||||
* @param authInfos List of authentication keys.
|
||||
* @throws IOException If the connection cannot be started.
|
||||
*/
|
||||
public void start(List<AuthInfo> authInfos) throws IOException {
|
||||
|
||||
// Connect to the ZooKeeper ensemble
|
||||
String zkHostPort = conf.get(CommonConfigurationKeys.ZK_ADDRESS);
|
||||
if (zkHostPort == null) {
|
||||
throw new IOException(
|
||||
CommonConfigurationKeys.ZK_ADDRESS + " is not configured.");
|
||||
}
|
||||
int numRetries = conf.getInt(CommonConfigurationKeys.ZK_NUM_RETRIES,
|
||||
CommonConfigurationKeys.ZK_NUM_RETRIES_DEFAULT);
|
||||
int zkSessionTimeout = conf.getInt(CommonConfigurationKeys.ZK_TIMEOUT_MS,
|
||||
CommonConfigurationKeys.ZK_TIMEOUT_MS_DEFAULT);
|
||||
int zkRetryInterval = conf.getInt(
|
||||
CommonConfigurationKeys.ZK_RETRY_INTERVAL_MS,
|
||||
CommonConfigurationKeys.ZK_RETRY_INTERVAL_MS_DEFAULT);
|
||||
RetryNTimes retryPolicy = new RetryNTimes(numRetries, zkRetryInterval);
|
||||
|
||||
// Set up ZK auths
|
||||
List<ZKUtil.ZKAuthInfo> zkAuths = getZKAuths(conf);
|
||||
if (authInfos == null) {
|
||||
authInfos = new ArrayList<>();
|
||||
}
|
||||
for (ZKUtil.ZKAuthInfo zkAuth : zkAuths) {
|
||||
authInfos.add(new AuthInfo(zkAuth.getScheme(), zkAuth.getAuth()));
|
||||
}
|
||||
|
||||
CuratorFramework client = CuratorFrameworkFactory.builder()
|
||||
.connectString(zkHostPort)
|
||||
.sessionTimeoutMs(zkSessionTimeout)
|
||||
.retryPolicy(retryPolicy)
|
||||
.authorization(authInfos)
|
||||
.build();
|
||||
client.start();
|
||||
|
||||
this.curator = client;
|
||||
}
|
||||
|
||||
/**
|
||||
* Get ACLs for a ZNode.
|
||||
* @param path Path of the ZNode.
|
||||
* @return The list of ACLs.
|
||||
* @throws Exception
|
||||
*/
|
||||
public List<ACL> getACL(final String path) throws Exception {
|
||||
return curator.getACL().forPath(path);
|
||||
}
|
||||
|
||||
/**
|
||||
* Get the data in a ZNode.
|
||||
* @param path Path of the ZNode.
|
||||
* @return The data in the ZNode.
|
||||
* @throws Exception If it cannot contact Zookeeper.
|
||||
*/
|
||||
public byte[] getData(final String path) throws Exception {
|
||||
return curator.getData().forPath(path);
|
||||
}
|
||||
|
||||
/**
|
||||
* Get the data in a ZNode.
|
||||
* @param path Path of the ZNode.
|
||||
* @param stat
|
||||
* @return The data in the ZNode.
|
||||
* @throws Exception If it cannot contact Zookeeper.
|
||||
*/
|
||||
public byte[] getData(final String path, Stat stat) throws Exception {
|
||||
return curator.getData().storingStatIn(stat).forPath(path);
|
||||
}
|
||||
|
||||
/**
|
||||
* Get the data in a ZNode.
|
||||
* @param path Path of the ZNode.
|
||||
* @return The data in the ZNode.
|
||||
* @throws Exception If it cannot contact Zookeeper.
|
||||
*/
|
||||
public String getStringData(final String path) throws Exception {
|
||||
byte[] bytes = getData(path);
|
||||
return new String(bytes, Charset.forName("UTF-8"));
|
||||
}
|
||||
|
||||
/**
|
||||
* Get the data in a ZNode.
|
||||
* @param path Path of the ZNode.
|
||||
* @param stat Output statistics of the ZNode.
|
||||
* @return The data in the ZNode.
|
||||
* @throws Exception If it cannot contact Zookeeper.
|
||||
*/
|
||||
public String getStringData(final String path, Stat stat) throws Exception {
|
||||
byte[] bytes = getData(path, stat);
|
||||
return new String(bytes, Charset.forName("UTF-8"));
|
||||
}
|
||||
|
||||
/**
|
||||
* Set data into a ZNode.
|
||||
* @param path Path of the ZNode.
|
||||
* @param data Data to set.
|
||||
* @param version Version of the data to store.
|
||||
* @throws Exception If it cannot contact Zookeeper.
|
||||
*/
|
||||
public void setData(String path, byte[] data, int version) throws Exception {
|
||||
curator.setData().withVersion(version).forPath(path, data);
|
||||
}
|
||||
|
||||
/**
|
||||
* Set data into a ZNode.
|
||||
* @param path Path of the ZNode.
|
||||
* @param data Data to set as String.
|
||||
* @param version Version of the data to store.
|
||||
* @throws Exception If it cannot contact Zookeeper.
|
||||
*/
|
||||
public void setData(String path, String data, int version) throws Exception {
|
||||
byte[] bytes = data.getBytes(Charset.forName("UTF-8"));
|
||||
setData(path, bytes, version);
|
||||
}
|
||||
|
||||
/**
|
||||
* Get children of a ZNode.
|
||||
* @param path Path of the ZNode.
|
||||
* @return The list of children.
|
||||
* @throws Exception If it cannot contact Zookeeper.
|
||||
*/
|
||||
public List<String> getChildren(final String path) throws Exception {
|
||||
return curator.getChildren().forPath(path);
|
||||
}
|
||||
|
||||
/**
|
||||
* Check if a ZNode exists.
|
||||
* @param path Path of the ZNode.
|
||||
* @return If the ZNode exists.
|
||||
* @throws Exception If it cannot contact Zookeeper.
|
||||
*/
|
||||
public boolean exists(final String path) throws Exception {
|
||||
return curator.checkExists().forPath(path) != null;
|
||||
}
|
||||
|
||||
/**
|
||||
* Create a ZNode.
|
||||
* @param path Path of the ZNode.
|
||||
* @return If the ZNode was created.
|
||||
* @throws Exception If it cannot contact Zookeeper.
|
||||
*/
|
||||
public boolean create(final String path) throws Exception {
|
||||
return create(path, null);
|
||||
}
|
||||
|
||||
/**
|
||||
* Create a ZNode.
|
||||
* @param path Path of the ZNode.
|
||||
* @param zkAcl ACL for the node.
|
||||
* @return If the ZNode was created.
|
||||
* @throws Exception If it cannot contact Zookeeper.
|
||||
*/
|
||||
public boolean create(final String path, List<ACL> zkAcl) throws Exception {
|
||||
boolean created = false;
|
||||
if (!exists(path)) {
|
||||
curator.create()
|
||||
.withMode(CreateMode.PERSISTENT)
|
||||
.withACL(zkAcl)
|
||||
.forPath(path, null);
|
||||
created = true;
|
||||
}
|
||||
return created;
|
||||
}
|
||||
|
||||
/**
|
||||
* Utility function to ensure that the configured base znode exists.
|
||||
* This recursively creates the znode as well as all of its parents.
|
||||
* @param path Path of the znode to create.
|
||||
* @throws Exception If it cannot create the file.
|
||||
*/
|
||||
public void createRootDirRecursively(String path) throws Exception {
|
||||
String[] pathParts = path.split("/");
|
||||
Preconditions.checkArgument(
|
||||
pathParts.length >= 1 && pathParts[0].isEmpty(),
|
||||
"Invalid path: %s", path);
|
||||
StringBuilder sb = new StringBuilder();
|
||||
|
||||
for (int i = 1; i < pathParts.length; i++) {
|
||||
sb.append("/").append(pathParts[i]);
|
||||
create(sb.toString());
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Delete a ZNode.
|
||||
* @param path Path of the ZNode.
|
||||
* @return If the znode was deleted.
|
||||
* @throws Exception If it cannot contact ZooKeeper.
|
||||
*/
|
||||
public boolean delete(final String path) throws Exception {
|
||||
if (exists(path)) {
|
||||
curator.delete().deletingChildrenIfNeeded().forPath(path);
|
||||
return true;
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
/**
|
||||
* Get the path for a ZNode.
|
||||
* @param root Root of the ZNode.
|
||||
* @param nodeName Name of the ZNode.
|
||||
* @return Path for the ZNode.
|
||||
*/
|
||||
public static String getNodePath(String root, String nodeName) {
|
||||
return root + "/" + nodeName;
|
||||
}
|
||||
}
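A hedged end-to-end sketch of the class above, using only methods shown in this diff; the ensemble address and znode paths are invented.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.util.curator.ZKCuratorManager;

public class ZKCuratorManagerExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // hadoop.zk.address; the ensemble below is a placeholder.
    conf.set(CommonConfigurationKeys.ZK_ADDRESS, "zk1:2181,zk2:2181,zk3:2181");

    ZKCuratorManager zkManager = new ZKCuratorManager(conf);
    zkManager.start();
    try {
      zkManager.createRootDirRecursively("/federation/store");
      zkManager.setData("/federation/store", "hello", -1);   // -1 = any version
      System.out.println(zkManager.getStringData("/federation/store"));
      System.out.println(zkManager.getChildren("/federation"));
    } finally {
      zkManager.close();
    }
  }
}
```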
@ -0,0 +1,27 @@
|
|||
/**
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
* regarding copyright ownership. The ASF licenses this file
|
||||
* to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
/**
|
||||
* This package provides utilities to interact with Curator ZooKeeper.
|
||||
*/
|
||||
@InterfaceAudience.Public
|
||||
@InterfaceStability.Evolving
|
||||
package org.apache.hadoop.util.curator;
|
||||
|
||||
import org.apache.hadoop.classification.InterfaceAudience;
|
||||
import org.apache.hadoop.classification.InterfaceStability;
@ -0,0 +1,69 @@
|
|||
/**
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
* regarding copyright ownership. The ASF licenses this file
|
||||
* to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
/**
 * These .proto interfaces are private and stable.
 * Please see http://wiki.apache.org/hadoop/Compatibility
 * for what changes are allowed for a *stable* .proto interface.
 */

option java_package = "org.apache.hadoop.fs";
option java_outer_classname = "FSProtos";
option java_generic_services = true;
option java_generate_equals_and_hash = true;
package hadoop.fs;

message FsPermissionProto {
  required uint32 perm = 1; // UNIX-style mode bits
}

/*
 * FileStatus encoding. Field IDs match those from HdfsFileStatusProto, but
 * cross-serialization is not an explicitly supported use case. Unlike HDFS,
 * most fields are optional and do not define defaults.
 */
message FileStatusProto {
  enum FileType {
    FT_DIR = 1;
    FT_FILE = 2;
    FT_SYMLINK = 3;
  }
  enum Flags {
    HAS_ACL = 0x01; // has ACLs
    HAS_CRYPT = 0x02; // encrypted
    HAS_EC = 0x04; // erasure coded
  }
  required FileType fileType = 1;
  required string path = 2;
  optional uint64 length = 3;
  optional FsPermissionProto permission = 4;
  optional string owner = 5;
  optional string group = 6;
  optional uint64 modification_time = 7;
  optional uint64 access_time = 8;
  optional string symlink = 9;
  optional uint32 block_replication = 10;
  optional uint64 block_size = 11;
  // locations = 12
  // alias = 13
  // childrenNum = 14
  optional bytes encryption_data = 15;
  // storagePolicy = 16
  optional bytes ec_data = 17;
  optional uint32 flags = 18 [default = 0];
}
@ -1321,6 +1321,18 @@
|
|||
</property>
|
||||
|
||||
<!-- Azure file system properties -->
|
||||
<property>
|
||||
<name>fs.wasb.impl</name>
|
||||
<value>org.apache.hadoop.fs.azure.NativeAzureFileSystem</value>
|
||||
<description>The implementation class of the Native Azure Filesystem</description>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>fs.wasbs.impl</name>
|
||||
<value>org.apache.hadoop.fs.azure.NativeAzureFileSystem$Secure</value>
|
||||
<description>The implementation class of the Secure Native Azure Filesystem</description>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>fs.azure.secure.mode</name>
|
||||
<value>false</value>
|
||||
|
@ -2574,11 +2586,16 @@
|
|||
<value>ClientCredential</value>
|
||||
<description>
|
||||
Defines Azure Active Directory OAuth2 access token provider type.
|
||||
Supported types are ClientCredential, RefreshToken, and Custom.
|
||||
Supported types are ClientCredential, RefreshToken, MSI, DeviceCode,
|
||||
and Custom.
|
||||
The ClientCredential type requires property fs.adl.oauth2.client.id,
|
||||
fs.adl.oauth2.credential, and fs.adl.oauth2.refresh.url.
|
||||
The RefreshToken type requires property fs.adl.oauth2.client.id and
|
||||
fs.adl.oauth2.refresh.token.
|
||||
The MSI type requires properties fs.adl.oauth2.msi.port and
|
||||
fs.adl.oauth2.msi.tenantguid.
|
||||
The DeviceCode type requires property
|
||||
fs.adl.oauth2.devicecode.clientapp.id.
|
||||
The Custom type requires property fs.adl.oauth2.access.token.provider.
|
||||
</description>
|
||||
</property>
|
||||
|
@ -2615,6 +2632,36 @@
|
|||
</description>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>fs.adl.oauth2.msi.port</name>
|
||||
<value></value>
|
||||
<description>
|
||||
The localhost port for the MSI token service. This is the port specified
|
||||
when creating the Azure VM.
|
||||
Used by MSI token provider.
|
||||
</description>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>fs.adl.oauth2.msi.tenantguid</name>
|
||||
<value></value>
|
||||
<description>
|
||||
The tenant guid for the Azure AAD tenant under which the azure data lake
|
||||
store account is created.
|
||||
Used by MSI token provider.
|
||||
</description>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>fs.adl.oauth2.devicecode.clientapp.id</name>
|
||||
<value></value>
|
||||
<description>
|
||||
The app id of the AAD native app in whose context the auth request
|
||||
should be made.
|
||||
Used by DeviceCode token provider.
|
||||
</description>
|
||||
</property>
|
||||
|
||||
<!-- Azure Data Lake File System Configurations Ends Here-->
|
||||
|
||||
<property>
|
||||
|
@ -2663,4 +2710,50 @@
|
|||
This determines the number of open file handles.
|
||||
</description>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<description>Host:Port of the ZooKeeper server to be used.
|
||||
</description>
|
||||
<name>hadoop.zk.address</name>
|
||||
<!--value>127.0.0.1:2181</value-->
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<description>Number of tries to connect to ZooKeeper.</description>
|
||||
<name>hadoop.zk.num-retries</name>
|
||||
<value>1000</value>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<description>Retry interval in milliseconds when connecting to ZooKeeper.
|
||||
</description>
|
||||
<name>hadoop.zk.retry-interval-ms</name>
|
||||
<value>1000</value>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<description>ZooKeeper session timeout in milliseconds. Session expiration
|
||||
is managed by the ZooKeeper cluster itself, not by the client. This value is
|
||||
used by the cluster to determine when the client's session expires.
|
||||
Expirations happens when the cluster does not hear from the client within
|
||||
the specified session timeout period (i.e. no heartbeat).</description>
|
||||
<name>hadoop.zk.timeout-ms</name>
|
||||
<value>10000</value>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<description>ACL's to be used for ZooKeeper znodes.</description>
|
||||
<name>hadoop.zk.acl</name>
|
||||
<value>world:anyone:rwcda</value>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<description>
|
||||
Specify the auths to be used for the ACL's specified in hadoop.zk.acl.
|
||||
This takes a comma-separated list of authentication mechanisms, each of the
|
||||
form 'scheme:auth' (the same syntax used for the 'addAuth' command in
|
||||
the ZK CLI).
|
||||
</description>
|
||||
<name>hadoop.zk.auth</name>
|
||||
</property>
|
||||
</configuration>
@ -676,11 +676,11 @@ stat
|
|||
|
||||
Usage: `hadoop fs -stat [format] <path> ...`
|
||||
|
||||
Print statistics about the file/directory at \<path\> in the specified format. Format accepts permissions in octal (%a) and symbolic (%A), filesize in bytes (%b), type (%F), group name of owner (%g), name (%n), block size (%o), replication (%r), user name of owner(%u), and modification date (%y, %Y). %y shows UTC date as "yyyy-MM-dd HH:mm:ss" and %Y shows milliseconds since January 1, 1970 UTC. If the format is not specified, %y is used by default.
|
||||
Print statistics about the file/directory at \<path\> in the specified format. Format accepts permissions in octal (%a) and symbolic (%A), filesize in bytes (%b), type (%F), group name of owner (%g), name (%n), block size (%o), replication (%r), user name of owner(%u), access date(%x, %X), and modification date (%y, %Y). %x and %y show UTC date as "yyyy-MM-dd HH:mm:ss", and %X and %Y show milliseconds since January 1, 1970 UTC. If the format is not specified, %y is used by default.
|
||||
|
||||
Example:
|
||||
|
||||
* `hadoop fs -stat "%F %a %u:%g %b %y %n" /file`
|
||||
* `hadoop fs -stat "type:%F perm:%a %u:%g size:%b mtime:%y atime:%x name:%n" /file`
|
||||
|
||||
Exit Code: Returns 0 on success and -1 on error.
@ -145,6 +145,9 @@ Each metrics record contains tags such as ProcessName, SessionId, and Hostname a
|
|||
| `CreateSymlinkOps` | Total number of createSymlink operations |
|
||||
| `GetLinkTargetOps` | Total number of getLinkTarget operations |
|
||||
| `FilesInGetListingOps` | Total number of files and directories listed by directory listing operations |
|
||||
| `SuccessfulReReplications` | Total number of successful block re-replications |
|
||||
| `NumTimesReReplicationNotScheduled` | Total number of times that failed to schedule a block re-replication |
|
||||
| `TimeoutReReplications` | Total number of timed out block re-replications |
|
||||
| `AllowSnapshotOps` | Total number of allowSnapshot operations |
|
||||
| `DisallowSnapshotOps` | Total number of disallowSnapshot operations |
|
||||
| `CreateSnapshotOps` | Total number of createSnapshot operations |
|
||||
|
@ -157,8 +160,8 @@ Each metrics record contains tags such as ProcessName, SessionId, and Hostname a
|
|||
| `SyncsNumOps` | Total number of Journal syncs |
|
||||
| `SyncsAvgTime` | Average time of Journal syncs in milliseconds |
|
||||
| `TransactionsBatchedInSync` | Total number of Journal transactions batched in sync |
|
||||
| `BlockReportNumOps` | Total number of processing block reports from DataNode |
|
||||
| `BlockReportAvgTime` | Average time of processing block reports in milliseconds |
|
||||
| `StorageBlockReportNumOps` | Total number of processing block reports from individual storages in DataNode |
|
||||
| `StorageBlockReportAvgTime` | Average time of processing block reports in milliseconds |
|
||||
| `CacheReportNumOps` | Total number of processing cache reports from DataNode |
|
||||
| `CacheReportAvgTime` | Average time of processing cache reports in milliseconds |
|
||||
| `SafeModeTime` | The interval between FSNameSystem starts and the last time safemode leaves in milliseconds. (sometimes not equal to the time in SafeMode, see [HDFS-5156](https://issues.apache.org/jira/browse/HDFS-5156)) |
|
||||
|
@ -176,6 +179,8 @@ Each metrics record contains tags such as ProcessName, SessionId, and Hostname a
|
|||
| `GenerateEDEKTimeAvgTime` | Average time of generating EDEK in milliseconds |
|
||||
| `WarmUpEDEKTimeNumOps` | Total number of warming up EDEK |
|
||||
| `WarmUpEDEKTimeAvgTime` | Average time of warming up EDEK in milliseconds |
|
||||
| `ResourceCheckTime`*num*`s(50|75|90|95|99)thPercentileLatency` | The 50/75/90/95/99th percentile of NameNode resource check latency in milliseconds. Percentile measurement is off by default, by watching no intervals. The intervals are specified by `dfs.metrics.percentiles.intervals`. |
|
||||
| `StorageBlockReport`*num*`s(50|75|90|95|99)thPercentileLatency` | The 50/75/90/95/99th percentile of storage block report latency in milliseconds. Percentile measurement is off by default, by watching no intervals. The intervals are specified by `dfs.metrics.percentiles.intervals`. |
|
||||
|
||||
FSNamesystem
|
||||
------------
|
||||
|
@ -213,7 +218,15 @@ Each metrics record contains tags such as HAState and Hostname as additional inf
|
|||
| `PendingDataNodeMessageCount` | (HA-only) Current number of pending block-related messages for later processing in the standby NameNode |
|
||||
| `MillisSinceLastLoadedEdits` | (HA-only) Time in milliseconds since the last time standby NameNode load edit log. In active NameNode, set to 0 |
|
||||
| `BlockCapacity` | Current number of block capacity |
|
||||
| `NumLiveDataNodes` | Number of datanodes which are currently live |
|
||||
| `NumDeadDataNodes` | Number of datanodes which are currently dead |
|
||||
| `NumDecomLiveDataNodes` | Number of datanodes which have been decommissioned and are now live |
|
||||
| `NumDecomDeadDataNodes` | Number of datanodes which have been decommissioned and are now dead |
|
||||
| `NumDecommissioningDataNodes` | Number of datanodes in decommissioning state |
|
||||
| `VolumeFailuresTotal` | Total number of volume failures across all Datanodes |
|
||||
| `EstimatedCapacityLostTotal` | An estimate of the total capacity lost due to volume failures |
|
||||
| `StaleDataNodes` | Current number of DataNodes marked stale due to delayed heartbeat |
|
||||
| `NumStaleStorages` | Number of storages marked as content stale (after NameNode restart/failover before first block report is received) |
|
||||
| `MissingReplOneBlocks` | Current number of missing blocks with replication factor 1 |
|
||||
| `NumFilesUnderConstruction` | Current number of files under construction |
|
||||
| `NumActiveClients` | Current number of active clients holding lease |
|
||||
|
@ -224,6 +237,9 @@ Each metrics record contains tags such as HAState and Hostname as additional inf
|
|||
| `TotalSyncTimes` | Total number of milliseconds spent by various edit logs in sync operation|
|
||||
| `NameDirSize` | NameNode name directories size in bytes |
|
||||
| `NumTimedOutPendingReconstructions` | The number of timed out reconstructions. Not the number of unique blocks that timed out. |
|
||||
| `NumInMaintenanceLiveDataNodes` | Number of live Datanodes which are in maintenance state |
|
||||
| `NumInMaintenanceDeadDataNodes` | Number of dead Datanodes which are in maintenance state |
|
||||
| `NumEnteringMaintenanceDataNodes` | Number of Datanodes that are entering the maintenance state |
|
||||
| `FSN(Read|Write)Lock`*OperationName*`NumOps` | Total number of acquiring lock by operations |
|
||||
| `FSN(Read|Write)Lock`*OperationName*`AvgTime` | Average time of holding the lock by operations in milliseconds |
@ -142,12 +142,9 @@ In a typical cluster HDFS and YARN services will be launched as the system `hdfs
|
|||
<property>
|
||||
<name>hadoop.security.auth_to_local</name>
|
||||
<value>
|
||||
RULE:[2:$1@$0](nn/.*@.*REALM.TLD)s/.*/hdfs/
|
||||
RULE:[2:$1@$0](jn/.*@.*REALM.TLD)s/.*/hdfs/
|
||||
RULE:[2:$1@$0](dn/.*@.*REALM.TLD)s/.*/hdfs/
|
||||
RULE:[2:$1@$0](nm/.*@.*REALM.TLD)s/.*/yarn/
|
||||
RULE:[2:$1@$0](rm/.*@.*REALM.TLD)s/.*/yarn/
|
||||
RULE:[2:$1@$0](jhs/.*@.*REALM.TLD)s/.*/mapred/
|
||||
RULE:[2:$1/$2@$0]([ndj]n/.*@REALM.TLD)s/.*/hdfs/
|
||||
RULE:[2:$1/$2@$0]([rn]m/.*@REALM.TLD)s/.*/yarn/
|
||||
RULE:[2:$1/$2@$0](jhs/.*@REALM.TLD)s/.*/mapred/
|
||||
DEFAULT
|
||||
</value>
|
||||
</property>
|
||||
|
|
|
@ -78,13 +78,27 @@ A special value of `*` implies that all users are allowed to access the service.
|
|||
|
||||
If access control list is not defined for a service, the value of `security.service.authorization.default.acl` is applied. If `security.service.authorization.default.acl` is not defined, `*` is applied.
|
||||
|
||||
* Blocked Access Control ListsIn some cases, it is required to specify blocked access control list for a service. This specifies the list of users and groups who are not authorized to access the service. The format of the blocked access control list is same as that of access control list. The blocked access control list can be specified via `$HADOOP_CONF_DIR/hadoop-policy.xml`. The property name is derived by suffixing with ".blocked".
|
||||
### Blocked Access Control Lists
|
||||
|
||||
Example: The property name of blocked access control list for `security.client.protocol.acl` will be `security.client.protocol.acl.blocked`
|
||||
In some cases, it is required to specify blocked access control list for a service. This specifies the list of users and groups who are not authorized to access the service. The format of the blocked access control list is same as that of access control list. The blocked access control list can be specified via `$HADOOP_CONF_DIR/hadoop-policy.xml`. The property name is derived by suffixing with ".blocked".
|
||||
|
||||
For a service, it is possible to specify both an access control list and a blocked control list. A user is authorized to access the service if the user is in the access control and not in the blocked access control list.
|
||||
Example: The property name of blocked access control list for `security.client.protocol.acl` will be `security.client.protocol.acl.blocked`
|
||||
|
||||
If blocked access control list is not defined for a service, the value of `security.service.authorization.default.acl.blocked` is applied. If `security.service.authorization.default.acl.blocked` is not defined, empty blocked access control list is applied.
|
||||
For a service, it is possible to specify both an access control list and a blocked control list. A user is authorized to access the service if the user is in the access control and not in the blocked access control list.
|
||||
|
||||
If blocked access control list is not defined for a service, the value of `security.service.authorization.default.acl.blocked` is applied. If `security.service.authorization.default.acl.blocked` is not defined, empty blocked access control list is applied.
|
||||
|
||||
### Access Control using Lists of IP Addresses, Host Names and IP Ranges
|
||||
|
||||
Access to a service can be controlled based on the ip address of the client accessing the service. It is possible to restrict access to a service from a set of machines by specifying a list of ip addresses, host names and ip ranges. The property name for each service is derived from the corresponding acl's property name. If the property name of acl is security.client.protocol.acl, property name for the hosts list will be security.client.protocol.hosts.
|
||||
|
||||
If hosts list is not defined for a service, the value of `security.service.authorization.default.hosts` is applied. If `security.service.authorization.default.hosts` is not defined, `*` is applied.
|
||||
|
||||
It is possible to specify a blocked list of hosts. Only those machines which are in the hosts list, but not in the blocked hosts list will be granted access to the service. The property name is derived by suffixing with ".blocked".
|
||||
|
||||
Example: The property name of blocked hosts list for `security.client.protocol.hosts` will be `security.client.protocol.hosts.blocked`
|
||||
|
||||
If blocked hosts list is not defined for a service, the value of `security.service.authorization.default.hosts.blocked` is applied. If `security.service.authorization.default.hosts.blocked` is not defined, empty blocked hosts list is applied.
|
||||
|
||||
### Refreshing Service Level Authorization Configuration
|
||||
|
||||
|
@ -100,16 +114,6 @@ Refresh the service-level authorization configuration for the ResourceManager:
|
|||
|
||||
Of course, one can use the `security.refresh.policy.protocol.acl` property in `$HADOOP_CONF_DIR/hadoop-policy.xml` to restrict access to the ability to refresh the service-level authorization configuration to certain users/groups.
|
||||
|
||||
* Access Control using list of ip addresses, host names and ip rangesAccess to a service can be controlled based on the ip address of the client accessing the service. It is possible to restrict access to a service from a set of machines by specifying a list of ip addresses, host names and ip ranges. The property name for each service is derived from the corresponding acl's property name. If the property name of acl is security.client.protocol.acl, property name for the hosts list will be security.client.protocol.hosts.
|
||||
|
||||
If hosts list is not defined for a service, the value of `security.service.authorization.default.hosts` is applied. If `security.service.authorization.default.hosts` is not defined, `*` is applied.
|
||||
|
||||
It is possible to specify a blocked list of hosts. Only those machines which are in the hosts list, but not in the blocked hosts list will be granted access to the service. The property name is derived by suffixing with ".blocked".
|
||||
|
||||
Example: The property name of blocked hosts list for `security.client.protocol.hosts` will be `security.client.protocol.hosts.blocked`
|
||||
|
||||
If blocked hosts list is not defined for a service, the value of `security.service.authorization.default.hosts.blocked` is applied. If `security.service.authorization.default.hosts.blocked` is not defined, empty blocked hosts list is applied.
|
||||
|
||||
### Examples
|
||||
|
||||
Allow only users `alice`, `bob` and users in the `mapreduce` group to submit jobs to the MapReduce cluster:
|
||||
|
|
|
@ -180,11 +180,11 @@ It is also possible to add the new subcommands to the usage output. The `hadoop_
|
|||
|
||||
```bash
|
||||
if [[ "${HADOOP_SHELL_EXECNAME}" = "yarn" ]]; then
|
||||
hadoop_add_subcommand "hello" "Print some text to the screen"
|
||||
hadoop_add_subcommand "hello" client "Print some text to the screen"
|
||||
fi
|
||||
```
|
||||
|
||||
This functionality may also be used to override the built-ins. For example, defining:
|
||||
We set the subcommand type to be "client" as there are no special restrictions, extra capabilities, etc. This functionality may also be used to override the built-ins. For example, defining:
|
||||
|
||||
```bash
|
||||
function hdfs_subcommand_fetchdt
|
||||
|
|
|
@ -553,7 +553,7 @@ on a path that exists and is a file. Instead the operation returns false.
|
|||
FS' = FS
|
||||
result = False
|
||||
|
||||
### `FSDataOutputStream create(Path, ...)`
|
||||
### <a name='FileSystem.create'></a> `FSDataOutputStream create(Path, ...)`
|
||||
|
||||
|
||||
FSDataOutputStream create(Path p,
|
||||
|
@ -616,7 +616,24 @@ this precondition fails.
|
|||
|
||||
* Not covered: symlinks. The resolved path of the symlink is used as the final path argument to the `create()` operation
|
||||
|
||||
### `FSDataOutputStream append(Path p, int bufferSize, Progressable progress)`
|
||||
### `FSDataOutputStreamBuilder createFile(Path p)`
|
||||
|
||||
Make an `FSDataOutputStreamBuilder` to specify the parameters with which to create a file.
|
||||
|
||||
#### Implementation Notes
|
||||
|
||||
`createFile(p)` returns an `FSDataOutputStreamBuilder` only and does not
|
||||
change the filesystem immediately. When `build()` is invoked on the `FSDataOutputStreamBuilder`,
|
||||
the builder parameters are verified and [`create(Path p)`](#FileSystem.create)
|
||||
is invoked on the underlying filesystem. `build()` has the same preconditions
|
||||
and postconditions as [`create(Path p)`](#FileSystem.create).
|
||||
|
||||
* Similar to [`create(Path p)`](#FileSystem.create), files are overwritten
|
||||
by default, unless `builder.overwrite(false)` is specified.
|
||||
* Unlike [`create(Path p)`](#FileSystem.create), missing parent directories are
|
||||
not created by default, unless `builder.recursive()` is specified (a usage sketch follows this list).
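The sketch below shows one way to drive this builder; it is illustrative only. The path, the payload, and the use of a default `Configuration` are assumptions for the example, not part of the specification.

```java
import java.nio.charset.StandardCharsets;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

// Minimal sketch: create a file via the builder API.
// The path and payload are placeholders chosen for illustration.
public class CreateFileSketch {
  public static void main(String[] args) throws Exception {
    FileSystem fs = FileSystem.get(new Configuration());
    Path p = new Path("/tmp/example.txt");
    try (FSDataOutputStream out = fs.createFile(p)
        .overwrite(false)   // fail if the file already exists
        .recursive()        // create missing parent directories
        .build()) {         // create(Path, ...) is applied here, not at createFile()
      out.write("example data".getBytes(StandardCharsets.UTF_8));
    }
  }
}
```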
|
||||
|
||||
### <a name='FileSystem.append'></a> `FSDataOutputStream append(Path p, int bufferSize, Progressable progress)`
|
||||
|
||||
Implementations without a compliant call SHOULD throw `UnsupportedOperationException`.
|
||||
|
||||
|
@ -634,6 +651,18 @@ Implementations without a compliant call SHOULD throw `UnsupportedOperationExcep
|
|||
Return: `FSDataOutputStream`, which can update the entry `FS.Files[p]`
|
||||
by appending data to the existing list.
|
||||
|
||||
### `FSDataOutputStreamBuilder appendFile(Path p)`
|
||||
|
||||
Make an `FSDataOutputStreamBuilder` to specify the parameters with which to append to an
|
||||
existing file.
|
||||
|
||||
#### Implementation Notes
|
||||
|
||||
`appendFile(p)` returns an `FSDataOutputStreamBuilder` only and does not
|
||||
change the filesystem immediately. When `build()` is invoked on the `FSDataOutputStreamBuilder`,
|
||||
the builder parameters are verified and [`append()`](#FileSystem.append) is
|
||||
invoked on the underlying filesystem. `build()` has the same preconditions and
|
||||
postconditions as [`append()`](#FileSystem.append).
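Continuing the sketch above (the same `fs` and `p`, which are illustrative assumptions), a file that already exists on a filesystem supporting append can be extended as follows:

```java
// Minimal sketch: append via the builder API; append() is applied at build().
// Assumes the target file exists and the filesystem supports append.
try (FSDataOutputStream out = fs.appendFile(p).build()) {
  out.write(" more data".getBytes(StandardCharsets.UTF_8));
}
```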
|
||||
|
||||
### `FSDataInputStream open(Path f, int bufferSize)`
|
||||
|
||||
|
@ -1210,3 +1239,27 @@ try {
|
|||
It is notable that this is *not* done in the Hadoop codebase. This does not imply
|
||||
that robust loops are not recommended; rather, that the concurrency
|
||||
problems were not considered during the implementation of these loops.
|
||||
|
||||
|
||||
## <a name="StreamCapability"></a> interface `StreamCapabilities`
|
||||
|
||||
The `StreamCapabilities` interface provides a way to programmatically query the
|
||||
capabilities that an `OutputStream` supports.
|
||||
|
||||
```java
|
||||
public interface StreamCapabilities {
|
||||
boolean hasCapability(String capability);
|
||||
}
|
||||
```
|
||||
|
||||
### `boolean hasCapability(capability)`
|
||||
|
||||
Return true if the `OutputStream` has the desired capability.
|
||||
|
||||
The caller can query the capabilities of a stream using a string value.
|
||||
The following capabilities can currently be queried (a usage sketch follows the list):
|
||||
|
||||
* `StreamCapabilities.HFLUSH` ("*hflush*"): the capability to flush out the data
|
||||
in the client's buffer.
|
||||
* `StreamCapabilities.HSYNC` ("*hsync*"): the capability to flush out the data in
|
||||
the client's buffer and sync it to the disk device.
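A short sketch of querying a stream before relying on durability guarantees, continuing the earlier example; the `instanceof` guard and the fallback to `flush()` are defensive assumptions rather than requirements of this interface:

```java
// Sketch: probe for the "hsync" capability before depending on it.
// Not every OutputStream implements StreamCapabilities, hence the guard.
FSDataOutputStream out = fs.createFile(p).build();
if (out instanceof StreamCapabilities
    && ((StreamCapabilities) out).hasCapability("hsync")) {
  out.hsync();   // flush the client buffer and persist to the disk device
} else {
  out.flush();   // best-effort flush of the client buffer only
}
```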
|
|
@ -0,0 +1,182 @@
|
|||
<!---
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License. See accompanying LICENSE file.
|
||||
-->
|
||||
|
||||
<!-- ============================================================= -->
|
||||
<!-- CLASS: FSDataOutputStreamBuilder -->
|
||||
<!-- ============================================================= -->
|
||||
|
||||
# class `org.apache.hadoop.fs.FSDataOutputStreamBuilder`
|
||||
|
||||
<!-- MACRO{toc|fromDepth=1|toDepth=2} -->
|
||||
|
||||
Builder pattern for `FSDataOutputStream` and its subclasses. It is used to
|
||||
create a new file or open an existing file on `FileSystem` for writing.
|
||||
|
||||
## Invariants
|
||||
|
||||
The `FSDataOutputStreamBuilder` interface does not validate parameters
|
||||
or modify the state of the `FileSystem` until [`build()`](#Builder.build) is
|
||||
invoked.
|
||||
|
||||
## Implementation-agnostic parameters.
|
||||
|
||||
### <a name="Builder.create"></a> `FSDataOutputStreamBuilder create()`
|
||||
|
||||
Specify `FSDataOutputStreamBuilder` to create a file on `FileSystem`, equivalent
|
||||
to `CreateFlag#CREATE`.
|
||||
|
||||
### <a name="Builder.append"></a> `FSDataOutputStreamBuilder append()`
|
||||
|
||||
Specify `FSDataOutputStreamBuilder` to append to an existing file on
|
||||
`FileSystem`, equivalent to `CreateFlag#APPEND`.
|
||||
|
||||
### <a name="Builder.overwrite"></a> `FSDataOutputStreamBuilder overwrite(boolean overwrite)`
|
||||
|
||||
Specify `FSDataOutputStreamBuilder` to overwrite an existing file or not. If
|
||||
`overwrite==true` is given, it truncates an existing file, equivalent to
|
||||
`CreateFlag#OVERWRITE`.
|
||||
|
||||
### <a name="Builder.permission"></a> `FSDataOutputStreamBuilder permission(FsPermission permission)`
|
||||
|
||||
Set permission for the file.
|
||||
|
||||
### <a name="Builder.bufferSize"></a> `FSDataOutputStreamBuilder bufferSize(int bufSize)`
|
||||
|
||||
Set the size of the buffer to be used.
|
||||
|
||||
### <a name="Builder.replication"></a> `FSDataOutputStreamBuilder replication(short replica)`
|
||||
|
||||
Set the replication factor.
|
||||
|
||||
### <a name="Builder.blockSize"></a> `FSDataOutputStreamBuilder blockSize(long size)`
|
||||
|
||||
Set block size in bytes.
|
||||
|
||||
### <a name="Builder.recursive"></a> `FSDataOutputStreamBuilder recursive()`
|
||||
|
||||
Create parent directories if they do not exist.
|
||||
|
||||
### <a name="Builder.progress"></a> `FSDataOutputStreamBuilder progress(Progresable prog)`
|
||||
|
||||
Set the facility for reporting progress.
|
||||
|
||||
### <a name="Builder.checksumOpt"></a> `FSDataOutputStreamBuilder checksumOpt(ChecksumOpt chksumOpt)`
|
||||
|
||||
Set the checksum options (`ChecksumOpt`).
|
||||
|
||||
### Set optional or mandatory parameters
|
||||
|
||||
FSDataOutputStreamBuilder opt(String key, ...)
|
||||
FSDataOutputStreamBuilder must(String key, ...)
|
||||
|
||||
Set optional or mandatory parameters to the builder. Using `opt()` or `must()`,
|
||||
a client can specify FS-specific parameters without inspecting the concrete type
|
||||
of `FileSystem`.
|
||||
|
||||
// Don't
|
||||
if (fs instanceof FooFileSystem) {
|
||||
FooFileSystem ffs = (FooFileSystem) fs;
|
||||
out = ffs.createFile(path)
|
||||
.optionA()
|
||||
.optionB("value")
|
||||
.cache()
|
||||
.build()
|
||||
} else if (fs instanceof BarFileSystem) {
|
||||
...
|
||||
}
|
||||
|
||||
// Do
|
||||
out = fs.createFile(path)
|
||||
.permission(perm)
|
||||
.bufferSize(bufSize)
|
||||
.opt("foofs:option.a", true)
|
||||
.opt("foofs:option.b", "value")
|
||||
.opt("barfs:cache", true)
|
||||
.must("foofs:cache", true)
|
||||
.must("barfs:cache-size", 256 * 1024 * 1024)
|
||||
.build();
|
||||
|
||||
#### Implementation Notes
|
||||
|
||||
The concrete `FileSystem` and/or `FSDataOutputStreamBuilder` implementation
|
||||
MUST verify that implementation-agnostic parameters (e.g., "syncable") or
|
||||
implementation-specific parameters (e.g., "foofs:cache")
|
||||
are supported. `FileSystem` will satisfy optional parameters (via `opt(key, ...)`)
|
||||
on a best-effort basis. If the mandatory parameters (via `must(key, ...)`) cannot be satisfied
|
||||
by the `FileSystem`, an `IllegalArgumentException` should be thrown in `build()`.
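As a sketch of that failure mode, continuing the earlier example; `examplefs:cache` is a deliberately hypothetical key used only to show where the failure surfaces:

```java
// Sketch: a mandatory FS-specific key the target filesystem cannot satisfy.
// "examplefs:cache" is hypothetical, not a real option.
try {
  fs.createFile(p)
      .must("examplefs:cache", true)
      .build()                 // verification of mandatory keys happens here
      .close();
} catch (IllegalArgumentException e) {
  // the filesystem rejected the mandatory parameter
}
```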
|
||||
|
||||
The behavior of resolving the conflicts between the parameters set by
|
||||
builder methods (e.g., `bufferSize()`) and `opt()`/`must()` is undefined.
|
||||
|
||||
## HDFS-specific parameters.
|
||||
|
||||
`HdfsDataOutputStreamBuilder extends FSDataOutputStreamBuilder` provides additional
|
||||
HDFS-specific parameters to further customize file creation and append behavior.
|
||||
|
||||
### `FSDataOutputStreamBuilder favoredNodes(InetSocketAddress[] nodes)`
|
||||
|
||||
Set favored DataNodes for new blocks.
|
||||
|
||||
### `FSDataOutputStreamBuilder syncBlock()`
|
||||
|
||||
Force closed blocks to the disk device. See `CreateFlag#SYNC_BLOCK`.
|
||||
|
||||
### `FSDataOutputStreamBuilder lazyPersist()`
|
||||
|
||||
Create the block on transient storage if possible.
|
||||
|
||||
### `FSDataOutputStreamBuilder newBlock()`
|
||||
|
||||
Append data to a new block instead of the end of the last partial block.
|
||||
|
||||
### `FSDataOutputStreamBuilder noLocalWrite()`
|
||||
|
||||
Advise that a block replica NOT be written to the local DataNode.
|
||||
|
||||
### `FSDataOutputStreamBuilder ecPolicyName()`
|
||||
|
||||
Enforce that the file be a striped file with the erasure coding policy 'policyName',
|
||||
no matter what its parent directory's replication or erasure coding policy is.
|
||||
|
||||
### `FSDataOutputStreamBuilder replicate()`
|
||||
|
||||
Enforce that the file be a replicated file, no matter what its parent directory's
|
||||
replication or erasure coding policy is.
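A sketch combining several of the methods above; it assumes `dfs` is a `DistributedFileSystem` whose `createFile()` returns the HDFS builder, and the path is purely illustrative:

```java
// Sketch only: dfs is assumed to be an org.apache.hadoop.hdfs.DistributedFileSystem.
FSDataOutputStream out = dfs.createFile(new Path("/data/replicated.bin"))
    .replicate()        // force a replicated (non erasure-coded) file
    .syncBlock()        // sync closed blocks to the disk device
    .noLocalWrite()     // advise against a replica on the local DataNode
    .recursive()        // create missing parent directories
    .build();
```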
|
||||
|
||||
## Builder interface
|
||||
|
||||
### <a name="Builder.build"></a> `FSDataOutputStream build()`
|
||||
|
||||
Create a new file or append to an existing file on the underlying `FileSystem`,
|
||||
and return an `FSDataOutputStream` for writing.
|
||||
|
||||
#### Preconditions
|
||||
|
||||
The following combinations of parameters are not supported:
|
||||
|
||||
if APPEND|OVERWRITE: raise HadoopIllegalArgumentException
|
||||
if CREATE|APPEND|OVERWRITE: raise HadoopIllegalArgumentException
|
||||
|
||||
`FileSystem` may reject the request for other reasons and throw `IOException`,
|
||||
see `FileSystem#create(path, ...)` and `FileSystem#append()`.
|
||||
|
||||
#### Postconditions
|
||||
|
||||
FS' where :
|
||||
FS'.Files'[p] == []
|
||||
ancestors(p) is-subset-of FS'.Directories'
|
||||
|
||||
result = FSDataOutputStream
|
||||
|
||||
The result is an `FSDataOutputStream` to be used to write data to the filesystem.
|
|
@ -33,5 +33,6 @@ HDFS as these are commonly expected by Hadoop client applications.
|
|||
1. [Model](model.html)
|
||||
1. [FileSystem class](filesystem.html)
|
||||
1. [FSDataInputStream class](fsdatainputstream.html)
|
||||
1. [FSDataOutputStreamBuilder class](fsdataoutputstreambuilder.html)
|
||||
2. [Testing with the Filesystem specification](testing.html)
|
||||
2. [Extending the specification and its tests](extending.html)
|
||||
|
|
|
@ -103,6 +103,12 @@ public class TestCommonConfigurationFields extends TestConfigurationFieldsBase {
|
|||
xmlPrefixToSkipCompare.add("fs.s3n.");
|
||||
xmlPrefixToSkipCompare.add("s3native.");
|
||||
|
||||
// WASB properties are in a different subtree.
|
||||
// - org.apache.hadoop.fs.azure.NativeAzureFileSystem
|
||||
xmlPrefixToSkipCompare.add("fs.wasb.impl");
|
||||
xmlPrefixToSkipCompare.add("fs.wasbs.impl");
|
||||
xmlPrefixToSkipCompare.add("fs.azure.");
|
||||
|
||||
// ADL properties are in a different subtree
|
||||
// - org.apache.hadoop.hdfs.web.ADLConfKeys
|
||||
xmlPrefixToSkipCompare.add("adl.");
|
||||
|
|
|
@ -36,6 +36,7 @@ import java.util.Arrays;
|
|||
import java.util.Collection;
|
||||
import java.util.HashMap;
|
||||
import java.util.HashSet;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
import java.util.Random;
|
||||
import java.util.Set;
|
||||
|
@ -48,6 +49,7 @@ import static org.junit.Assert.assertArrayEquals;
|
|||
|
||||
import org.apache.commons.lang.StringUtils;
|
||||
import org.apache.hadoop.conf.Configuration.IntegerRanges;
|
||||
import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
|
||||
import org.apache.hadoop.fs.Path;
|
||||
import org.apache.hadoop.io.IOUtils;
|
||||
import org.apache.hadoop.net.NetUtils;
|
||||
|
@ -55,6 +57,9 @@ import org.apache.hadoop.test.GenericTestUtils;
|
|||
|
||||
import static org.apache.hadoop.util.PlatformName.IBM_JAVA;
|
||||
|
||||
import org.apache.log4j.AppenderSkeleton;
|
||||
import org.apache.log4j.Logger;
|
||||
import org.apache.log4j.spi.LoggingEvent;
|
||||
import org.mockito.Mockito;
|
||||
|
||||
public class TestConfiguration extends TestCase {
|
||||
|
@ -78,6 +83,11 @@ public class TestConfiguration extends TestCase {
|
|||
/** Four apostrophes. */
|
||||
public static final String ESCAPED = "''''";
|
||||
|
||||
private static final String SENSITIVE_CONFIG_KEYS =
|
||||
CommonConfigurationKeysPublic.HADOOP_SECURITY_SENSITIVE_CONFIG_KEYS;
|
||||
|
||||
private BufferedWriter out;
|
||||
|
||||
@Override
|
||||
protected void setUp() throws Exception {
|
||||
super.setUp();
|
||||
|
@ -86,6 +96,9 @@ public class TestConfiguration extends TestCase {
|
|||
|
||||
@Override
|
||||
protected void tearDown() throws Exception {
|
||||
if(out != null) {
|
||||
out.close();
|
||||
}
|
||||
super.tearDown();
|
||||
new File(CONFIG).delete();
|
||||
new File(CONFIG2).delete();
|
||||
|
@ -151,16 +164,189 @@ public class TestConfiguration extends TestCase {
|
|||
startConfig();
|
||||
declareProperty("prop", "A", "A");
|
||||
endConfig();
|
||||
|
||||
InputStream in1 = new ByteArrayInputStream(writer.toString().getBytes());
|
||||
|
||||
InputStream in1 = Mockito.spy(new ByteArrayInputStream(
|
||||
writer.toString().getBytes()));
|
||||
Configuration conf = new Configuration(false);
|
||||
conf.addResource(in1);
|
||||
assertEquals("A", conf.get("prop"));
|
||||
Mockito.verify(in1, Mockito.times(1)).close();
|
||||
InputStream in2 = new ByteArrayInputStream(writer.toString().getBytes());
|
||||
conf.addResource(in2);
|
||||
assertEquals("A", conf.get("prop"));
|
||||
}
|
||||
|
||||
public void testFinalWarnings() throws Exception {
|
||||
// Make a configuration file with a final property
|
||||
StringWriter writer = new StringWriter();
|
||||
out = new BufferedWriter(writer);
|
||||
startConfig();
|
||||
declareProperty("prop", "A", "A", true);
|
||||
endConfig();
|
||||
byte[] bytes = writer.toString().getBytes();
|
||||
InputStream in1 = new ByteArrayInputStream(bytes);
|
||||
|
||||
// Make a second config file with a final property with a different value
|
||||
writer = new StringWriter();
|
||||
out = new BufferedWriter(writer);
|
||||
startConfig();
|
||||
declareProperty("prop", "BB", "BB", true);
|
||||
endConfig();
|
||||
byte[] bytes2 = writer.toString().getBytes();
|
||||
InputStream in2 = new ByteArrayInputStream(bytes2);
|
||||
|
||||
// Attach our own log appender so we can verify output
|
||||
TestAppender appender = new TestAppender();
|
||||
final Logger logger = Logger.getRootLogger();
|
||||
logger.addAppender(appender);
|
||||
|
||||
try {
|
||||
// Add the 2 different resources - this should generate a warning
|
||||
conf.addResource(in1);
|
||||
conf.addResource(in2);
|
||||
assertEquals("should see the first value", "A", conf.get("prop"));
|
||||
|
||||
List<LoggingEvent> events = appender.getLog();
|
||||
assertEquals("overriding a final parameter should cause logging", 1,
|
||||
events.size());
|
||||
LoggingEvent loggingEvent = events.get(0);
|
||||
String renderedMessage = loggingEvent.getRenderedMessage();
|
||||
assertTrue("did not see expected string inside message "+ renderedMessage,
|
||||
renderedMessage.contains("an attempt to override final parameter: "
|
||||
+ "prop; Ignoring."));
|
||||
} finally {
|
||||
// Make sure the appender is removed
|
||||
logger.removeAppender(appender);
|
||||
}
|
||||
}
|
||||
|
||||
public void testNoFinalWarnings() throws Exception {
|
||||
// Make a configuration file with a final property
|
||||
StringWriter writer = new StringWriter();
|
||||
out = new BufferedWriter(writer);
|
||||
startConfig();
|
||||
declareProperty("prop", "A", "A", true);
|
||||
endConfig();
|
||||
byte[] bytes = writer.toString().getBytes();
|
||||
// The 2 input streams both have the same config file
|
||||
InputStream in1 = new ByteArrayInputStream(bytes);
|
||||
InputStream in2 = new ByteArrayInputStream(bytes);
|
||||
|
||||
// Attach our own log appender so we can verify output
|
||||
TestAppender appender = new TestAppender();
|
||||
final Logger logger = Logger.getRootLogger();
|
||||
logger.addAppender(appender);
|
||||
|
||||
try {
|
||||
// Add the resource twice from a stream - should not generate warnings
|
||||
conf.addResource(in1);
|
||||
conf.addResource(in2);
|
||||
assertEquals("A", conf.get("prop"));
|
||||
|
||||
List<LoggingEvent> events = appender.getLog();
|
||||
for (LoggingEvent loggingEvent : events) {
|
||||
System.out.println("Event = " + loggingEvent.getRenderedMessage());
|
||||
}
|
||||
assertTrue("adding same resource twice should not cause logging",
|
||||
events.isEmpty());
|
||||
} finally {
|
||||
// Make sure the appender is removed
|
||||
logger.removeAppender(appender);
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
|
||||
public void testFinalWarningsMultiple() throws Exception {
|
||||
// Make a configuration file with a repeated final property
|
||||
StringWriter writer = new StringWriter();
|
||||
out = new BufferedWriter(writer);
|
||||
startConfig();
|
||||
declareProperty("prop", "A", "A", true);
|
||||
declareProperty("prop", "A", "A", true);
|
||||
endConfig();
|
||||
byte[] bytes = writer.toString().getBytes();
|
||||
InputStream in1 = new ByteArrayInputStream(bytes);
|
||||
|
||||
// Attach our own log appender so we can verify output
|
||||
TestAppender appender = new TestAppender();
|
||||
final Logger logger = Logger.getRootLogger();
|
||||
logger.addAppender(appender);
|
||||
|
||||
try {
|
||||
// Add the resource - this should not produce a warning
|
||||
conf.addResource(in1);
|
||||
assertEquals("should see the value", "A", conf.get("prop"));
|
||||
|
||||
List<LoggingEvent> events = appender.getLog();
|
||||
for (LoggingEvent loggingEvent : events) {
|
||||
System.out.println("Event = " + loggingEvent.getRenderedMessage());
|
||||
}
|
||||
assertTrue("adding same resource twice should not cause logging",
|
||||
events.isEmpty());
|
||||
} finally {
|
||||
// Make sure the appender is removed
|
||||
logger.removeAppender(appender);
|
||||
}
|
||||
}
|
||||
|
||||
public void testFinalWarningsMultipleOverride() throws Exception {
|
||||
// Make a configuration file with 2 final properties with different values
|
||||
StringWriter writer = new StringWriter();
|
||||
out = new BufferedWriter(writer);
|
||||
startConfig();
|
||||
declareProperty("prop", "A", "A", true);
|
||||
declareProperty("prop", "BB", "BB", true);
|
||||
endConfig();
|
||||
byte[] bytes = writer.toString().getBytes();
|
||||
InputStream in1 = new ByteArrayInputStream(bytes);
|
||||
|
||||
// Attach our own log appender so we can verify output
|
||||
TestAppender appender = new TestAppender();
|
||||
final Logger logger = Logger.getRootLogger();
|
||||
logger.addAppender(appender);
|
||||
|
||||
try {
|
||||
// Add the resource - this should produce a warning
|
||||
conf.addResource(in1);
|
||||
assertEquals("should see the value", "A", conf.get("prop"));
|
||||
|
||||
List<LoggingEvent> events = appender.getLog();
|
||||
assertEquals("overriding a final parameter should cause logging", 1,
|
||||
events.size());
|
||||
LoggingEvent loggingEvent = events.get(0);
|
||||
String renderedMessage = loggingEvent.getRenderedMessage();
|
||||
assertTrue("did not see expected string inside message "+ renderedMessage,
|
||||
renderedMessage.contains("an attempt to override final parameter: "
|
||||
+ "prop; Ignoring."));
|
||||
} finally {
|
||||
// Make sure the appender is removed
|
||||
logger.removeAppender(appender);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* A simple appender for white box testing.
|
||||
*/
|
||||
private static class TestAppender extends AppenderSkeleton {
|
||||
private final List<LoggingEvent> log = new ArrayList<>();
|
||||
|
||||
@Override public boolean requiresLayout() {
|
||||
return false;
|
||||
}
|
||||
|
||||
@Override protected void append(final LoggingEvent loggingEvent) {
|
||||
log.add(loggingEvent);
|
||||
}
|
||||
|
||||
@Override public void close() {
|
||||
}
|
||||
|
||||
public List<LoggingEvent> getLog() {
|
||||
return new ArrayList<>(log);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Tests use of multi-byte characters in property names and values. This test
|
||||
* round-trips multi-byte string literals through saving and loading of config
|
||||
|
@ -701,8 +887,6 @@ public class TestConfiguration extends TestCase {
|
|||
new File(new File(relConfig).getParent()).delete();
|
||||
}
|
||||
|
||||
BufferedWriter out;
|
||||
|
||||
public void testIntegerRanges() {
|
||||
Configuration conf = new Configuration();
|
||||
conf.set("first", "-100");
|
||||
|
@ -1610,8 +1794,41 @@ public class TestConfiguration extends TestCase {
|
|||
assertEquals(fileResource.toString(),prop.getResource());
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
|
||||
public void testDumpSensitiveProperty() throws IOException {
|
||||
final String myPassword = "ThisIsMyPassword";
|
||||
Configuration testConf = new Configuration(false);
|
||||
out = new BufferedWriter(new FileWriter(CONFIG));
|
||||
startConfig();
|
||||
appendProperty("test.password", myPassword);
|
||||
endConfig();
|
||||
Path fileResource = new Path(CONFIG);
|
||||
testConf.addResource(fileResource);
|
||||
|
||||
try (StringWriter outWriter = new StringWriter()) {
|
||||
testConf.set(SENSITIVE_CONFIG_KEYS, "password$");
|
||||
Configuration.dumpConfiguration(testConf, "test.password", outWriter);
|
||||
assertFalse(outWriter.toString().contains(myPassword));
|
||||
}
|
||||
}
|
||||
|
||||
public void testDumpSensitiveConfiguration() throws IOException {
|
||||
final String myPassword = "ThisIsMyPassword";
|
||||
Configuration testConf = new Configuration(false);
|
||||
out = new BufferedWriter(new FileWriter(CONFIG));
|
||||
startConfig();
|
||||
appendProperty("test.password", myPassword);
|
||||
endConfig();
|
||||
Path fileResource = new Path(CONFIG);
|
||||
testConf.addResource(fileResource);
|
||||
|
||||
try (StringWriter outWriter = new StringWriter()) {
|
||||
testConf.set(SENSITIVE_CONFIG_KEYS, "password$");
|
||||
Configuration.dumpConfiguration(testConf, outWriter);
|
||||
assertFalse(outWriter.toString().contains(myPassword));
|
||||
}
|
||||
}
|
||||
|
||||
public void testGetValByRegex() {
|
||||
Configuration conf = new Configuration();
|
||||
String key1 = "t.abc.key1";
|
||||
|
|
|
@ -36,6 +36,7 @@ import org.junit.Test;
|
|||
import org.apache.hadoop.fs.FileStatus;
|
||||
import org.apache.hadoop.fs.Path;
|
||||
import org.apache.hadoop.fs.permission.FsPermission;
|
||||
|
||||
import org.slf4j.Logger;
|
||||
import org.slf4j.LoggerFactory;
|
||||
|
||||
|
|
|
@ -17,11 +17,13 @@
|
|||
*/
|
||||
package org.apache.hadoop.fs;
|
||||
|
||||
import com.google.common.base.Preconditions;
|
||||
import org.apache.hadoop.conf.Configuration;
|
||||
import org.apache.hadoop.fs.FileSystem.Statistics;
|
||||
import org.apache.hadoop.fs.permission.FsPermission;
|
||||
import org.apache.hadoop.io.IOUtils;
|
||||
import org.apache.hadoop.test.GenericTestUtils;
|
||||
import org.apache.hadoop.test.LambdaTestUtils;
|
||||
import org.apache.hadoop.util.StringUtils;
|
||||
|
||||
import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_DEFAULT;
|
||||
|
@ -31,7 +33,11 @@ import static org.apache.hadoop.fs.FileSystemTestHelper.*;
|
|||
import java.io.*;
|
||||
import java.net.URI;
|
||||
import java.util.Arrays;
|
||||
import java.util.Collection;
|
||||
import java.util.HashSet;
|
||||
import java.util.List;
|
||||
import java.util.Random;
|
||||
import java.util.Set;
|
||||
|
||||
import static org.apache.hadoop.test.PlatformAssumptions.assumeNotWindows;
|
||||
import static org.apache.hadoop.test.PlatformAssumptions.assumeWindows;
|
||||
|
@ -46,6 +52,8 @@ import org.junit.Test;
|
|||
import org.junit.rules.Timeout;
|
||||
import org.mockito.internal.util.reflection.Whitebox;
|
||||
|
||||
import javax.annotation.Nonnull;
|
||||
|
||||
|
||||
/**
|
||||
* This class tests the local file system via the FileSystem abstraction.
|
||||
|
@ -210,8 +218,8 @@ public class TestLocalFileSystem {
|
|||
|
||||
@Test
|
||||
public void testHomeDirectory() throws IOException {
|
||||
Path home = new Path(System.getProperty("user.home"))
|
||||
.makeQualified(fileSys);
|
||||
Path home = fileSys.makeQualified(
|
||||
new Path(System.getProperty("user.home")));
|
||||
Path fsHome = fileSys.getHomeDirectory();
|
||||
assertEquals(home, fsHome);
|
||||
}
|
||||
|
@ -221,7 +229,7 @@ public class TestLocalFileSystem {
|
|||
Path path = new Path(TEST_ROOT_DIR, "foo%bar");
|
||||
writeFile(fileSys, path, 1);
|
||||
FileStatus status = fileSys.getFileStatus(path);
|
||||
assertEquals(path.makeQualified(fileSys), status.getPath());
|
||||
assertEquals(fileSys.makeQualified(path), status.getPath());
|
||||
cleanupFile(fileSys, path);
|
||||
}
|
||||
|
||||
|
@ -659,7 +667,7 @@ public class TestLocalFileSystem {
|
|||
|
||||
try {
|
||||
FSDataOutputStreamBuilder builder =
|
||||
fileSys.createFile(path);
|
||||
fileSys.createFile(path).recursive();
|
||||
FSDataOutputStream out = builder.build();
|
||||
String content = "Create with a generic type of createFile!";
|
||||
byte[] contentOrigin = content.getBytes("UTF8");
|
||||
|
@ -703,4 +711,66 @@ public class TestLocalFileSystem {
|
|||
Assert.assertEquals("Buffer size should be 0",
|
||||
builder.getBufferSize(), 0);
|
||||
}
|
||||
|
||||
/**
|
||||
* A builder to verify configuration keys are supported.
|
||||
*/
|
||||
private static class BuilderWithSupportedKeys
|
||||
extends FSDataOutputStreamBuilder<FSDataOutputStream,
|
||||
BuilderWithSupportedKeys> {
|
||||
|
||||
private final Set<String> supportedKeys = new HashSet<>();
|
||||
|
||||
BuilderWithSupportedKeys(@Nonnull final Collection<String> supportedKeys,
|
||||
@Nonnull FileSystem fileSystem, @Nonnull Path p) {
|
||||
super(fileSystem, p);
|
||||
this.supportedKeys.addAll(supportedKeys);
|
||||
}
|
||||
|
||||
@Override
|
||||
protected BuilderWithSupportedKeys getThisBuilder() {
|
||||
return this;
|
||||
}
|
||||
|
||||
@Override
|
||||
public FSDataOutputStream build()
|
||||
throws IllegalArgumentException, IOException {
|
||||
Set<String> unsupported = new HashSet<>(getMandatoryKeys());
|
||||
unsupported.removeAll(supportedKeys);
|
||||
Preconditions.checkArgument(unsupported.isEmpty(),
|
||||
"unsupported key found: " + supportedKeys);
|
||||
return getFS().create(
|
||||
getPath(), getPermission(), getFlags(), getBufferSize(),
|
||||
getReplication(), getBlockSize(), getProgress(), getChecksumOpt());
|
||||
}
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testFSOutputStreamBuilderOptions() throws Exception {
|
||||
Path path = new Path(TEST_ROOT_DIR, "testBuilderOpt");
|
||||
final List<String> supportedKeys = Arrays.asList("strM");
|
||||
|
||||
FSDataOutputStreamBuilder<?, ?> builder =
|
||||
new BuilderWithSupportedKeys(supportedKeys, fileSys, path);
|
||||
builder.opt("strKey", "value");
|
||||
builder.opt("intKey", 123);
|
||||
builder.opt("strM", "ignored");
|
||||
// Over-write an optional value with a mandatory value.
|
||||
builder.must("strM", "value");
|
||||
builder.must("unsupported", 12.34);
|
||||
|
||||
assertEquals("Optional value should be overwrite by a mandatory value",
|
||||
"value", builder.getOptions().get("strM"));
|
||||
|
||||
Set<String> mandatoryKeys = builder.getMandatoryKeys();
|
||||
Set<String> expectedKeys = new HashSet<>();
|
||||
expectedKeys.add("strM");
|
||||
expectedKeys.add("unsupported");
|
||||
assertEquals(expectedKeys, mandatoryKeys);
|
||||
assertEquals(2, mandatoryKeys.size());
|
||||
|
||||
LambdaTestUtils.intercept(IllegalArgumentException.class,
|
||||
"unsupported key found", builder::build
|
||||
);
|
||||
}
|
||||
}
|
||||
|
|
|
@ -60,6 +60,19 @@ public abstract class AbstractContractAppendTest extends AbstractFSContractTestB
|
|||
ContractTestUtils.compareByteArrays(dataset, bytes, dataset.length);
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testBuilderAppendToEmptyFile() throws Throwable {
|
||||
touch(getFileSystem(), target);
|
||||
byte[] dataset = dataset(256, 'a', 'z');
|
||||
try (FSDataOutputStream outputStream =
|
||||
getFileSystem().appendFile(target).build()) {
|
||||
outputStream.write(dataset);
|
||||
}
|
||||
byte[] bytes = ContractTestUtils.readDataset(getFileSystem(), target,
|
||||
dataset.length);
|
||||
ContractTestUtils.compareByteArrays(dataset, bytes, dataset.length);
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testAppendNonexistentFile() throws Throwable {
|
||||
try {
|
||||
|
@ -78,15 +91,29 @@ public abstract class AbstractContractAppendTest extends AbstractFSContractTestB
|
|||
byte[] original = dataset(8192, 'A', 'Z');
|
||||
byte[] appended = dataset(8192, '0', '9');
|
||||
createFile(getFileSystem(), target, false, original);
|
||||
FSDataOutputStream outputStream = getFileSystem().append(target);
|
||||
outputStream.write(appended);
|
||||
outputStream.close();
|
||||
try (FSDataOutputStream out = getFileSystem().append(target)) {
|
||||
out.write(appended);
|
||||
}
|
||||
byte[] bytes = ContractTestUtils.readDataset(getFileSystem(), target,
|
||||
original.length + appended.length);
|
||||
ContractTestUtils.validateFileContent(bytes,
|
||||
new byte[] [] { original, appended });
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testBuilderAppendToExistingFile() throws Throwable {
|
||||
byte[] original = dataset(8192, 'A', 'Z');
|
||||
byte[] appended = dataset(8192, '0', '9');
|
||||
createFile(getFileSystem(), target, false, original);
|
||||
try (FSDataOutputStream out = getFileSystem().appendFile(target).build()) {
|
||||
out.write(appended);
|
||||
}
|
||||
byte[] bytes = ContractTestUtils.readDataset(getFileSystem(), target,
|
||||
original.length + appended.length);
|
||||
ContractTestUtils.validateFileContent(bytes,
|
||||
new byte[][]{original, appended});
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testAppendMissingTarget() throws Throwable {
|
||||
try {
|
||||
|
|
|
@ -47,24 +47,37 @@ public abstract class AbstractContractCreateTest extends
|
|||
*/
|
||||
public static final int CREATE_TIMEOUT = 15000;
|
||||
|
||||
@Test
|
||||
public void testCreateNewFile() throws Throwable {
|
||||
describe("Foundational 'create a file' test");
|
||||
Path path = path("testCreateNewFile");
|
||||
protected Path path(String filepath, boolean useBuilder) throws IOException {
|
||||
return super.path(filepath + (useBuilder ? "" : "-builder"));
|
||||
}
|
||||
|
||||
private void testCreateNewFile(boolean useBuilder) throws Throwable {
|
||||
describe("Foundational 'create a file' test, using builder API=" +
|
||||
useBuilder);
|
||||
Path path = path("testCreateNewFile", useBuilder);
|
||||
byte[] data = dataset(256, 'a', 'z');
|
||||
writeDataset(getFileSystem(), path, data, data.length, 1024 * 1024, false);
|
||||
writeDataset(getFileSystem(), path, data, data.length, 1024 * 1024, false,
|
||||
useBuilder);
|
||||
ContractTestUtils.verifyFileContents(getFileSystem(), path, data);
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testCreateFileOverExistingFileNoOverwrite() throws Throwable {
|
||||
describe("Verify overwriting an existing file fails");
|
||||
Path path = path("testCreateFileOverExistingFileNoOverwrite");
|
||||
public void testCreateNewFile() throws Throwable {
|
||||
testCreateNewFile(true);
|
||||
testCreateNewFile(false);
|
||||
}
|
||||
|
||||
private void testCreateFileOverExistingFileNoOverwrite(boolean useBuilder)
|
||||
throws Throwable {
|
||||
describe("Verify overwriting an existing file fails, using builder API=" +
|
||||
useBuilder);
|
||||
Path path = path("testCreateFileOverExistingFileNoOverwrite", useBuilder);
|
||||
byte[] data = dataset(256, 'a', 'z');
|
||||
writeDataset(getFileSystem(), path, data, data.length, 1024, false);
|
||||
byte[] data2 = dataset(10 * 1024, 'A', 'Z');
|
||||
try {
|
||||
writeDataset(getFileSystem(), path, data2, data2.length, 1024, false);
|
||||
writeDataset(getFileSystem(), path, data2, data2.length, 1024, false,
|
||||
useBuilder);
|
||||
fail("writing without overwrite unexpectedly succeeded");
|
||||
} catch (FileAlreadyExistsException expected) {
|
||||
//expected
|
||||
|
@ -76,6 +89,26 @@ public abstract class AbstractContractCreateTest extends
|
|||
}
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testCreateFileOverExistingFileNoOverwrite() throws Throwable {
|
||||
testCreateFileOverExistingFileNoOverwrite(false);
|
||||
testCreateFileOverExistingFileNoOverwrite(true);
|
||||
}
|
||||
|
||||
private void testOverwriteExistingFile(boolean useBuilder) throws Throwable {
|
||||
describe("Overwrite an existing file and verify the new data is there, " +
|
||||
"use builder API=" + useBuilder);
|
||||
Path path = path("testOverwriteExistingFile", useBuilder);
|
||||
byte[] data = dataset(256, 'a', 'z');
|
||||
writeDataset(getFileSystem(), path, data, data.length, 1024, false,
|
||||
useBuilder);
|
||||
ContractTestUtils.verifyFileContents(getFileSystem(), path, data);
|
||||
byte[] data2 = dataset(10 * 1024, 'A', 'Z');
|
||||
writeDataset(getFileSystem(), path, data2, data2.length, 1024, true,
|
||||
useBuilder);
|
||||
ContractTestUtils.verifyFileContents(getFileSystem(), path, data2);
|
||||
}
|
||||
|
||||
/**
|
||||
* This test catches some eventual consistency problems that blobstores exhibit,
|
||||
* as we are implicitly verifying that updates are consistent. This
|
||||
|
@ -84,25 +117,21 @@ public abstract class AbstractContractCreateTest extends
|
|||
*/
|
||||
@Test
|
||||
public void testOverwriteExistingFile() throws Throwable {
|
||||
describe("Overwrite an existing file and verify the new data is there");
|
||||
Path path = path("testOverwriteExistingFile");
|
||||
byte[] data = dataset(256, 'a', 'z');
|
||||
writeDataset(getFileSystem(), path, data, data.length, 1024, false);
|
||||
ContractTestUtils.verifyFileContents(getFileSystem(), path, data);
|
||||
byte[] data2 = dataset(10 * 1024, 'A', 'Z');
|
||||
writeDataset(getFileSystem(), path, data2, data2.length, 1024, true);
|
||||
ContractTestUtils.verifyFileContents(getFileSystem(), path, data2);
|
||||
testOverwriteExistingFile(false);
|
||||
testOverwriteExistingFile(true);
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testOverwriteEmptyDirectory() throws Throwable {
|
||||
describe("verify trying to create a file over an empty dir fails");
|
||||
private void testOverwriteEmptyDirectory(boolean useBuilder)
|
||||
throws Throwable {
|
||||
describe("verify trying to create a file over an empty dir fails, " +
|
||||
"use builder API=" + useBuilder);
|
||||
Path path = path("testOverwriteEmptyDirectory");
|
||||
mkdirs(path);
|
||||
assertIsDirectory(path);
|
||||
byte[] data = dataset(256, 'a', 'z');
|
||||
try {
|
||||
writeDataset(getFileSystem(), path, data, data.length, 1024, true);
|
||||
writeDataset(getFileSystem(), path, data, data.length, 1024, true,
|
||||
useBuilder);
|
||||
assertIsDirectory(path);
|
||||
fail("write of file over empty dir succeeded");
|
||||
} catch (FileAlreadyExistsException expected) {
|
||||
|
@ -121,8 +150,15 @@ public abstract class AbstractContractCreateTest extends
|
|||
}
|
||||
|
||||
@Test
|
||||
public void testOverwriteNonEmptyDirectory() throws Throwable {
|
||||
describe("verify trying to create a file over a non-empty dir fails");
|
||||
public void testOverwriteEmptyDirectory() throws Throwable {
|
||||
testOverwriteEmptyDirectory(false);
|
||||
testOverwriteEmptyDirectory(true);
|
||||
}
|
||||
|
||||
private void testOverwriteNonEmptyDirectory(boolean useBuilder)
|
||||
throws Throwable {
|
||||
describe("verify trying to create a file over a non-empty dir fails, " +
|
||||
"use builder API=" + useBuilder);
|
||||
Path path = path("testOverwriteNonEmptyDirectory");
|
||||
mkdirs(path);
|
||||
try {
|
||||
|
@ -140,7 +176,7 @@ public abstract class AbstractContractCreateTest extends
|
|||
byte[] data = dataset(256, 'a', 'z');
|
||||
try {
|
||||
writeDataset(getFileSystem(), path, data, data.length, 1024,
|
||||
true);
|
||||
true, useBuilder);
|
||||
FileStatus status = getFileSystem().getFileStatus(path);
|
||||
|
||||
boolean isDir = status.isDirectory();
|
||||
|
@ -166,6 +202,12 @@ public abstract class AbstractContractCreateTest extends
|
|||
assertIsFile(child);
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testOverwriteNonEmptyDirectory() throws Throwable {
|
||||
testOverwriteNonEmptyDirectory(false);
|
||||
testOverwriteNonEmptyDirectory(true);
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testCreatedFileIsImmediatelyVisible() throws Throwable {
|
||||
describe("verify that a newly created file exists as soon as open returns");
|
||||
|
|
|
@ -70,7 +70,8 @@ public class ContractTestUtils extends Assert {
|
|||
* Assert that a property in the property set matches the expected value.
|
||||
* @param props property set
|
||||
* @param key property name
|
||||
* @param expected expected value. If null, the property must not be in the set
|
||||
* @param expected expected value. If null, the property must not be in the
|
||||
* set
|
||||
*/
|
||||
public static void assertPropertyEquals(Properties props,
|
||||
String key,
|
||||
|
@ -146,16 +147,45 @@ public class ContractTestUtils extends Assert {
|
|||
int len,
|
||||
int buffersize,
|
||||
boolean overwrite) throws IOException {
|
||||
writeDataset(fs, path, src, len, buffersize, overwrite, false);
|
||||
}
|
||||
|
||||
/**
|
||||
* Write a file.
|
||||
* Optional flags control
|
||||
* whether file overwrite operations should be enabled
|
||||
* Optional using {@link org.apache.hadoop.fs.FSDataOutputStreamBuilder}
|
||||
*
|
||||
* @param fs filesystem
|
||||
* @param path path to write to
|
||||
* @param len length of data
|
||||
* @param overwrite should the create option allow overwrites?
|
||||
* @param useBuilder should use builder API to create file?
|
||||
* @throws IOException IO problems
|
||||
*/
|
||||
public static void writeDataset(FileSystem fs, Path path, byte[] src,
|
||||
int len, int buffersize, boolean overwrite, boolean useBuilder)
|
||||
throws IOException {
|
||||
assertTrue(
|
||||
"Not enough data in source array to write " + len + " bytes",
|
||||
src.length >= len);
|
||||
FSDataOutputStream out = fs.create(path,
|
||||
overwrite,
|
||||
fs.getConf()
|
||||
.getInt(IO_FILE_BUFFER_SIZE_KEY,
|
||||
IO_FILE_BUFFER_SIZE_DEFAULT),
|
||||
(short) 1,
|
||||
buffersize);
|
||||
FSDataOutputStream out;
|
||||
if (useBuilder) {
|
||||
out = fs.createFile(path)
|
||||
.overwrite(overwrite)
|
||||
.replication((short) 1)
|
||||
.bufferSize(buffersize)
|
||||
.blockSize(buffersize)
|
||||
.build();
|
||||
} else {
|
||||
out = fs.create(path,
|
||||
overwrite,
|
||||
fs.getConf()
|
||||
.getInt(IO_FILE_BUFFER_SIZE_KEY,
|
||||
IO_FILE_BUFFER_SIZE_DEFAULT),
|
||||
(short) 1,
|
||||
buffersize);
|
||||
}
|
||||
out.write(src, 0, len);
|
||||
out.close();
|
||||
assertFileHasLength(fs, path, len);
|
||||
|
@ -203,7 +233,7 @@ public class ContractTestUtils extends Assert {
|
|||
assertTrue("not a file " + statText, stat.isFile());
|
||||
assertEquals("wrong length " + statText, original.length, stat.getLen());
|
||||
byte[] bytes = readDataset(fs, path, original.length);
|
||||
compareByteArrays(original,bytes,original.length);
|
||||
compareByteArrays(original, bytes, original.length);
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -222,7 +252,7 @@ public class ContractTestUtils extends Assert {
|
|||
stm.readFully(out);
|
||||
byte[] expected = Arrays.copyOfRange(fileContents, seekOff,
|
||||
seekOff + toRead);
|
||||
compareByteArrays(expected, out,toRead);
|
||||
compareByteArrays(expected, out, toRead);
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -239,11 +269,11 @@ public class ContractTestUtils extends Assert {
|
|||
assertEquals("Number of bytes read != number written",
|
||||
len, received.length);
|
||||
int errors = 0;
|
||||
int first_error_byte = -1;
|
||||
int firstErrorByte = -1;
|
||||
for (int i = 0; i < len; i++) {
|
||||
if (original[i] != received[i]) {
|
||||
if (errors == 0) {
|
||||
first_error_byte = i;
|
||||
firstErrorByte = i;
|
||||
}
|
||||
errors++;
|
||||
}
|
||||
|
@ -256,8 +286,8 @@ public class ContractTestUtils extends Assert {
|
|||
// the range either side of the first error to print
|
||||
// this is a purely arbitrary number, to aid user debugging
|
||||
final int overlap = 10;
|
||||
for (int i = Math.max(0, first_error_byte - overlap);
|
||||
i < Math.min(first_error_byte + overlap, len);
|
||||
for (int i = Math.max(0, firstErrorByte - overlap);
|
||||
i < Math.min(firstErrorByte + overlap, len);
|
||||
i++) {
|
||||
byte actual = received[i];
|
||||
byte expected = original[i];
|
||||
|
@ -450,7 +480,7 @@ public class ContractTestUtils extends Assert {
|
|||
public static void downgrade(String message, Throwable failure) {
|
||||
LOG.warn("Downgrading test " + message, failure);
|
||||
AssumptionViolatedException ave =
|
||||
new AssumptionViolatedException(failure, null);
|
||||
new AssumptionViolatedException(failure, null);
|
||||
throw ave;
|
||||
}
|
||||
|
||||
|
@ -494,9 +524,9 @@ public class ContractTestUtils extends Assert {
|
|||
int expected) throws IOException {
|
||||
FileStatus status = fs.getFileStatus(path);
|
||||
assertEquals(
|
||||
"Wrong file length of file " + path + " status: " + status,
|
||||
expected,
|
||||
status.getLen());
|
||||
"Wrong file length of file " + path + " status: " + status,
|
||||
expected,
|
||||
status.getLen());
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -682,7 +712,8 @@ public class ContractTestUtils extends Assert {
|
|||
*/
|
||||
public static String ls(FileSystem fileSystem, Path path) throws IOException {
|
||||
if (path == null) {
|
||||
//surfaces when someone calls getParent() on something at the top of the path
|
||||
// surfaces when someone calls getParent() on something at the top of the
|
||||
// path
|
||||
return "/";
|
||||
}
|
||||
FileStatus[] stats;
|
||||
|
@ -864,7 +895,7 @@ public class ContractTestUtils extends Assert {
|
|||
}
|
||||
|
||||
/**
|
||||
* Test for the host being an OSX machine
|
||||
* Test for the host being an OSX machine.
|
||||
* @return true if the JVM thinks that is running on OSX
|
||||
*/
|
||||
public static boolean isOSX() {
|
||||
|
@ -887,8 +918,9 @@ public class ContractTestUtils extends Assert {
|
|||
break;
|
||||
}
|
||||
}
|
||||
if (mismatch)
|
||||
if (mismatch) {
|
||||
break;
|
||||
}
|
||||
}
|
||||
assertFalse("File content of file is not as expected at offset " + idx,
|
||||
mismatch);
|
||||
|
@ -998,7 +1030,9 @@ public class ContractTestUtils extends Assert {
|
|||
* @throws IOException
|
||||
* thrown if an I/O error occurs while writing or reading the test file
|
||||
*/
|
||||
public static void createAndVerifyFile(FileSystem fs, Path parent, final long fileSize)
|
||||
public static void createAndVerifyFile(FileSystem fs,
|
||||
Path parent,
|
||||
final long fileSize)
|
||||
throws IOException {
|
||||
int testBufferSize = fs.getConf()
|
||||
.getInt(IO_CHUNK_BUFFER_SIZE, DEFAULT_IO_CHUNK_BUFFER_SIZE);
|
||||
|
@ -1495,13 +1529,21 @@ public class ContractTestUtils extends Assert {
|
|||
* printing some useful results in the process.
|
||||
*/
|
||||
public static final class NanoTimer {
|
||||
private final long startTime;
|
||||
private long startTime;
|
||||
private long endTime;
|
||||
|
||||
public NanoTimer() {
|
||||
startTime = now();
|
||||
}
|
||||
|
||||
/**
|
||||
* Reset the timer. Equivalent to the reset button of a stopwatch.
|
||||
*/
|
||||
public void reset() {
|
||||
endTime = 0;
|
||||
startTime = now();
|
||||
}
|
||||
|
||||
/**
|
||||
* End the operation.
|
||||
* @return the duration of the operation
|
||||
|
|
|
@ -0,0 +1,85 @@
|
|||
/**
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
* regarding copyright ownership. The ASF licenses this file
|
||||
* to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
package org.apache.hadoop.fs.protocolPB;
|
||||
|
||||
import org.apache.hadoop.fs.FileStatus;
|
||||
import org.apache.hadoop.fs.Path;
|
||||
import org.apache.hadoop.fs.permission.FsPermission;
|
||||
import org.apache.hadoop.io.DataInputBuffer;
|
||||
import org.apache.hadoop.io.DataOutputBuffer;
|
||||
import static org.apache.hadoop.fs.FSProtos.*;
|
||||
|
||||
import org.junit.Test;
|
||||
import static org.junit.Assert.*;
|
||||
|
||||
/**
|
||||
* Verify PB serialization of FS data structures.
|
||||
*/
|
||||
public class TestFSSerialization {
|
||||
|
||||
@Test
|
||||
@SuppressWarnings("deprecation")
|
||||
public void testWritableFlagSerialization() throws Exception {
|
||||
final Path p = new Path("hdfs://yaks:4344/dingos/f");
|
||||
for (int i = 0; i < 0x8; ++i) {
|
||||
final boolean acl = 0 != (i & 0x1);
|
||||
final boolean crypt = 0 != (i & 0x2);
|
||||
final boolean ec = 0 != (i & 0x4);
|
||||
FileStatus stat = new FileStatus(1024L, false, 3, 1L << 31,
|
||||
12345678L, 87654321L, FsPermission.getFileDefault(),
|
||||
"hadoop", "unqbbc", null, p, acl, crypt, ec);
|
||||
DataOutputBuffer dob = new DataOutputBuffer();
|
||||
stat.write(dob);
|
||||
DataInputBuffer dib = new DataInputBuffer();
|
||||
dib.reset(dob.getData(), 0, dob.getLength());
|
||||
FileStatus fstat = new FileStatus();
|
||||
fstat.readFields(dib);
|
||||
assertEquals(stat, fstat);
|
||||
checkFields(stat, fstat);
|
||||
}
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testUtilitySerialization() throws Exception {
|
||||
final Path p = new Path("hdfs://yaks:4344/dingos/f");
|
||||
FileStatus stat = new FileStatus(1024L, false, 3, 1L << 31,
|
||||
12345678L, 87654321L, FsPermission.createImmutable((short)0111),
|
||||
"hadoop", "unqbbc", null, p);
|
||||
FileStatusProto fsp = PBHelper.convert(stat);
|
||||
FileStatus stat2 = PBHelper.convert(fsp);
|
||||
assertEquals(stat, stat2);
|
||||
checkFields(stat, stat2);
|
||||
}
|
||||
|
||||
private static void checkFields(FileStatus expected, FileStatus actual) {
|
||||
assertEquals(expected.getPath(), actual.getPath());
|
||||
assertEquals(expected.isDirectory(), actual.isDirectory());
|
||||
assertEquals(expected.getLen(), actual.getLen());
|
||||
assertEquals(expected.getPermission(), actual.getPermission());
|
||||
assertEquals(expected.getOwner(), actual.getOwner());
|
||||
assertEquals(expected.getGroup(), actual.getGroup());
|
||||
assertEquals(expected.getModificationTime(), actual.getModificationTime());
|
||||
assertEquals(expected.getAccessTime(), actual.getAccessTime());
|
||||
assertEquals(expected.getReplication(), actual.getReplication());
|
||||
assertEquals(expected.getBlockSize(), actual.getBlockSize());
|
||||
assertEquals(expected.hasAcl(), actual.hasAcl());
|
||||
assertEquals(expected.isEncrypted(), actual.isEncrypted());
|
||||
assertEquals(expected.isErasureCoded(), actual.isErasureCoded());
|
||||
}
|
||||
|
||||
}
|
|
@ -682,4 +682,17 @@ public class TestHttpServer extends HttpServerFunctionalTest {
|
|||
stopHttpServer(myServer2);
|
||||
}
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testBacklogSize() throws Exception
|
||||
{
|
||||
final int backlogSize = 2048;
|
||||
Configuration conf = new Configuration();
|
||||
conf.setInt(HttpServer2.HTTP_SOCKET_BACKLOG_SIZE_KEY, backlogSize);
|
||||
HttpServer2 srv = createServer("test", conf);
|
||||
List<?> listeners = (List<?>) Whitebox.getInternalState(srv,
|
||||
"listeners");
|
||||
ServerConnector listener = (ServerConnector)listeners.get(0);
|
||||
assertEquals(backlogSize, listener.getAcceptQueueSize());
|
||||
}
|
||||
}
|
||||
|
|
|
@ -131,4 +131,9 @@ public class TestGenericsUtil extends TestCase {
|
|||
GenericClass.class, c2);
|
||||
}
|
||||
|
||||
public void testIsLog4jLogger() throws Exception {
|
||||
assertFalse("False if clazz is null", GenericsUtil.isLog4jLogger(null));
|
||||
assertTrue("The implementation is Log4j",
|
||||
GenericsUtil.isLog4jLogger(TestGenericsUtil.class));
|
||||
}
|
||||
}
|
||||
|
|
|
@ -0,0 +1,95 @@
|
|||
/**
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
* regarding copyright ownership. The ASF licenses this file
|
||||
* to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
package org.apache.hadoop.util.curator;
|
||||
|
||||
import static org.junit.Assert.assertEquals;
|
||||
import static org.junit.Assert.assertFalse;
|
||||
import static org.junit.Assert.assertTrue;
|
||||
|
||||
import java.util.List;
|
||||
|
||||
import org.apache.curator.test.TestingServer;
|
||||
import org.apache.hadoop.conf.Configuration;
|
||||
import org.apache.hadoop.fs.CommonConfigurationKeys;
|
||||
import org.junit.After;
|
||||
import org.junit.Before;
|
||||
import org.junit.Test;
|
||||
|
||||
/**
|
||||
* Test the manager for ZooKeeper Curator.
|
||||
*/
|
||||
public class TestZKCuratorManager {
|
||||
|
||||
private TestingServer server;
|
||||
private ZKCuratorManager curator;
|
||||
|
||||
@Before
|
||||
public void setup() throws Exception {
|
||||
this.server = new TestingServer();
|
||||
|
||||
Configuration conf = new Configuration();
|
||||
conf.set(
|
||||
CommonConfigurationKeys.ZK_ADDRESS, this.server.getConnectString());
|
||||
|
||||
this.curator = new ZKCuratorManager(conf);
|
||||
this.curator.start();
|
||||
}
|
||||
|
||||
@After
|
||||
public void teardown() throws Exception {
|
||||
this.curator.close();
|
||||
if (this.server != null) {
|
||||
this.server.close();
|
||||
this.server = null;
|
||||
}
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testReadWriteData() throws Exception {
|
||||
String testZNode = "/test";
|
||||
String expectedString = "testString";
|
||||
assertFalse(curator.exists(testZNode));
|
||||
curator.create(testZNode);
|
||||
assertTrue(curator.exists(testZNode));
|
||||
curator.setData(testZNode, expectedString, -1);
|
||||
String testString = curator.getStringData("/test");
|
||||
assertEquals(expectedString, testString);
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testChildren() throws Exception {
|
||||
List<String> children = curator.getChildren("/");
|
||||
assertEquals(1, children.size());
|
||||
|
||||
assertFalse(curator.exists("/node1"));
|
||||
curator.create("/node1");
|
||||
assertTrue(curator.exists("/node1"));
|
||||
|
||||
assertFalse(curator.exists("/node2"));
|
||||
curator.create("/node2");
|
||||
assertTrue(curator.exists("/node2"));
|
||||
|
||||
children = curator.getChildren("/");
|
||||
assertEquals(3, children.size());
|
||||
|
||||
curator.delete("/node2");
|
||||
assertFalse(curator.exists("/node2"));
|
||||
children = curator.getChildren("/");
|
||||
assertEquals(2, children.size());
|
||||
}
|
||||
}
|
|
@ -919,15 +919,19 @@
|
|||
</comparator>
|
||||
<comparator>
|
||||
<type>RegexpComparator</type>
|
||||
<expected-output>^( |\t)*of owner \(%u\), modification date \(%y, %Y\).( )*</expected-output>
|
||||
<expected-output>^( |\t)*of owner \(%u\), access date \(%x, %X\).( )*</expected-output>
|
||||
</comparator>
|
||||
<comparator>
|
||||
<type>RegexpComparator</type>
|
||||
<expected-output>^( |\t)*%y shows UTC date as "yyyy-MM-dd HH:mm:ss" and( )*</expected-output>
|
||||
<expected-output>^( |\t)*modification date \(%y, %Y\).( )*</expected-output>
|
||||
</comparator>
|
||||
<comparator>
|
||||
<type>RegexpComparator</type>
|
||||
<expected-output>^( |\t)*%Y shows milliseconds since January 1, 1970 UTC.( )*</expected-output>
|
||||
<expected-output>^( |\t)*%x and %y show UTC date as "yyyy-MM-dd HH:mm:ss" and( )*</expected-output>
|
||||
</comparator>
|
||||
<comparator>
|
||||
<type>RegexpComparator</type>
|
||||
<expected-output>^( |\t)*%X and %Y show milliseconds since January 1, 1970 UTC.( )*</expected-output>
|
||||
</comparator>
|
||||
<comparator>
|
||||
<type>RegexpComparator</type>
|
||||
|
|
|
@ -1,4 +1,4 @@
|
|||
#!/bin/bash
|
||||
#!/usr/bin/env bash
|
||||
# Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
# contributor license agreements. See the NOTICE file distributed with
|
||||
# this work for additional information regarding copyright ownership.
|
||||
|
|
|
@ -15,18 +15,23 @@
|
|||
|
||||
load hadoop-functions_test_helper
|
||||
|
||||
@test "hadoop_escape_sed (positive 1)" {
|
||||
ret="$(hadoop_sed_escape "\pass&&word\0#\$asdf/g ><'\"~\`!@#$%^&*()_+-=")"
|
||||
expected="\\\\pass\&\&word\\\0#\$asdf\/g ><'\"~\`!@#$%^\&*()_+-="
|
||||
echo "actual >${ret}<"
|
||||
echo "expected >${expected}<"
|
||||
[ "${ret}" = "${expected}" ]
|
||||
@test "hadoop_add_array_param (empty)" {
|
||||
hadoop_add_array_param ARRAY value
|
||||
[ "${ARRAY[0]}" = value ]
|
||||
}
|
||||
|
||||
@test "hadoop_add_array_param (exist)" {
|
||||
ARRAY=("val2")
|
||||
hadoop_add_array_param ARRAY val1
|
||||
[ "${ARRAY[0]}" = val2 ]
|
||||
[ "${ARRAY[1]}" = val1 ]
|
||||
}
|
||||
|
||||
@test "hadoop_add_array_param (double exist)" {
|
||||
ARRAY=("val2" "val1")
|
||||
hadoop_add_array_param ARRAY val3
|
||||
[ "${ARRAY[0]}" = val2 ]
|
||||
[ "${ARRAY[1]}" = val1 ]
|
||||
[ "${ARRAY[2]}" = val3 ]
|
||||
}
|
||||
|
||||
@test "hadoop_escape_xml (positive 1)" {
|
||||
ret="$(hadoop_xml_escape "\pass&&word\0#\$asdf/g ><'\"~\`!@#$%^&*()_+-=")"
|
||||
expected="\\pass&&word\0#\$asdf/g \>\<\'\"~\`!@#\$%^&*()_+-="
|
||||
echo "actual >${ret}<"
|
||||
echo "expected >${expected}<"
|
||||
[ "${ret}" = "${expected}" ]
|
||||
}
|
|
@@ -0,0 +1,47 @@
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

load hadoop-functions_test_helper

@test "hadoop_array_contains (empty)" {
run hadoop_array_contains value "${ARRAY[@]}"
[ "${status}" = 1 ]
}

@test "hadoop_array_contains (exist)" {
ARRAY=("value")
run hadoop_array_contains value "${ARRAY[@]}"
[ "${status}" = 0 ]
}

@test "hadoop_array_contains (notexist)" {
ARRAY=("different")
run hadoop_array_contains value "${ARRAY[@]}"
[ "${status}" = 1 ]
}

@test "hadoop_array_contains (exist, multi)" {
ARRAY=("val1" "val2" "val3")
for j in val1 val2 val3; do
run hadoop_array_contains "${j}" "${ARRAY[@]}"
[ "${status}" = 0 ]
done
}

@test "hadoop_array_contains (multi, not exist)" {
ARRAY=("val1" "val2" "val3")
run hadoop_array_contains value "${ARRAY[@]}"
[ "${status}" = 1 ]
}

@@ -13,5 +13,25 @@
# See the License for the specific language governing permissions and
# limitations under the License.

org.apache.hadoop.fs.azure.NativeAzureFileSystem
org.apache.hadoop.fs.azure.NativeAzureFileSystem$Secure
load hadoop-functions_test_helper

@test "hadoop_sort_array (empty)" {
hadoop_sort_array ARRAY
}

@test "hadoop_sort_array (single value)" {
ARRAY=("value")
hadoop_sort_array ARRAY
}

@test "hadoop_sort_array (multiple value)" {
ARRAY=("b" "c" "a")
preifsod=$(echo "${IFS}" | od -c)
hadoop_sort_array ARRAY
postifsod=$(echo "${IFS}" | od -c)

[ "${ARRAY[0]}" = "a" ]
[ "${ARRAY[1]}" = "b" ]
[ "${ARRAY[2]}" = "c" ]
[ "${preifsod}" = "${postifsod}" ]
}

@@ -16,7 +16,7 @@
# limitations under the License.

if [[ "${HADOOP_SHELL_EXECNAME}" = hadoop ]]; then
hadoop_add_subcommand "kms" "run KMS, the Key Management Server"
hadoop_add_subcommand "kms" daemon "run KMS, the Key Management Server"
fi

## @description Command handler for kms subcommand

@@ -54,4 +54,4 @@ function hadoop_subcommand_kms
[[ "${HADOOP_DAEMON_MODE}" == "start" ]]; then
hadoop_mkdir "${KMS_TEMP:-${HADOOP_HOME}/temp}"
fi
}
}

@@ -2764,7 +2764,7 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
}
}

public HashMap<String, String> getErasureCodingCodecs() throws IOException {
public Map<String, String> getErasureCodingCodecs() throws IOException {
checkOpen();
try (TraceScope ignored = tracer.newScope("getErasureCodingCodecs")) {
return namenode.getErasureCodingCodecs();

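Since the codec listing now returns the Map interface rather than a concrete HashMap, callers should hold the result as a Map. A small, hedged usage sketch through DistributedFileSystem, whose getAllErasureCodingCodecs() delegates to the call above; the filesystem URI is illustrative.

import java.net.URI;
import java.util.Map;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public class ListEcCodecs {
  public static void main(String[] args) throws Exception {
    DistributedFileSystem dfs = (DistributedFileSystem)
        FileSystem.get(URI.create("hdfs://nn1:8020"), new Configuration());
    Map<String, String> codecs = dfs.getAllErasureCodingCodecs();
    for (Map.Entry<String, String> e : codecs.entrySet()) {
      System.out.println("codec " + e.getKey() + " -> coders " + e.getValue());
    }
    dfs.close();
  }
}
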
@@ -2774,25 +2774,43 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
public AddECPolicyResponse[] addErasureCodingPolicies(
ErasureCodingPolicy[] policies) throws IOException {
checkOpen();
return namenode.addErasureCodingPolicies(policies);
try (TraceScope ignored = tracer.newScope("addErasureCodingPolicies")) {
return namenode.addErasureCodingPolicies(policies);
} catch (RemoteException re) {
throw re.unwrapRemoteException(AccessControlException.class);
}
}

public void removeErasureCodingPolicy(String ecPolicyName)
throws IOException {
checkOpen();
namenode.removeErasureCodingPolicy(ecPolicyName);
try (TraceScope ignored = tracer.newScope("removeErasureCodingPolicy")) {
namenode.removeErasureCodingPolicy(ecPolicyName);
} catch (RemoteException re) {
throw re.unwrapRemoteException(AccessControlException.class);
}
}

public void enableErasureCodingPolicy(String ecPolicyName)
throws IOException {
checkOpen();
namenode.enableErasureCodingPolicy(ecPolicyName);
try (TraceScope ignored = tracer.newScope("enableErasureCodingPolicy")) {
namenode.enableErasureCodingPolicy(ecPolicyName);
} catch (RemoteException re) {
throw re.unwrapRemoteException(AccessControlException.class,
SafeModeException.class);
}
}

public void disableErasureCodingPolicy(String ecPolicyName)
throws IOException {
checkOpen();
namenode.disableErasureCodingPolicy(ecPolicyName);
try (TraceScope ignored = tracer.newScope("disableErasureCodingPolicy")) {
namenode.disableErasureCodingPolicy(ecPolicyName);
} catch (RemoteException re) {
throw re.unwrapRemoteException(AccessControlException.class,
SafeModeException.class);
}
}

public DFSInotifyEventInputStream getInotifyEventStream() throws IOException {

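With the erasure coding admin RPCs above now wrapped in trace scopes and unwrapping AccessControlException/SafeModeException, a caller sees ordinary IOExceptions. A hedged sketch of driving these calls through DistributedFileSystem; the namenode URI, target path and built-in policy name are illustrative.

import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public class EcAdminSketch {
  public static void main(String[] args) throws Exception {
    DistributedFileSystem dfs = (DistributedFileSystem)
        FileSystem.get(URI.create("hdfs://nn1:8020"), new Configuration());
    // Enable a system policy, then attach it to a directory.
    dfs.enableErasureCodingPolicy("RS-6-3-1024k");
    dfs.setErasureCodingPolicy(new Path("/ec/data"), "RS-6-3-1024k");
    dfs.close();
  }
}
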
@@ -3026,7 +3044,8 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
*
* @param src path to get the information for
* @return Returns the policy information if file or directory on the path is
* erasure coded, null otherwise
* erasure coded, null otherwise. Null will be returned if directory or file
* has REPLICATION policy.
* @throws IOException
*/

@@ -61,4 +61,6 @@ public class DFSClientFaultInjector {
public boolean skipRollingRestartWait() {
return false;
}

public void sleepBeforeHedgedGet() {}
}

@ -830,60 +830,85 @@ public class DFSInputStream extends FSInputStream
|
|||
|
||||
private DNAddrPair chooseDataNode(LocatedBlock block,
|
||||
Collection<DatanodeInfo> ignoredNodes) throws IOException {
|
||||
return chooseDataNode(block, ignoredNodes, true);
|
||||
}
|
||||
|
||||
/**
|
||||
* Choose datanode to read from.
|
||||
*
|
||||
* @param block Block to choose datanode addr from
|
||||
* @param ignoredNodes Ignored nodes inside.
|
||||
* @param refetchIfRequired Whether to refetch if no nodes to chose
|
||||
* from.
|
||||
* @return Returns chosen DNAddrPair; Can be null if refetchIfRequired is
|
||||
* false.
|
||||
*/
|
||||
private DNAddrPair chooseDataNode(LocatedBlock block,
|
||||
Collection<DatanodeInfo> ignoredNodes, boolean refetchIfRequired)
|
||||
throws IOException {
|
||||
while (true) {
|
||||
DNAddrPair result = getBestNodeDNAddrPair(block, ignoredNodes);
|
||||
if (result != null) {
|
||||
return result;
|
||||
} else if (refetchIfRequired) {
|
||||
block = refetchLocations(block, ignoredNodes);
|
||||
} else {
|
||||
String errMsg = getBestNodeDNAddrPairErrorString(block.getLocations(),
|
||||
deadNodes, ignoredNodes);
|
||||
String blockInfo = block.getBlock() + " file=" + src;
|
||||
if (failures >= dfsClient.getConf().getMaxBlockAcquireFailures()) {
|
||||
String description = "Could not obtain block: " + blockInfo;
|
||||
DFSClient.LOG.warn(description + errMsg
|
||||
+ ". Throwing a BlockMissingException");
|
||||
throw new BlockMissingException(src, description,
|
||||
block.getStartOffset());
|
||||
}
|
||||
|
||||
DatanodeInfo[] nodes = block.getLocations();
|
||||
if (nodes == null || nodes.length == 0) {
|
||||
DFSClient.LOG.info("No node available for " + blockInfo);
|
||||
}
|
||||
DFSClient.LOG.info("Could not obtain " + block.getBlock()
|
||||
+ " from any node: " + errMsg
|
||||
+ ". Will get new block locations from namenode and retry...");
|
||||
try {
|
||||
// Introducing a random factor to the wait time before another retry.
|
||||
// The wait time is dependent on # of failures and a random factor.
|
||||
// At the first time of getting a BlockMissingException, the wait time
|
||||
// is a random number between 0..3000 ms. If the first retry
|
||||
// still fails, we will wait 3000 ms grace period before the 2nd retry.
|
||||
// Also at the second retry, the waiting window is expanded to 6000 ms
|
||||
// alleviating the request rate from the server. Similarly the 3rd retry
|
||||
// will wait 6000ms grace period before retry and the waiting window is
|
||||
// expanded to 9000ms.
|
||||
final int timeWindow = dfsClient.getConf().getTimeWindow();
|
||||
double waitTime = timeWindow * failures + // grace period for the last round of attempt
|
||||
// expanding time window for each failure
|
||||
timeWindow * (failures + 1) *
|
||||
ThreadLocalRandom.current().nextDouble();
|
||||
DFSClient.LOG.warn("DFS chooseDataNode: got # " + (failures + 1) +
|
||||
" IOException, will wait for " + waitTime + " msec.");
|
||||
Thread.sleep((long)waitTime);
|
||||
} catch (InterruptedException e) {
|
||||
Thread.currentThread().interrupt();
|
||||
throw new InterruptedIOException(
|
||||
"Interrupted while choosing DataNode for read.");
|
||||
}
|
||||
deadNodes.clear(); //2nd option is to remove only nodes[blockId]
|
||||
openInfo(true);
|
||||
block = refreshLocatedBlock(block);
|
||||
failures++;
|
||||
return null;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
private LocatedBlock refetchLocations(LocatedBlock block,
|
||||
Collection<DatanodeInfo> ignoredNodes) throws IOException {
|
||||
String errMsg = getBestNodeDNAddrPairErrorString(block.getLocations(),
|
||||
deadNodes, ignoredNodes);
|
||||
String blockInfo = block.getBlock() + " file=" + src;
|
||||
if (failures >= dfsClient.getConf().getMaxBlockAcquireFailures()) {
|
||||
String description = "Could not obtain block: " + blockInfo;
|
||||
DFSClient.LOG.warn(description + errMsg
|
||||
+ ". Throwing a BlockMissingException");
|
||||
throw new BlockMissingException(src, description,
|
||||
block.getStartOffset());
|
||||
}
|
||||
|
||||
DatanodeInfo[] nodes = block.getLocations();
|
||||
if (nodes == null || nodes.length == 0) {
|
||||
DFSClient.LOG.info("No node available for " + blockInfo);
|
||||
}
|
||||
DFSClient.LOG.info("Could not obtain " + block.getBlock()
|
||||
+ " from any node: " + errMsg
|
||||
+ ". Will get new block locations from namenode and retry...");
|
||||
try {
|
||||
// Introducing a random factor to the wait time before another retry.
|
||||
// The wait time is dependent on # of failures and a random factor.
|
||||
// At the first time of getting a BlockMissingException, the wait time
|
||||
// is a random number between 0..3000 ms. If the first retry
|
||||
// still fails, we will wait 3000 ms grace period before the 2nd retry.
|
||||
// Also at the second retry, the waiting window is expanded to 6000 ms
|
||||
// alleviating the request rate from the server. Similarly the 3rd retry
|
||||
// will wait 6000ms grace period before retry and the waiting window is
|
||||
// expanded to 9000ms.
|
||||
final int timeWindow = dfsClient.getConf().getTimeWindow();
|
||||
// grace period for the last round of attempt
|
||||
double waitTime = timeWindow * failures +
|
||||
// expanding time window for each failure
|
||||
timeWindow * (failures + 1) *
|
||||
ThreadLocalRandom.current().nextDouble();
|
||||
DFSClient.LOG.warn("DFS chooseDataNode: got # " + (failures + 1) +
|
||||
" IOException, will wait for " + waitTime + " msec.");
|
||||
Thread.sleep((long)waitTime);
|
||||
} catch (InterruptedException e) {
|
||||
Thread.currentThread().interrupt();
|
||||
throw new InterruptedIOException(
|
||||
"Interrupted while choosing DataNode for read.");
|
||||
}
|
||||
deadNodes.clear(); //2nd option is to remove only nodes[blockId]
|
||||
openInfo(true);
|
||||
block = refreshLocatedBlock(block);
|
||||
failures++;
|
||||
return block;
|
||||
}
|
||||
|
||||
/**
|
||||
* Get the best node from which to stream the data.
|
||||
* @param block LocatedBlock, containing nodes in priority order.
|
||||
|
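The inline comment above describes the retry backoff used by refetchLocations(): each round waits a full grace period proportional to the number of failures plus a random slice of an expanding window. An illustrative computation of that wait time, assuming a base window of 3000 ms (the usual default of dfs.client.retry.window.base; treat the exact key and default as an assumption here).

import java.util.concurrent.ThreadLocalRandom;

public class RetryWindowSketch {
  public static void main(String[] args) {
    final int timeWindow = 3000; // assumed base window in milliseconds
    for (int failures = 0; failures < 3; failures++) {
      // grace period for the previous round + random slice of an expanding window
      double waitTime = timeWindow * failures
          + timeWindow * (failures + 1) * ThreadLocalRandom.current().nextDouble();
      System.out.printf("attempt %d: wait %.0f ms%n", failures + 1, waitTime);
    }
  }
}
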
@ -985,6 +1010,7 @@ public class DFSInputStream extends FSInputStream
|
|||
return new Callable<ByteBuffer>() {
|
||||
@Override
|
||||
public ByteBuffer call() throws Exception {
|
||||
DFSClientFaultInjector.get().sleepBeforeHedgedGet();
|
||||
try (TraceScope ignored = dfsClient.getTracer().
|
||||
newScope("hedgedRead" + hedgedReadId, parentSpanId)) {
|
||||
actualGetFromOneDataNode(datanode, start, end, bb, corruptedBlocks);
|
||||
|
@ -1131,8 +1157,9 @@ public class DFSInputStream extends FSInputStream
|
|||
Future<ByteBuffer> firstRequest = hedgedService
|
||||
.submit(getFromDataNodeCallable);
|
||||
futures.add(firstRequest);
|
||||
Future<ByteBuffer> future = null;
|
||||
try {
|
||||
Future<ByteBuffer> future = hedgedService.poll(
|
||||
future = hedgedService.poll(
|
||||
conf.getHedgedReadThresholdMillis(), TimeUnit.MILLISECONDS);
|
||||
if (future != null) {
|
||||
ByteBuffer result = future.get();
|
||||
|
@ -1142,34 +1169,38 @@ public class DFSInputStream extends FSInputStream
|
|||
}
|
||||
DFSClient.LOG.debug("Waited {}ms to read from {}; spawning hedged "
|
||||
+ "read", conf.getHedgedReadThresholdMillis(), chosenNode.info);
|
||||
// Ignore this node on next go around.
|
||||
ignored.add(chosenNode.info);
|
||||
dfsClient.getHedgedReadMetrics().incHedgedReadOps();
|
||||
// continue; no need to refresh block locations
|
||||
} catch (ExecutionException e) {
|
||||
// Ignore
|
||||
futures.remove(future);
|
||||
} catch (InterruptedException e) {
|
||||
throw new InterruptedIOException(
|
||||
"Interrupted while waiting for reading task");
|
||||
}
|
||||
// Ignore this node on next go around.
|
||||
// If poll timeout and the request still ongoing, don't consider it
|
||||
// again. If read data failed, don't consider it either.
|
||||
ignored.add(chosenNode.info);
|
||||
} else {
|
||||
// We are starting up a 'hedged' read. We have a read already
|
||||
// ongoing. Call getBestNodeDNAddrPair instead of chooseDataNode.
|
||||
// If no nodes to do hedged reads against, pass.
|
||||
boolean refetch = false;
|
||||
try {
|
||||
chosenNode = getBestNodeDNAddrPair(block, ignored);
|
||||
if (chosenNode == null) {
|
||||
chosenNode = chooseDataNode(block, ignored);
|
||||
chosenNode = chooseDataNode(block, ignored, false);
|
||||
if (chosenNode != null) {
|
||||
// Latest block, if refreshed internally
|
||||
block = chosenNode.block;
|
||||
bb = ByteBuffer.allocate(len);
|
||||
Callable<ByteBuffer> getFromDataNodeCallable =
|
||||
getFromOneDataNode(chosenNode, block, start, end, bb,
|
||||
corruptedBlocks, hedgedReadId++);
|
||||
Future<ByteBuffer> oneMoreRequest =
|
||||
hedgedService.submit(getFromDataNodeCallable);
|
||||
futures.add(oneMoreRequest);
|
||||
} else {
|
||||
refetch = true;
|
||||
}
|
||||
// Latest block, if refreshed internally
|
||||
block = chosenNode.block;
|
||||
bb = ByteBuffer.allocate(len);
|
||||
Callable<ByteBuffer> getFromDataNodeCallable = getFromOneDataNode(
|
||||
chosenNode, block, start, end, bb,
|
||||
corruptedBlocks, hedgedReadId++);
|
||||
Future<ByteBuffer> oneMoreRequest = hedgedService
|
||||
.submit(getFromDataNodeCallable);
|
||||
futures.add(oneMoreRequest);
|
||||
} catch (IOException ioe) {
|
||||
DFSClient.LOG.debug("Failed getting node for hedged read: {}",
|
||||
ioe.getMessage());
|
||||
|
@ -1187,6 +1218,9 @@ public class DFSInputStream extends FSInputStream
|
|||
} catch (InterruptedException ie) {
|
||||
// Ignore and retry
|
||||
}
|
||||
if (refetch) {
|
||||
refetchLocations(block, ignored);
|
||||
}
|
||||
// We got here if exception. Ignore this node on next go around IFF
|
||||
// we found a chosenNode to hedge read against.
|
||||
if (chosenNode != null && chosenNode.info != null) {
|
||||
|
|
|
@ -83,6 +83,7 @@ import java.util.List;
|
|||
import java.util.Locale;
|
||||
import java.util.Map;
|
||||
import java.util.Set;
|
||||
import java.util.concurrent.BlockingQueue;
|
||||
import java.util.concurrent.SynchronousQueue;
|
||||
import java.util.concurrent.ThreadPoolExecutor;
|
||||
import java.util.concurrent.TimeUnit;
|
||||
|
@@ -811,10 +812,30 @@ public class DFSUtilClient {
public static ThreadPoolExecutor getThreadPoolExecutor(int corePoolSize,
int maxPoolSize, long keepAliveTimeSecs, String threadNamePrefix,
boolean runRejectedExec) {
return getThreadPoolExecutor(corePoolSize, maxPoolSize, keepAliveTimeSecs,
new SynchronousQueue<>(), threadNamePrefix, runRejectedExec);
}

/**
* Utility to create a {@link ThreadPoolExecutor}.
*
* @param corePoolSize - min threads in the pool, even if idle
* @param maxPoolSize - max threads in the pool
* @param keepAliveTimeSecs - max seconds beyond which excess idle threads
* will be terminated
* @param queue - the queue to use for holding tasks before they are executed.
* @param threadNamePrefix - name prefix for the pool threads
* @param runRejectedExec - when true, rejected tasks from
* ThreadPoolExecutor are run in the context of calling thread
* @return ThreadPoolExecutor
*/
public static ThreadPoolExecutor getThreadPoolExecutor(int corePoolSize,
int maxPoolSize, long keepAliveTimeSecs, BlockingQueue<Runnable> queue,
String threadNamePrefix, boolean runRejectedExec) {
Preconditions.checkArgument(corePoolSize > 0);
ThreadPoolExecutor threadPoolExecutor = new ThreadPoolExecutor(corePoolSize,
maxPoolSize, keepAliveTimeSecs, TimeUnit.SECONDS,
new SynchronousQueue<Runnable>(), new Daemon.DaemonFactory() {
queue, new Daemon.DaemonFactory() {
private final AtomicInteger threadIndex = new AtomicInteger(0);

@Override

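The new overload lets callers supply their own work queue instead of the hand-off SynchronousQueue used by the older signature. A hedged usage sketch; DFSUtilClient is an internal utility, and the sizing values, queue capacity and thread-name prefix below are purely illustrative.

import java.util.concurrent.BlockingQueue;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.ThreadPoolExecutor;
import org.apache.hadoop.hdfs.DFSUtilClient;

public class PoolSketch {
  public static void main(String[] args) {
    BlockingQueue<Runnable> queue = new LinkedBlockingQueue<>(1024); // buffer tasks
    ThreadPoolExecutor pool = DFSUtilClient.getThreadPoolExecutor(
        4,             // corePoolSize: minimum threads kept even when idle
        16,            // maxPoolSize
        60,            // keepAliveTimeSecs for excess idle threads
        queue,         // the new queue parameter
        "demo-pool-",  // threadNamePrefix
        true);         // runRejectedExec: rejected tasks run in the calling thread
    pool.execute(() ->
        System.out.println("running on " + Thread.currentThread().getName()));
    pool.shutdown();
  }
}
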
@ -26,7 +26,6 @@ import java.util.ArrayList;
|
|||
import java.util.Arrays;
|
||||
import java.util.Collection;
|
||||
import java.util.EnumSet;
|
||||
import java.util.HashMap;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
|
||||
|
@ -2515,8 +2514,6 @@ public class DistributedFileSystem extends FileSystem {
|
|||
public void setErasureCodingPolicy(final Path path,
|
||||
final String ecPolicyName) throws IOException {
|
||||
Path absF = fixRelativePart(path);
|
||||
Preconditions.checkNotNull(ecPolicyName, "Erasure coding policy cannot be" +
|
||||
" null.");
|
||||
new FileSystemLinkResolver<Void>() {
|
||||
@Override
|
||||
public Void doCall(final Path p) throws IOException {
|
||||
|
@ -2543,7 +2540,8 @@ public class DistributedFileSystem extends FileSystem {
|
|||
*
|
||||
* @param path The path of the file or directory
|
||||
* @return Returns the policy information if file or directory on the path
|
||||
* is erasure coded, null otherwise
|
||||
* is erasure coded, null otherwise. Null will be returned if directory or
|
||||
* file has REPLICATION policy.
|
||||
* @throws IOException
|
||||
*/
|
||||
public ErasureCodingPolicy getErasureCodingPolicy(final Path path)
|
||||
|
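Per the revised javadoc, a null result now also covers paths that carry the REPLICATION policy, so callers must treat null as "plain replicated file or directory". A minimal hedged sketch; the filesystem URI and path are illustrative.

import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;

public class EcPolicyCheck {
  public static void main(String[] args) throws Exception {
    DistributedFileSystem dfs = (DistributedFileSystem)
        FileSystem.get(URI.create("hdfs://nn1:8020"), new Configuration());
    ErasureCodingPolicy policy = dfs.getErasureCodingPolicy(new Path("/ec/warehouse"));
    if (policy == null) {
      System.out.println("replicated (no EC policy attached)");
    } else {
      System.out.println("EC policy: " + policy.getName());
    }
    dfs.close();
  }
}
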
@ -2570,7 +2568,8 @@ public class DistributedFileSystem extends FileSystem {
|
|||
}
|
||||
|
||||
/**
|
||||
* Retrieve all the erasure coding policies supported by this file system.
|
||||
* Retrieve all the erasure coding policies supported by this file system,
|
||||
* excluding REPLICATION policy.
|
||||
*
|
||||
* @return all erasure coding policies supported by this file system.
|
||||
* @throws IOException
|
||||
|
@ -2587,7 +2586,7 @@ public class DistributedFileSystem extends FileSystem {
|
|||
* @return all erasure coding codecs and coders supported by this file system.
|
||||
* @throws IOException
|
||||
*/
|
||||
public HashMap<String, String> getAllErasureCodingCodecs()
|
||||
public Map<String, String> getAllErasureCodingCodecs()
|
||||
throws IOException {
|
||||
return dfs.getErasureCodingCodecs();
|
||||
}
|
||||
|
@ -2892,7 +2891,8 @@ public class DistributedFileSystem extends FileSystem {
|
|||
*/
|
||||
@Override
|
||||
public FSDataOutputStream build() throws IOException {
|
||||
if (getFlags().contains(CreateFlag.CREATE)) {
|
||||
if (getFlags().contains(CreateFlag.CREATE) ||
|
||||
getFlags().contains(CreateFlag.OVERWRITE)) {
|
||||
if (isRecursive()) {
|
||||
return dfs.create(getPath(), getPermission(), getFlags(),
|
||||
getBufferSize(), getReplication(), getBlockSize(),
|
||||
|
|
|
@ -19,8 +19,8 @@ package org.apache.hadoop.hdfs.protocol;
|
|||
|
||||
import java.io.IOException;
|
||||
import java.util.EnumSet;
|
||||
import java.util.HashMap;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
|
||||
import org.apache.hadoop.classification.InterfaceAudience;
|
||||
import org.apache.hadoop.classification.InterfaceStability;
|
||||
|
@ -1588,7 +1588,8 @@ public interface ClientProtocol {
|
|||
|
||||
|
||||
/**
|
||||
* Get the erasure coding policies loaded in Namenode.
|
||||
* Get the erasure coding policies loaded in Namenode, excluding REPLICATION
|
||||
* policy.
|
||||
*
|
||||
* @throws IOException
|
||||
*/
|
||||
|
@ -1601,10 +1602,11 @@ public interface ClientProtocol {
|
|||
* @throws IOException
|
||||
*/
|
||||
@Idempotent
|
||||
HashMap<String, String> getErasureCodingCodecs() throws IOException;
|
||||
Map<String, String> getErasureCodingCodecs() throws IOException;
|
||||
|
||||
/**
|
||||
* Get the information about the EC policy for the path.
|
||||
* Get the information about the EC policy for the path. Null will be returned
|
||||
* if directory or file has REPLICATION policy.
|
||||
*
|
||||
* @param src path to get the info for
|
||||
* @throws IOException
|
||||
|
|
|
@ -17,23 +17,28 @@
|
|||
*/
|
||||
package org.apache.hadoop.hdfs.protocol;
|
||||
|
||||
import java.io.Serializable;
|
||||
|
||||
import com.google.common.base.Preconditions;
|
||||
import org.apache.commons.lang.builder.EqualsBuilder;
|
||||
import org.apache.commons.lang.builder.HashCodeBuilder;
|
||||
import org.apache.hadoop.classification.InterfaceAudience;
|
||||
import org.apache.hadoop.classification.InterfaceStability;
|
||||
import org.apache.hadoop.io.erasurecode.ECSchema;
|
||||
import org.apache.hadoop.io.erasurecode.ErasureCodeConstants;
|
||||
|
||||
/**
|
||||
* A policy about how to write/read/code an erasure coding file.
|
||||
*/
|
||||
@InterfaceAudience.Public
|
||||
@InterfaceStability.Evolving
|
||||
public final class ErasureCodingPolicy {
|
||||
public final class ErasureCodingPolicy implements Serializable {
|
||||
|
||||
private static final long serialVersionUID = 0x0079fe4e;
|
||||
|
||||
private String name;
|
||||
private final ECSchema schema;
|
||||
private final int cellSize;
|
||||
private String name;
|
||||
private byte id;
|
||||
|
||||
public ErasureCodingPolicy(String name, ECSchema schema,
|
||||
|
@ -103,6 +108,10 @@ public final class ErasureCodingPolicy {
|
|||
this.id = id;
|
||||
}
|
||||
|
||||
public boolean isReplicationPolicy() {
|
||||
return (id == ErasureCodeConstants.REPLICATION_POLICY_ID);
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean equals(Object o) {
|
||||
if (o == null) {
|
||||
|
|
|
@ -27,6 +27,11 @@ import org.apache.hadoop.fs.permission.FsPermission;
|
|||
* done for backwards compatibility in case any existing clients assume the
|
||||
* value of FsPermission is in a particular range.
|
||||
*/
|
||||
|
||||
/**
|
||||
* @deprecated ACLs, encryption, and erasure coding are managed on FileStatus.
|
||||
*/
|
||||
@Deprecated
|
||||
@InterfaceAudience.Private
|
||||
public class FsPermissionExtension extends FsPermission {
|
||||
private static final long serialVersionUID = 0x13c298a4;
|
||||
|
|
|
@ -48,8 +48,8 @@ public final class HdfsConstants {
|
|||
public static final byte COLD_STORAGE_POLICY_ID = 2;
|
||||
public static final String COLD_STORAGE_POLICY_NAME = "COLD";
|
||||
|
||||
// TODO should be conf injected?
|
||||
public static final int DEFAULT_DATA_SOCKET_SIZE = 128 * 1024;
|
||||
public static final int DEFAULT_DATA_SOCKET_SIZE = 0;
|
||||
|
||||
/**
|
||||
* A special path component contained in the path for a snapshot file/dir
|
||||
*/
|
||||
|
|
|
@ -17,7 +17,9 @@
|
|||
*/
|
||||
package org.apache.hadoop.hdfs.protocol;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.net.URI;
|
||||
import java.util.EnumSet;
|
||||
|
||||
import org.apache.hadoop.classification.InterfaceAudience;
|
||||
import org.apache.hadoop.classification.InterfaceStability;
|
||||
|
@ -31,24 +33,15 @@ import org.apache.hadoop.hdfs.DFSUtilClient;
|
|||
*/
|
||||
@InterfaceAudience.Private
|
||||
@InterfaceStability.Evolving
|
||||
public class HdfsFileStatus {
|
||||
public class HdfsFileStatus extends FileStatus {
|
||||
|
||||
private static final long serialVersionUID = 0x126eb82a;
|
||||
|
||||
// local name of the inode that's encoded in java UTF8
|
||||
private final byte[] path;
|
||||
private final byte[] symlink; // symlink target encoded in java UTF8 or null
|
||||
private final long length;
|
||||
private final boolean isdir;
|
||||
private final short block_replication;
|
||||
private final long blocksize;
|
||||
private final long modification_time;
|
||||
private final long access_time;
|
||||
private final FsPermission permission;
|
||||
private final String owner;
|
||||
private final String group;
|
||||
private byte[] uPath;
|
||||
private byte[] uSymlink; // symlink target encoded in java UTF8/null
|
||||
private final long fileId;
|
||||
|
||||
private final FileEncryptionInfo feInfo;
|
||||
|
||||
private final ErasureCodingPolicy ecPolicy;
|
||||
|
||||
// Used by dir, not including dot and dotdot. Always zero for a regular file.
|
||||
|
@ -57,12 +50,22 @@ public class HdfsFileStatus {
|
|||
|
||||
public static final byte[] EMPTY_NAME = new byte[0];
|
||||
|
||||
/**
|
||||
* Set of features potentially active on an instance.
|
||||
*/
|
||||
public enum Flags {
|
||||
HAS_ACL,
|
||||
HAS_CRYPT,
|
||||
HAS_EC;
|
||||
}
|
||||
private final EnumSet<Flags> flags;
|
||||
|
||||
/**
|
||||
* Constructor.
|
||||
* @param length the number of bytes the file has
|
||||
* @param isdir if the path is a directory
|
||||
* @param length the number of bytes the file has
|
||||
* @param isdir if the path is a directory
|
||||
* @param block_replication the replication factor
|
||||
* @param blocksize the block size
|
||||
* @param blocksize the block size
|
||||
* @param modification_time modification time
|
||||
* @param access_time access time
|
||||
* @param permission permission
|
||||
|
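Because HdfsFileStatus now extends FileStatus and carries an explicit flag set, callers can query ACL/encryption/erasure-coding state through the standard FileStatus accessors instead of peeking at the deprecated permission-bit extension. A hedged sketch, assuming a 3.x client where FileStatus exposes hasAcl/isEncrypted/isErasureCoded; the URI and path are illustrative.

import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class FlagsSketch {
  public static void main(String[] args) throws Exception {
    FileSystem fs = FileSystem.get(URI.create("hdfs://nn1:8020"), new Configuration());
    FileStatus st = fs.getFileStatus(new Path("/user/demo/file"));
    System.out.println("acl=" + st.hasAcl()
        + " encrypted=" + st.isEncrypted()
        + " erasureCoded=" + st.isErasureCoded());
    fs.close();
  }
}
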
@ -77,25 +80,18 @@ public class HdfsFileStatus {
|
|||
* @param ecPolicy the erasure coding policy
|
||||
*/
|
||||
public HdfsFileStatus(long length, boolean isdir, int block_replication,
|
||||
long blocksize, long modification_time, long access_time,
|
||||
FsPermission permission, String owner, String group, byte[] symlink,
|
||||
byte[] path, long fileId, int childrenNum, FileEncryptionInfo feInfo,
|
||||
byte storagePolicy, ErasureCodingPolicy ecPolicy) {
|
||||
this.length = length;
|
||||
this.isdir = isdir;
|
||||
this.block_replication = (short) block_replication;
|
||||
this.blocksize = blocksize;
|
||||
this.modification_time = modification_time;
|
||||
this.access_time = access_time;
|
||||
this.permission = (permission == null) ?
|
||||
((isdir || symlink!=null) ?
|
||||
FsPermission.getDefault() :
|
||||
FsPermission.getFileDefault()) :
|
||||
permission;
|
||||
this.owner = (owner == null) ? "" : owner;
|
||||
this.group = (group == null) ? "" : group;
|
||||
this.symlink = symlink;
|
||||
this.path = path;
|
||||
long blocksize, long modification_time,
|
||||
long access_time, FsPermission permission,
|
||||
EnumSet<Flags> flags, String owner, String group,
|
||||
byte[] symlink, byte[] path, long fileId,
|
||||
int childrenNum, FileEncryptionInfo feInfo,
|
||||
byte storagePolicy, ErasureCodingPolicy ecPolicy) {
|
||||
super(length, isdir, block_replication, blocksize, modification_time,
|
||||
access_time, convert(isdir, symlink != null, permission, flags),
|
||||
owner, group, null, null);
|
||||
this.flags = flags;
|
||||
this.uSymlink = symlink;
|
||||
this.uPath = path;
|
||||
this.fileId = fileId;
|
||||
this.childrenNum = childrenNum;
|
||||
this.feInfo = feInfo;
|
||||
|
@ -104,83 +100,48 @@ public class HdfsFileStatus {
|
|||
}
|
||||
|
||||
/**
|
||||
* Get the length of this file, in bytes.
|
||||
* @return the length of this file, in bytes.
|
||||
* Set redundant flags for compatibility with existing applications.
|
||||
*/
|
||||
public final long getLen() {
|
||||
return length;
|
||||
protected static FsPermission convert(boolean isdir, boolean symlink,
|
||||
FsPermission p, EnumSet<Flags> f) {
|
||||
if (p instanceof FsPermissionExtension) {
|
||||
// verify flags are set consistently
|
||||
assert p.getAclBit() == f.contains(HdfsFileStatus.Flags.HAS_ACL);
|
||||
assert p.getEncryptedBit() == f.contains(HdfsFileStatus.Flags.HAS_CRYPT);
|
||||
assert p.getErasureCodedBit() == f.contains(HdfsFileStatus.Flags.HAS_EC);
|
||||
return p;
|
||||
}
|
||||
if (null == p) {
|
||||
if (isdir) {
|
||||
p = FsPermission.getDirDefault();
|
||||
} else if (symlink) {
|
||||
p = FsPermission.getDefault();
|
||||
} else {
|
||||
p = FsPermission.getFileDefault();
|
||||
}
|
||||
}
|
||||
return new FsPermissionExtension(p, f.contains(Flags.HAS_ACL),
|
||||
f.contains(Flags.HAS_CRYPT), f.contains(Flags.HAS_EC));
|
||||
}
|
||||
|
||||
/**
|
||||
* Is this a directory?
|
||||
* @return true if this is a directory
|
||||
*/
|
||||
public final boolean isDir() {
|
||||
return isdir;
|
||||
}
|
||||
|
||||
/**
|
||||
* Is this a symbolic link?
|
||||
* @return true if this is a symbolic link
|
||||
*/
|
||||
@Override
|
||||
public boolean isSymlink() {
|
||||
return symlink != null;
|
||||
return uSymlink != null;
|
||||
}
|
||||
|
||||
/**
|
||||
* Get the block size of the file.
|
||||
* @return the number of bytes
|
||||
*/
|
||||
public final long getBlockSize() {
|
||||
return blocksize;
|
||||
@Override
|
||||
public boolean hasAcl() {
|
||||
return flags.contains(Flags.HAS_ACL);
|
||||
}
|
||||
|
||||
/**
|
||||
* Get the replication factor of a file.
|
||||
* @return the replication factor of a file.
|
||||
*/
|
||||
public final short getReplication() {
|
||||
return block_replication;
|
||||
@Override
|
||||
public boolean isEncrypted() {
|
||||
return flags.contains(Flags.HAS_CRYPT);
|
||||
}
|
||||
|
||||
/**
|
||||
* Get the modification time of the file.
|
||||
* @return the modification time of file in milliseconds since January 1, 1970 UTC.
|
||||
*/
|
||||
public final long getModificationTime() {
|
||||
return modification_time;
|
||||
}
|
||||
|
||||
/**
|
||||
* Get the access time of the file.
|
||||
* @return the access time of file in milliseconds since January 1, 1970 UTC.
|
||||
*/
|
||||
public final long getAccessTime() {
|
||||
return access_time;
|
||||
}
|
||||
|
||||
/**
|
||||
* Get FsPermission associated with the file.
|
||||
* @return permission
|
||||
*/
|
||||
public final FsPermission getPermission() {
|
||||
return permission;
|
||||
}
|
||||
|
||||
/**
|
||||
* Get the owner of the file.
|
||||
* @return owner of the file
|
||||
*/
|
||||
public final String getOwner() {
|
||||
return owner;
|
||||
}
|
||||
|
||||
/**
|
||||
* Get the group associated with the file.
|
||||
* @return group for the file.
|
||||
*/
|
||||
public final String getGroup() {
|
||||
return group;
|
||||
@Override
|
||||
public boolean isErasureCoded() {
|
||||
return flags.contains(Flags.HAS_EC);
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -188,7 +149,7 @@ public class HdfsFileStatus {
|
|||
* @return true if the name is empty
|
||||
*/
|
||||
public final boolean isEmptyLocalName() {
|
||||
return path.length == 0;
|
||||
return uPath.length == 0;
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -196,7 +157,7 @@ public class HdfsFileStatus {
|
|||
* @return the local name in string
|
||||
*/
|
||||
public final String getLocalName() {
|
||||
return DFSUtilClient.bytes2String(path);
|
||||
return DFSUtilClient.bytes2String(uPath);
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -204,7 +165,7 @@ public class HdfsFileStatus {
|
|||
* @return the local name in java UTF8
|
||||
*/
|
||||
public final byte[] getLocalNameInBytes() {
|
||||
return path;
|
||||
return uPath;
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -238,16 +199,24 @@ public class HdfsFileStatus {
|
|||
return new Path(parent, getLocalName());
|
||||
}
|
||||
|
||||
/**
|
||||
* Get the string representation of the symlink.
|
||||
* @return the symlink as a string.
|
||||
*/
|
||||
public final String getSymlink() {
|
||||
return DFSUtilClient.bytes2String(symlink);
|
||||
@Override
|
||||
public Path getSymlink() throws IOException {
|
||||
if (isSymlink()) {
|
||||
return new Path(DFSUtilClient.bytes2String(uSymlink));
|
||||
}
|
||||
throw new IOException("Path " + getPath() + " is not a symbolic link");
|
||||
}
|
||||
|
||||
@Override
|
||||
public void setSymlink(Path sym) {
|
||||
uSymlink = DFSUtilClient.string2Bytes(sym.toString());
|
||||
}
|
||||
|
||||
/**
|
||||
* Opaque referant for the symlink, to be resolved at the client.
|
||||
*/
|
||||
public final byte[] getSymlinkInBytes() {
|
||||
return symlink;
|
||||
return uSymlink;
|
||||
}
|
||||
|
||||
public final long getFileId() {
|
||||
|
@ -275,13 +244,30 @@ public class HdfsFileStatus {
|
|||
return storagePolicy;
|
||||
}
|
||||
|
||||
public final FileStatus makeQualified(URI defaultUri, Path path) {
|
||||
return new FileStatus(getLen(), isDir(), getReplication(),
|
||||
getBlockSize(), getModificationTime(),
|
||||
getAccessTime(),
|
||||
getPermission(), getOwner(), getGroup(),
|
||||
isSymlink() ? new Path(getSymlink()) : null,
|
||||
(getFullPath(path)).makeQualified(
|
||||
defaultUri, null)); // fully-qualify path
|
||||
@Override
|
||||
public boolean equals(Object o) {
|
||||
// satisfy findbugs
|
||||
return super.equals(o);
|
||||
}
|
||||
|
||||
@Override
|
||||
public int hashCode() {
|
||||
// satisfy findbugs
|
||||
return super.hashCode();
|
||||
}
|
||||
|
||||
/**
|
||||
* Resolve the short name of the Path given the URI, parent provided. This
|
||||
* FileStatus reference will not contain a valid Path until it is resolved
|
||||
* by this method.
|
||||
* @param defaultUri FileSystem to fully qualify HDFS path.
|
||||
* @param parent Parent path of this element.
|
||||
* @return Reference to this instance.
|
||||
*/
|
||||
public final FileStatus makeQualified(URI defaultUri, Path parent) {
|
||||
// fully-qualify path
|
||||
setPath(getFullPath(parent).makeQualified(defaultUri, null));
|
||||
return this; // API compatibility
|
||||
}
|
||||
|
||||
}
|
||||
|
|
|
@ -18,6 +18,7 @@
|
|||
package org.apache.hadoop.hdfs.protocol;
|
||||
|
||||
import java.net.URI;
|
||||
import java.util.EnumSet;
|
||||
|
||||
import org.apache.hadoop.classification.InterfaceAudience;
|
||||
import org.apache.hadoop.classification.InterfaceStability;
|
||||
|
@ -34,7 +35,14 @@ import org.apache.hadoop.hdfs.DFSUtilClient;
|
|||
@InterfaceAudience.Private
|
||||
@InterfaceStability.Evolving
|
||||
public class HdfsLocatedFileStatus extends HdfsFileStatus {
|
||||
private final LocatedBlocks locations;
|
||||
|
||||
private static final long serialVersionUID = 0x23c73328;
|
||||
|
||||
/**
|
||||
* Left transient, because {@link #makeQualifiedLocated(URI,Path)}
|
||||
* is the user-facing type.
|
||||
*/
|
||||
private transient LocatedBlocks locations;
|
||||
|
||||
/**
|
||||
* Constructor
|
||||
|
@ -56,12 +64,12 @@ public class HdfsLocatedFileStatus extends HdfsFileStatus {
|
|||
*/
|
||||
public HdfsLocatedFileStatus(long length, boolean isdir,
|
||||
int block_replication, long blocksize, long modification_time,
|
||||
long access_time, FsPermission permission, String owner, String group,
|
||||
byte[] symlink, byte[] path, long fileId, LocatedBlocks locations,
|
||||
int childrenNum, FileEncryptionInfo feInfo, byte storagePolicy,
|
||||
ErasureCodingPolicy ecPolicy) {
|
||||
long access_time, FsPermission permission, EnumSet<Flags> flags,
|
||||
String owner, String group, byte[] symlink, byte[] path, long fileId,
|
||||
LocatedBlocks locations, int childrenNum, FileEncryptionInfo feInfo,
|
||||
byte storagePolicy, ErasureCodingPolicy ecPolicy) {
|
||||
super(length, isdir, block_replication, blocksize, modification_time,
|
||||
access_time, permission, owner, group, symlink, path, fileId,
|
||||
access_time, permission, flags, owner, group, symlink, path, fileId,
|
||||
childrenNum, feInfo, storagePolicy, ecPolicy);
|
||||
this.locations = locations;
|
||||
}
|
||||
|
@ -72,13 +80,21 @@ public class HdfsLocatedFileStatus extends HdfsFileStatus {
|
|||
|
||||
public final LocatedFileStatus makeQualifiedLocated(URI defaultUri,
|
||||
Path path) {
|
||||
return new LocatedFileStatus(getLen(), isDir(), getReplication(),
|
||||
getBlockSize(), getModificationTime(),
|
||||
getAccessTime(),
|
||||
getPermission(), getOwner(), getGroup(),
|
||||
isSymlink() ? new Path(getSymlink()) : null,
|
||||
(getFullPath(path)).makeQualified(
|
||||
defaultUri, null), // fully-qualify path
|
||||
makeQualified(defaultUri, path);
|
||||
return new LocatedFileStatus(this,
|
||||
DFSUtilClient.locatedBlocks2Locations(getBlockLocations()));
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean equals(Object o) {
|
||||
// satisfy findbugs
|
||||
return super.equals(o);
|
||||
}
|
||||
|
||||
@Override
|
||||
public int hashCode() {
|
||||
// satisfy findbugs
|
||||
return super.hashCode();
|
||||
}
|
||||
|
||||
}
|
||||
|
|
|
@ -21,6 +21,7 @@ import java.io.PrintStream;
|
|||
import java.text.SimpleDateFormat;
|
||||
import java.util.Comparator;
|
||||
import java.util.Date;
|
||||
import java.util.EnumSet;
|
||||
|
||||
import org.apache.hadoop.fs.Path;
|
||||
import org.apache.hadoop.fs.permission.FsPermission;
|
||||
|
@ -57,11 +58,12 @@ public class SnapshottableDirectoryStatus {
|
|||
private final byte[] parentFullPath;
|
||||
|
||||
public SnapshottableDirectoryStatus(long modification_time, long access_time,
|
||||
FsPermission permission, String owner, String group, byte[] localName,
|
||||
long inodeId, int childrenNum,
|
||||
int snapshotNumber, int snapshotQuota, byte[] parentFullPath) {
|
||||
FsPermission permission, EnumSet<HdfsFileStatus.Flags> flags,
|
||||
String owner, String group, byte[] localName, long inodeId,
|
||||
int childrenNum, int snapshotNumber, int snapshotQuota,
|
||||
byte[] parentFullPath) {
|
||||
this.dirStatus = new HdfsFileStatus(0, true, 0, 0, modification_time,
|
||||
access_time, permission, owner, group, null, localName, inodeId,
|
||||
access_time, permission, flags, owner, group, null, localName, inodeId,
|
||||
childrenNum, null, HdfsConstants.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED,
|
||||
null);
|
||||
this.snapshotNumber = snapshotNumber;
|
||||
|
|
|
@ -68,6 +68,13 @@ public final class SystemErasureCodingPolicies {
|
|||
new ErasureCodingPolicy(ErasureCodeConstants.RS_10_4_SCHEMA,
|
||||
DEFAULT_CELLSIZE, RS_10_4_POLICY_ID);
|
||||
|
||||
// REPLICATION policy is always enabled.
|
||||
private static final ErasureCodingPolicy REPLICATION_POLICY =
|
||||
new ErasureCodingPolicy(ErasureCodeConstants.REPLICATION_POLICY_NAME,
|
||||
ErasureCodeConstants.REPLICATION_1_2_SCHEMA,
|
||||
DEFAULT_CELLSIZE,
|
||||
ErasureCodeConstants.REPLICATION_POLICY_ID);
|
||||
|
||||
private static final List<ErasureCodingPolicy> SYS_POLICIES =
|
||||
Collections.unmodifiableList(Arrays.asList(
|
||||
SYS_POLICY1, SYS_POLICY2, SYS_POLICY3, SYS_POLICY4,
|
||||
|
@ -118,4 +125,11 @@ public final class SystemErasureCodingPolicies {
|
|||
public static ErasureCodingPolicy getByName(String name) {
|
||||
return SYSTEM_POLICIES_BY_NAME.get(name);
|
||||
}
|
||||
|
||||
/**
|
||||
* Get the special REPLICATION policy.
|
||||
*/
|
||||
public static ErasureCodingPolicy getReplicationPolicy() {
|
||||
return REPLICATION_POLICY;
|
||||
}
|
||||
}
|
||||
|
|
|
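The REPLICATION pseudo-policy added above is always enabled and reachable through its own accessor. A short hedged sketch enumerating the built-in policies; SystemErasureCodingPolicies is an internal class, so this is for illustration only.

import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
import org.apache.hadoop.hdfs.protocol.SystemErasureCodingPolicies;

public class PoliciesSketch {
  public static void main(String[] args) {
    for (ErasureCodingPolicy p : SystemErasureCodingPolicies.getPolicies()) {
      System.out.println(p.getName() + " cellSize=" + p.getCellSize());
    }
    ErasureCodingPolicy repl = SystemErasureCodingPolicies.getReplicationPolicy();
    System.out.println(repl.getName()
        + " isReplicationPolicy=" + repl.isReplicationPolicy());
  }
}
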
@ -26,6 +26,7 @@ import java.util.List;
|
|||
|
||||
import com.google.common.collect.Lists;
|
||||
|
||||
import java.util.Map;
|
||||
import java.util.concurrent.TimeUnit;
|
||||
import java.util.stream.Collectors;
|
||||
|
||||
|
@ -1518,7 +1519,9 @@ public class ClientNamenodeProtocolTranslatorPB implements
|
|||
final SetErasureCodingPolicyRequestProto.Builder builder =
|
||||
SetErasureCodingPolicyRequestProto.newBuilder();
|
||||
builder.setSrc(src);
|
||||
builder.setEcPolicyName(ecPolicyName);
|
||||
if (ecPolicyName != null) {
|
||||
builder.setEcPolicyName(ecPolicyName);
|
||||
}
|
||||
SetErasureCodingPolicyRequestProto req = builder.build();
|
||||
try {
|
||||
rpcProxy.setErasureCodingPolicy(null, req);
|
||||
|
@ -1758,11 +1761,11 @@ public class ClientNamenodeProtocolTranslatorPB implements
|
|||
}
|
||||
|
||||
@Override
|
||||
public HashMap<String, String> getErasureCodingCodecs() throws IOException {
|
||||
public Map<String, String> getErasureCodingCodecs() throws IOException {
|
||||
try {
|
||||
GetErasureCodingCodecsResponseProto response = rpcProxy
|
||||
.getErasureCodingCodecs(null, VOID_GET_EC_CODEC_REQUEST);
|
||||
HashMap<String, String> ecCodecs = new HashMap<String, String>();
|
||||
Map<String, String> ecCodecs = new HashMap<>();
|
||||
for (CodecProto codec : response.getCodecList()) {
|
||||
ecCodecs.put(codec.getCodec(), codec.getCoders());
|
||||
}
|
||||
|
|
|
@ -104,6 +104,7 @@ import org.apache.hadoop.hdfs.protocol.proto.AclProtos.AclEntryProto.AclEntrySco
|
|||
import org.apache.hadoop.hdfs.protocol.proto.AclProtos.AclEntryProto.AclEntryTypeProto;
|
||||
import org.apache.hadoop.hdfs.protocol.proto.AclProtos.AclEntryProto.FsActionProto;
|
||||
import org.apache.hadoop.hdfs.protocol.proto.AclProtos.AclStatusProto;
|
||||
import org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProto;
|
||||
import org.apache.hadoop.hdfs.protocol.proto.AclProtos.GetAclStatusResponseProto;
|
||||
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddBlockFlagProto;
|
||||
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveEntryProto;
|
||||
|
@ -149,7 +150,6 @@ import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto.Sto
|
|||
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DirectoryListingProto;
|
||||
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto;
|
||||
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto;
|
||||
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto;
|
||||
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsServerDefaultsProto;
|
||||
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto;
|
||||
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.FileType;
|
||||
|
@ -1142,7 +1142,7 @@ public class PBHelperClient {
|
|||
}
|
||||
|
||||
public static FsPermission convert(FsPermissionProto p) {
|
||||
return new FsPermissionExtension((short)p.getPerm());
|
||||
return new FsPermission((short)p.getPerm());
|
||||
}
|
||||
|
||||
private static Event.CreateEvent.INodeType createTypeConvert(
|
||||
|
@ -1501,10 +1501,14 @@ public class PBHelperClient {
|
|||
return null;
|
||||
}
|
||||
final HdfsFileStatusProto status = sdirStatusProto.getDirStatus();
|
||||
EnumSet<HdfsFileStatus.Flags> flags = status.hasFlags()
|
||||
? convertFlags(status.getFlags())
|
||||
: convertFlags(status.getPermission());
|
||||
return new SnapshottableDirectoryStatus(
|
||||
status.getModificationTime(),
|
||||
status.getAccessTime(),
|
||||
convert(status.getPermission()),
|
||||
flags,
|
||||
status.getOwner(),
|
||||
status.getGroup(),
|
||||
status.getPath().toByteArray(),
|
||||
|
@ -1546,17 +1550,23 @@ public class PBHelperClient {
|
|||
}
|
||||
|
||||
public static FsPermissionProto convert(FsPermission p) {
|
||||
return FsPermissionProto.newBuilder().setPerm(p.toExtendedShort()).build();
|
||||
return FsPermissionProto.newBuilder().setPerm(p.toShort()).build();
|
||||
}
|
||||
|
||||
public static HdfsFileStatus convert(HdfsFileStatusProto fs) {
|
||||
if (fs == null)
|
||||
if (fs == null) {
|
||||
return null;
|
||||
}
|
||||
EnumSet<HdfsFileStatus.Flags> flags = fs.hasFlags()
|
||||
? convertFlags(fs.getFlags())
|
||||
: convertFlags(fs.getPermission());
|
||||
return new HdfsLocatedFileStatus(
|
||||
fs.getLength(), fs.getFileType().equals(FileType.IS_DIR),
|
||||
fs.getBlockReplication(), fs.getBlocksize(),
|
||||
fs.getModificationTime(), fs.getAccessTime(),
|
||||
convert(fs.getPermission()), fs.getOwner(), fs.getGroup(),
|
||||
convert(fs.getPermission()),
|
||||
flags,
|
||||
fs.getOwner(), fs.getGroup(),
|
||||
fs.getFileType().equals(FileType.IS_SYMLINK) ?
|
||||
fs.getSymlink().toByteArray() : null,
|
||||
fs.getPath().toByteArray(),
|
||||
|
@ -1569,6 +1579,47 @@ public class PBHelperClient {
|
|||
fs.hasEcPolicy() ? convertErasureCodingPolicy(fs.getEcPolicy()) : null);
|
||||
}
|
||||
|
||||
private static EnumSet<HdfsFileStatus.Flags> convertFlags(int flags) {
|
||||
EnumSet<HdfsFileStatus.Flags> f =
|
||||
EnumSet.noneOf(HdfsFileStatus.Flags.class);
|
||||
for (HdfsFileStatusProto.Flags pbf : HdfsFileStatusProto.Flags.values()) {
|
||||
if ((pbf.getNumber() & flags) != 0) {
|
||||
switch (pbf) {
|
||||
case HAS_ACL:
|
||||
f.add(HdfsFileStatus.Flags.HAS_ACL);
|
||||
break;
|
||||
case HAS_CRYPT:
|
||||
f.add(HdfsFileStatus.Flags.HAS_CRYPT);
|
||||
break;
|
||||
case HAS_EC:
|
||||
f.add(HdfsFileStatus.Flags.HAS_EC);
|
||||
break;
|
||||
default:
|
||||
// ignore unknown
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
return f;
|
||||
}
|
||||
|
||||
private static EnumSet<HdfsFileStatus.Flags> convertFlags(
|
||||
FsPermissionProto pbp) {
|
||||
EnumSet<HdfsFileStatus.Flags> f =
|
||||
EnumSet.noneOf(HdfsFileStatus.Flags.class);
|
||||
FsPermission p = new FsPermissionExtension((short)pbp.getPerm());
|
||||
if (p.getAclBit()) {
|
||||
f.add(HdfsFileStatus.Flags.HAS_ACL);
|
||||
}
|
||||
if (p.getEncryptedBit()) {
|
||||
f.add(HdfsFileStatus.Flags.HAS_CRYPT);
|
||||
}
|
||||
if (p.getErasureCodedBit()) {
|
||||
f.add(HdfsFileStatus.Flags.HAS_EC);
|
||||
}
|
||||
return f;
|
||||
}
|
||||
|
||||
public static CorruptFileBlocks convert(CorruptFileBlocksProto c) {
|
||||
if (c == null)
|
||||
return null;
|
||||
|
@ -2044,7 +2095,7 @@ public class PBHelperClient {
|
|||
if (fs == null)
|
||||
return null;
|
||||
FileType fType = FileType.IS_FILE;
|
||||
if (fs.isDir()) {
|
||||
if (fs.isDirectory()) {
|
||||
fType = FileType.IS_DIR;
|
||||
} else if (fs.isSymlink()) {
|
||||
fType = FileType.IS_SYMLINK;
|
||||
|
@ -2082,6 +2133,10 @@ public class PBHelperClient {
|
|||
builder.setEcPolicy(convertErasureCodingPolicy(
|
||||
fs.getErasureCodingPolicy()));
|
||||
}
|
||||
int flags = fs.hasAcl() ? HdfsFileStatusProto.Flags.HAS_ACL_VALUE : 0;
|
||||
flags |= fs.isEncrypted() ? HdfsFileStatusProto.Flags.HAS_CRYPT_VALUE : 0;
|
||||
flags |= fs.isErasureCoded() ? HdfsFileStatusProto.Flags.HAS_EC_VALUE : 0;
|
||||
builder.setFlags(flags);
|
||||
return builder.build();
|
||||
}
|
||||
|
||||
|
|
|
@ -41,7 +41,6 @@ import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
|
|||
import org.apache.hadoop.hdfs.protocol.DatanodeInfo.DatanodeInfoBuilder;
|
||||
import org.apache.hadoop.hdfs.protocol.DirectoryListing;
|
||||
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
|
||||
import org.apache.hadoop.hdfs.protocol.FsPermissionExtension;
|
||||
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
|
||||
import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
|
||||
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
|
||||
|
@ -61,6 +60,7 @@ import java.util.ArrayList;
|
|||
import java.util.Arrays;
|
||||
import java.util.Collection;
|
||||
import java.util.Collections;
|
||||
import java.util.EnumSet;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
|
||||
|
@ -97,17 +97,8 @@ class JsonUtilClient {
|
|||
}
|
||||
|
||||
/** Convert a string to a FsPermission object. */
|
||||
static FsPermission toFsPermission(
|
||||
final String s, Boolean aclBit, Boolean encBit, Boolean erasureBit) {
|
||||
FsPermission perm = new FsPermission(Short.parseShort(s, 8));
|
||||
final boolean aBit = (aclBit != null) ? aclBit : false;
|
||||
final boolean eBit = (encBit != null) ? encBit : false;
|
||||
final boolean ecBit = (erasureBit != null) ? erasureBit : false;
|
||||
if (aBit || eBit || ecBit) {
|
||||
return new FsPermissionExtension(perm, aBit, eBit, ecBit);
|
||||
} else {
|
||||
return perm;
|
||||
}
|
||||
static FsPermission toFsPermission(final String s) {
|
||||
return null == s ? null : new FsPermission(Short.parseShort(s, 8));
|
||||
}
|
||||
|
||||
/** Convert a Json map to a HdfsFileStatus object. */
|
||||
|
@ -128,10 +119,23 @@ class JsonUtilClient {
|
|||
final long len = ((Number) m.get("length")).longValue();
|
||||
final String owner = (String) m.get("owner");
|
||||
final String group = (String) m.get("group");
|
||||
final FsPermission permission = toFsPermission((String) m.get("permission"),
|
||||
(Boolean) m.get("aclBit"),
|
||||
(Boolean) m.get("encBit"),
|
||||
(Boolean) m.get("ecBit"));
|
||||
final FsPermission permission = toFsPermission((String)m.get("permission"));
|
||||
|
||||
Boolean aclBit = (Boolean) m.get("aclBit");
|
||||
Boolean encBit = (Boolean) m.get("encBit");
|
||||
Boolean erasureBit = (Boolean) m.get("ecBit");
|
||||
EnumSet<HdfsFileStatus.Flags> f =
|
||||
EnumSet.noneOf(HdfsFileStatus.Flags.class);
|
||||
if (aclBit != null && aclBit) {
|
||||
f.add(HdfsFileStatus.Flags.HAS_ACL);
|
||||
}
|
||||
if (encBit != null && encBit) {
|
||||
f.add(HdfsFileStatus.Flags.HAS_CRYPT);
|
||||
}
|
||||
if (erasureBit != null && erasureBit) {
|
||||
f.add(HdfsFileStatus.Flags.HAS_EC);
|
||||
}
|
||||
|
||||
final long aTime = ((Number) m.get("accessTime")).longValue();
|
||||
final long mTime = ((Number) m.get("modificationTime")).longValue();
|
||||
final long blockSize = ((Number) m.get("blockSize")).longValue();
|
||||
|
@ -143,11 +147,11 @@ class JsonUtilClient {
|
|||
final byte storagePolicy = m.containsKey("storagePolicy") ?
|
||||
(byte) ((Number) m.get("storagePolicy")).longValue() :
|
||||
HdfsConstants.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED;
|
||||
return new HdfsFileStatus(len, type == WebHdfsConstants.PathType.DIRECTORY,
|
||||
replication, blockSize, mTime, aTime, permission, owner, group,
|
||||
symlink, DFSUtilClient.string2Bytes(localName),
|
||||
fileId, childrenNum, null,
|
||||
storagePolicy, null);
|
||||
return new HdfsFileStatus(len,
|
||||
type == WebHdfsConstants.PathType.DIRECTORY, replication, blockSize,
|
||||
mTime, aTime, permission, f, owner, group, symlink,
|
||||
DFSUtilClient.string2Bytes(localName), fileId, childrenNum,
|
||||
null, storagePolicy, null);
|
||||
}
|
||||
|
||||
static HdfsFileStatus[] toHdfsFileStatusArray(final Map<?, ?> json) {
|
||||
|
@ -465,9 +469,7 @@ class JsonUtilClient {
|
|||
aclStatusBuilder.stickyBit((Boolean) m.get("stickyBit"));
|
||||
String permString = (String) m.get("permission");
|
||||
if (permString != null) {
|
||||
final FsPermission permission = toFsPermission(permString,
|
||||
(Boolean) m.get("aclBit"), (Boolean) m.get("encBit"),
|
||||
(Boolean) m.get("ecBit"));
|
||||
final FsPermission permission = toFsPermission(permString);
|
||||
aclStatusBuilder.setPermission(permission);
|
||||
}
|
||||
final List<?> entries = (List<?>) m.get("entries");
|
||||
|
|
|
@ -32,7 +32,13 @@ public class WebHdfsConstants {
|
|||
FILE, DIRECTORY, SYMLINK;
|
||||
|
||||
static PathType valueOf(HdfsFileStatus status) {
|
||||
return status.isDir()? DIRECTORY: status.isSymlink()? SYMLINK: FILE;
|
||||
if (status.isDirectory()) {
|
||||
return DIRECTORY;
|
||||
}
|
||||
if (status.isSymlink()) {
|
||||
return SYMLINK;
|
||||
}
|
||||
return FILE;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
@ -1016,15 +1016,7 @@ public class WebHdfsFileSystem extends FileSystem
|
|||
public FileStatus getFileStatus(Path f) throws IOException {
|
||||
statistics.incrementReadOps(1);
|
||||
storageStatistics.incrementOpCounter(OpType.GET_FILE_STATUS);
|
||||
return makeQualified(getHdfsFileStatus(f), f);
|
||||
}
|
||||
|
||||
private FileStatus makeQualified(HdfsFileStatus f, Path parent) {
|
||||
return new FileStatus(f.getLen(), f.isDir(), f.getReplication(),
|
||||
f.getBlockSize(), f.getModificationTime(), f.getAccessTime(),
|
||||
f.getPermission(), f.getOwner(), f.getGroup(),
|
||||
f.isSymlink() ? new Path(f.getSymlink()) : null,
|
||||
f.getFullPath(parent).makeQualified(getUri(), getWorkingDirectory()));
|
||||
return getHdfsFileStatus(f).makeQualified(getUri(), f);
|
||||
}
|
||||
|
||||
@Override
|
||||
|
@ -1507,6 +1499,7 @@ public class WebHdfsFileSystem extends FileSystem
|
|||
statistics.incrementReadOps(1);
|
||||
storageStatistics.incrementOpCounter(OpType.LIST_STATUS);
|
||||
|
||||
final URI fsUri = getUri();
|
||||
final HttpOpParam.Op op = GetOpParam.Op.LISTSTATUS;
|
||||
return new FsPathResponseRunner<FileStatus[]>(op, f) {
|
||||
@Override
|
||||
|
@ -1515,7 +1508,7 @@ public class WebHdfsFileSystem extends FileSystem
|
|||
JsonUtilClient.toHdfsFileStatusArray(json);
|
||||
final FileStatus[] statuses = new FileStatus[hdfsStatuses.length];
|
||||
for (int i = 0; i < hdfsStatuses.length; i++) {
|
||||
statuses[i] = makeQualified(hdfsStatuses[i], f);
|
||||
statuses[i] = hdfsStatuses[i].makeQualified(fsUri, f);
|
||||
}
|
||||
|
||||
return statuses;
|
||||
|
@ -1541,10 +1534,11 @@ public class WebHdfsFileSystem extends FileSystem
|
|||
}
|
||||
}.run();
|
||||
// Qualify the returned FileStatus array
|
||||
final URI fsUri = getUri();
|
||||
final HdfsFileStatus[] statuses = listing.getPartialListing();
|
||||
FileStatus[] qualified = new FileStatus[statuses.length];
|
||||
for (int i = 0; i < statuses.length; i++) {
|
||||
qualified[i] = makeQualified(statuses[i], f);
|
||||
qualified[i] = statuses[i].makeQualified(fsUri, f);
|
||||
}
|
||||
return new DirectoryEntries(qualified, listing.getLastName(),
|
||||
listing.hasMore());
|
||||
|
|
|
@ -21,7 +21,12 @@ option java_outer_classname = "AclProtos";
|
|||
option java_generate_equals_and_hash = true;
|
||||
package hadoop.hdfs;
|
||||
|
||||
import "hdfs.proto";
|
||||
/**
|
||||
* File or Directory permision - same spec as posix
|
||||
*/
|
||||
message FsPermissionProto {
|
||||
required uint32 perm = 1; // Actually a short - only 16bits used
|
||||
}
|
||||
|
||||
message AclEntryProto {
|
||||
enum AclEntryScopeProto {
|
||||
|
|
|
@ -25,7 +25,7 @@ import "hdfs.proto";
|
|||
|
||||
message SetErasureCodingPolicyRequestProto {
|
||||
required string src = 1;
|
||||
required string ecPolicyName = 2;
|
||||
optional string ecPolicyName = 2;
|
||||
}
|
||||
|
||||
message SetErasureCodingPolicyResponseProto {
|
||||
|
|
|
@ -32,6 +32,7 @@ option java_generate_equals_and_hash = true;
|
|||
package hadoop.hdfs;
|
||||
|
||||
import "Security.proto";
|
||||
import "acl.proto";
|
||||
|
||||
/**
|
||||
* Extended block idenfies a block
|
||||
|
@ -198,13 +199,6 @@ message CorruptFileBlocksProto {
|
|||
required string cookie = 2;
|
||||
}
|
||||
|
||||
/**
|
||||
* File or Directory permision - same spec as posix
|
||||
*/
|
||||
message FsPermissionProto {
|
||||
required uint32 perm = 1; // Actually a short - only 16bits used
|
||||
}
|
||||
|
||||
/**
|
||||
* Types of recognized storage media.
|
||||
*/
|
||||
|
@ -390,6 +384,11 @@ message HdfsFileStatusProto {
|
|||
IS_FILE = 2;
|
||||
IS_SYMLINK = 3;
|
||||
}
|
||||
enum Flags {
|
||||
HAS_ACL = 0x01; // has ACLs
|
||||
HAS_CRYPT = 0x02; // encrypted
|
||||
HAS_EC = 0x04; // erasure coded
|
||||
}
|
||||
required FileType fileType = 1;
|
||||
required bytes path = 2; // local name of inode encoded java UTF8
|
||||
required uint64 length = 3;
|
||||
|
@ -417,6 +416,9 @@ message HdfsFileStatusProto {
|
|||
|
||||
// Optional field for erasure coding
|
||||
optional ErasureCodingPolicyProto ecPolicy = 17;
|
||||
|
||||
// Set of flags
|
||||
optional uint32 flags = 18 [default = 0];
|
||||
}
|
||||
|
||||
/**
|
||||
|
|
|
@ -124,6 +124,8 @@ public class HttpFSFileSystem extends FileSystem
public static final String POLICY_NAME_PARAM = "storagepolicy";
public static final String OFFSET_PARAM = "offset";
public static final String LENGTH_PARAM = "length";
public static final String SNAPSHOT_NAME_PARAM = "snapshotname";
public static final String OLD_SNAPSHOT_NAME_PARAM = "oldsnapshotname";

public static final Short DEFAULT_PERMISSION = 0755;
public static final String ACLSPEC_DEFAULT = "";

@ -144,6 +146,8 @@ public class HttpFSFileSystem extends FileSystem

public static final String UPLOAD_CONTENT_TYPE= "application/octet-stream";

public static final String SNAPSHOT_JSON = "Path";

public enum FILE_TYPE {
FILE, DIRECTORY, SYMLINK;
@ -229,7 +233,9 @@ public class HttpFSFileSystem extends FileSystem
DELETE(HTTP_DELETE), SETXATTR(HTTP_PUT), GETXATTRS(HTTP_GET),
REMOVEXATTR(HTTP_PUT), LISTXATTRS(HTTP_GET), LISTSTATUS_BATCH(HTTP_GET),
GETALLSTORAGEPOLICY(HTTP_GET), GETSTORAGEPOLICY(HTTP_GET),
SETSTORAGEPOLICY(HTTP_PUT), UNSETSTORAGEPOLICY(HTTP_POST);
SETSTORAGEPOLICY(HTTP_PUT), UNSETSTORAGEPOLICY(HTTP_POST),
CREATESNAPSHOT(HTTP_PUT), DELETESNAPSHOT(HTTP_DELETE),
RENAMESNAPSHOT(HTTP_PUT);

private String httpMethod;
@ -1047,18 +1053,7 @@ public class HttpFSFileSystem extends FileSystem
/** Convert a string to a FsPermission object. */
static FsPermission toFsPermission(JSONObject json) {
final String s = (String) json.get(PERMISSION_JSON);
final Boolean aclBit = (Boolean) json.get(ACL_BIT_JSON);
final Boolean encBit = (Boolean) json.get(ENC_BIT_JSON);
final Boolean erasureBit = (Boolean) json.get(EC_BIT_JSON);
FsPermission perm = new FsPermission(Short.parseShort(s, 8));
final boolean aBit = (aclBit != null) ? aclBit : false;
final boolean eBit = (encBit != null) ? encBit : false;
final boolean ecBit = (erasureBit != null) ? erasureBit : false;
if (aBit || eBit || ecBit) {
return new FsPermissionExtension(perm, aBit, eBit, ecBit);
} else {
return perm;
}
return new FsPermission(Short.parseShort(s, 8));
}

private FileStatus createFileStatus(Path parent, JSONObject json) {
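
With the FsPermissionExtension branch removed here, the ACL/encryption/erasure-coding bits are no longer folded into the permission object. A hedged sketch of how a caller could still detect those attributes, assuming the FileStatus attribute accessors (hasAcl, isEncrypted, isErasureCoded) available in this code line:

    import org.apache.hadoop.fs.FileStatus;

    final class StatusAttributes {
      // The attribute bits travel with the FileStatus itself rather than with
      // an FsPermissionExtension wrapped around the permission.
      static String describe(FileStatus stat) {
        return String.format("acl=%b encrypted=%b erasureCoded=%b",
            stat.hasAcl(), stat.isEncrypted(), stat.isErasureCoded());
      }
    }
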
@ -1073,23 +1068,23 @@ public class HttpFSFileSystem extends FileSystem
long mTime = (Long) json.get(MODIFICATION_TIME_JSON);
long blockSize = (Long) json.get(BLOCK_SIZE_JSON);
short replication = ((Long) json.get(REPLICATION_JSON)).shortValue();
FileStatus fileStatus = null;

switch (type) {
case FILE:
case DIRECTORY:
fileStatus = new FileStatus(len, (type == FILE_TYPE.DIRECTORY),
replication, blockSize, mTime, aTime,
permission, owner, group, path);
break;
case SYMLINK:
Path symLink = null;
fileStatus = new FileStatus(len, false,
replication, blockSize, mTime, aTime,
permission, owner, group, symLink,
path);
final Boolean aclBit = (Boolean) json.get(ACL_BIT_JSON);
final Boolean encBit = (Boolean) json.get(ENC_BIT_JSON);
final Boolean erasureBit = (Boolean) json.get(EC_BIT_JSON);
final boolean aBit = (aclBit != null) ? aclBit : false;
final boolean eBit = (encBit != null) ? encBit : false;
final boolean ecBit = (erasureBit != null) ? erasureBit : false;
if (aBit || eBit || ecBit) {
// include this for compatibility with 2.x
FsPermissionExtension deprecatedPerm =
new FsPermissionExtension(permission, aBit, eBit, ecBit);
return new FileStatus(len, FILE_TYPE.DIRECTORY == type,
replication, blockSize, mTime, aTime, deprecatedPerm, owner, group,
null, path, aBit, eBit, ecBit);
}
return fileStatus;
return new FileStatus(len, FILE_TYPE.DIRECTORY == type,
replication, blockSize, mTime, aTime, permission, owner, group, path);
}

/**
@ -1445,4 +1440,43 @@ public class HttpFSFileSystem extends FileSystem
Operation.UNSETSTORAGEPOLICY.getMethod(), params, src, true);
HttpExceptionUtils.validateResponse(conn, HttpURLConnection.HTTP_OK);
}

@Override
public final Path createSnapshot(Path path, String snapshotName)
throws IOException {
Map<String, String> params = new HashMap<String, String>();
params.put(OP_PARAM, Operation.CREATESNAPSHOT.toString());
if (snapshotName != null) {
params.put(SNAPSHOT_NAME_PARAM, snapshotName);
}
HttpURLConnection conn = getConnection(Operation.CREATESNAPSHOT.getMethod(),
params, path, true);
HttpExceptionUtils.validateResponse(conn, HttpURLConnection.HTTP_OK);
JSONObject json = (JSONObject) HttpFSUtils.jsonParse(conn);
return new Path((String) json.get(SNAPSHOT_JSON));
}

@Override
public void renameSnapshot(Path path, String snapshotOldName,
String snapshotNewName) throws IOException {
Map<String, String> params = new HashMap<String, String>();
params.put(OP_PARAM, Operation.RENAMESNAPSHOT.toString());
params.put(SNAPSHOT_NAME_PARAM, snapshotNewName);
params.put(OLD_SNAPSHOT_NAME_PARAM, snapshotOldName);
HttpURLConnection conn = getConnection(Operation.RENAMESNAPSHOT.getMethod(),
params, path, true);
HttpExceptionUtils.validateResponse(conn, HttpURLConnection.HTTP_OK);
}

@Override
public void deleteSnapshot(Path path, String snapshotName)
throws IOException {
Map<String, String> params = new HashMap<String, String>();
params.put(OP_PARAM, Operation.DELETESNAPSHOT.toString());
params.put(SNAPSHOT_NAME_PARAM, snapshotName);
HttpURLConnection conn = getConnection(Operation.DELETESNAPSHOT.getMethod(),
params, path, true);
HttpExceptionUtils.validateResponse(conn, HttpURLConnection.HTTP_OK);
}

}

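
A brief usage sketch of the new snapshot round trip from the client side of the FileSystem API; the endpoint, directory, and snapshot names are placeholders, and the target directory must already have snapshots allowed on it:

    import java.net.URI;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class SnapshotRoundTrip {
      public static void main(String[] args) throws Exception {
        // Hypothetical HttpFS endpoint; adjust host/port for a real cluster.
        FileSystem fs = FileSystem.get(
            URI.create("webhdfs://httpfs-host:14000"), new Configuration());
        Path dir = new Path("/tmp/snapshot-demo");

        Path snap = fs.createSnapshot(dir, "s1"); // PUT    op=CREATESNAPSHOT
        fs.renameSnapshot(dir, "s1", "s2");       // PUT    op=RENAMESNAPSHOT
        fs.deleteSnapshot(dir, "s2");             // DELETE op=DELETESNAPSHOT
        System.out.println("created snapshot at " + snap);
      }
    }
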
@ -669,7 +669,7 @@ public class FSOperations {
/**
* Creates a list-status executor.
*
* @param path the directory to retrieve the status of its contents.
* @param path the directory/file to retrieve the status of its contents.
* @param filter glob filter to use.
*
* @throws IOException thrown if the filter expression is incorrect.
@ -1492,4 +1492,109 @@ public class FSOperations {
return JsonUtil.toJsonMap(locations);
}
}

/**
* Executor that performs a createSnapshot FileSystemAccess operation.
*/
@InterfaceAudience.Private
public static class FSCreateSnapshot implements
FileSystemAccess.FileSystemExecutor<String> {

private Path path;
private String snapshotName;

/**
* Creates a createSnapshot executor.
* @param path directory path to be snapshotted.
* @param snapshotName the snapshot name.
*/
public FSCreateSnapshot(String path, String snapshotName) {
this.path = new Path(path);
this.snapshotName = snapshotName;
}

/**
* Executes the filesystem operation.
* @param fs filesystem instance to use.
* @return <code>Path</code> the complete path for newly created snapshot
* @throws IOException thrown if an IO error occurred.
*/
@Override
public String execute(FileSystem fs) throws IOException {
Path snapshotPath = fs.createSnapshot(path, snapshotName);
JSONObject json = toJSON(HttpFSFileSystem.HOME_DIR_JSON,
snapshotPath.toString());
return json.toJSONString().replaceAll("\\\\", "");
}
}

/**
* Executor that performs a deleteSnapshot FileSystemAccess operation.
*/
@InterfaceAudience.Private
public static class FSDeleteSnapshot implements
FileSystemAccess.FileSystemExecutor<Void> {

private Path path;
private String snapshotName;

/**
* Creates a deleteSnapshot executor.
* @param path path for the snapshot to be deleted.
* @param snapshotName snapshot name.
*/
public FSDeleteSnapshot(String path, String snapshotName) {
this.path = new Path(path);
this.snapshotName = snapshotName;
}

/**
* Executes the filesystem operation.
* @param fs filesystem instance to use.
* @return void
* @throws IOException thrown if an IO error occurred.
*/
@Override
public Void execute(FileSystem fs) throws IOException {
fs.deleteSnapshot(path, snapshotName);
return null;
}
}

/**
* Executor that performs a renameSnapshot FileSystemAccess operation.
*/
@InterfaceAudience.Private
public static class FSRenameSnapshot implements
FileSystemAccess.FileSystemExecutor<Void> {
private Path path;
private String oldSnapshotName;
private String snapshotName;

/**
* Creates a renameSnapshot executor.
* @param path directory path of the snapshot to be renamed.
* @param oldSnapshotName current snapshot name.
* @param snapshotName new snapshot name to be set.
*/
public FSRenameSnapshot(String path, String oldSnapshotName,
String snapshotName) {
this.path = new Path(path);
this.oldSnapshotName = oldSnapshotName;
this.snapshotName = snapshotName;
}

/**
* Executes the filesystem operation.
* @param fs filesystem instance to use.
* @return void
* @throws IOException thrown if an IO error occurred.
*/
@Override
public Void execute(FileSystem fs) throws IOException {
fs.renameSnapshot(path, oldSnapshotName, snapshotName);
return null;
}
}

}

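
For orientation, a hedged sketch of driving one of these executors directly; in the server they run through FileSystemAccess under the proxied user, so calling execute(fs) against a plain FileSystem as below is only an illustration:

    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.http.server.FSOperations;

    final class ExecutorSketch {
      // Returns the JSON payload (e.g. {"Path":"/tmp/snapshot-demo/.snapshot/s1"})
      // that the HTTP layer would send back to the caller.
      static String createSnapshot(FileSystem fs) throws Exception {
        FSOperations.FSCreateSnapshot command =
            new FSOperations.FSCreateSnapshot("/tmp/snapshot-demo", "s1");
        return command.execute(fs);
      }
    }
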
@ -100,6 +100,13 @@ public class HttpFSParametersProvider extends ParametersProvider {
PARAMS_DEF.put(Operation.SETSTORAGEPOLICY,
new Class[] {PolicyNameParam.class});
PARAMS_DEF.put(Operation.UNSETSTORAGEPOLICY, new Class[] {});
PARAMS_DEF.put(Operation.CREATESNAPSHOT,
new Class[] {SnapshotNameParam.class});
PARAMS_DEF.put(Operation.DELETESNAPSHOT,
new Class[] {SnapshotNameParam.class});
PARAMS_DEF.put(Operation.RENAMESNAPSHOT,
new Class[] {OldSnapshotNameParam.class,
SnapshotNameParam.class});
}

public HttpFSParametersProvider() {
@ -565,4 +572,42 @@ public class HttpFSParametersProvider extends ParametersProvider {
super(NAME, null);
}
}

/**
* Class for SnapshotName parameter.
*/
public static class SnapshotNameParam extends StringParam {

/**
* Parameter name.
*/
public static final String NAME = HttpFSFileSystem.SNAPSHOT_NAME_PARAM;

/**
* Constructor.
*/
public SnapshotNameParam() {
super(NAME, null);
}

}

/**
* Class for OldSnapshotName parameter.
*/
public static class OldSnapshotNameParam extends StringParam {

/**
* Parameter name.
*/
public static final String NAME = HttpFSFileSystem.OLD_SNAPSHOT_NAME_PARAM;

/**
* Constructor.
*/
public OldSnapshotNameParam() {
super(NAME, null);
}
}

}

@ -37,6 +37,7 @@ import org.apache.hadoop.fs.http.server.HttpFSParametersProvider.LenParam;
import org.apache.hadoop.fs.http.server.HttpFSParametersProvider.ModifiedTimeParam;
import org.apache.hadoop.fs.http.server.HttpFSParametersProvider.NewLengthParam;
import org.apache.hadoop.fs.http.server.HttpFSParametersProvider.OffsetParam;
import org.apache.hadoop.fs.http.server.HttpFSParametersProvider.OldSnapshotNameParam;
import org.apache.hadoop.fs.http.server.HttpFSParametersProvider.OperationParam;
import org.apache.hadoop.fs.http.server.HttpFSParametersProvider.OverwriteParam;
import org.apache.hadoop.fs.http.server.HttpFSParametersProvider.OwnerParam;

@ -45,6 +46,7 @@ import org.apache.hadoop.fs.http.server.HttpFSParametersProvider.PolicyNameParam
import org.apache.hadoop.fs.http.server.HttpFSParametersProvider.RecursiveParam;
import org.apache.hadoop.fs.http.server.HttpFSParametersProvider.ReplicationParam;
import org.apache.hadoop.fs.http.server.HttpFSParametersProvider.SourcesParam;
import org.apache.hadoop.fs.http.server.HttpFSParametersProvider.SnapshotNameParam;
import org.apache.hadoop.fs.http.server.HttpFSParametersProvider.XAttrEncodingParam;
import org.apache.hadoop.fs.http.server.HttpFSParametersProvider.XAttrNameParam;
import org.apache.hadoop.fs.http.server.HttpFSParametersProvider.XAttrSetFlagParam;
@ -430,6 +432,16 @@ public class HttpFSServer {
response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
break;
}
case DELETESNAPSHOT: {
String snapshotName = params.get(SnapshotNameParam.NAME,
SnapshotNameParam.class);
FSOperations.FSDeleteSnapshot command =
new FSOperations.FSDeleteSnapshot(path, snapshotName);
fsExecute(user, command);
AUDIT_LOG.info("[{}] deleted snapshot [{}]", path, snapshotName);
response = Response.ok().build();
break;
}
default: {
throw new IOException(
MessageFormat.format("Invalid HTTP DELETE operation [{0}]",

@ -602,6 +614,16 @@ public class HttpFSServer {
}
break;
}
case CREATESNAPSHOT: {
String snapshotName = params.get(SnapshotNameParam.NAME,
SnapshotNameParam.class);
FSOperations.FSCreateSnapshot command =
new FSOperations.FSCreateSnapshot(path, snapshotName);
String json = fsExecute(user, command);
AUDIT_LOG.info("[{}] snapshot created as [{}]", path, snapshotName);
response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
break;
}
case SETXATTR: {
String xattrName = params.get(XAttrNameParam.NAME,
XAttrNameParam.class);

@ -617,6 +639,20 @@ public class HttpFSServer {
response = Response.ok().build();
break;
}
case RENAMESNAPSHOT: {
String oldSnapshotName = params.get(OldSnapshotNameParam.NAME,
OldSnapshotNameParam.class);
String snapshotName = params.get(SnapshotNameParam.NAME,
SnapshotNameParam.class);
FSOperations.FSRenameSnapshot command =
new FSOperations.FSRenameSnapshot(path, oldSnapshotName,
snapshotName);
fsExecute(user, command);
AUDIT_LOG.info("[{}] renamed snapshot [{}] to [{}]", path,
oldSnapshotName, snapshotName);
response = Response.ok().build();
break;
}
case REMOVEXATTR: {
String xattrName = params.get(XAttrNameParam.NAME, XAttrNameParam.class);
FSOperations.FSRemoveXAttr command = new FSOperations.FSRemoveXAttr(

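
Taken together, the server-side cases above accept HttpFS-style REST calls for the three snapshot operations; a rough sketch of the request shape from a plain HTTP client (host, port, path, snapshot names, and the user.name pseudo-auth parameter are placeholders, and authentication details are omitted):

    import java.net.HttpURLConnection;
    import java.net.URL;

    public class HttpFsSnapshotCalls {
      public static void main(String[] args) throws Exception {
        // Create:  PUT    ?op=CREATESNAPSHOT&snapshotname=s1
        // Rename:  PUT    ?op=RENAMESNAPSHOT&oldsnapshotname=s1&snapshotname=s2
        // Delete:  DELETE ?op=DELETESNAPSHOT&snapshotname=s2
        URL create = new URL("http://httpfs-host:14000/webhdfs/v1/tmp/snapshot-demo"
            + "?op=CREATESNAPSHOT&snapshotname=s1&user.name=hdfs");
        HttpURLConnection conn = (HttpURLConnection) create.openConnection();
        conn.setRequestMethod("PUT");
        System.out.println("HTTP " + conn.getResponseCode());
        conn.disconnect();
      }
    }
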
Some files were not shown because too many files have changed in this diff.