Merge commit '456e901a4c5c639267ee87b8e5f1319f256d20c2' (HDFS-6407. Add sorting and pagination in the datanode tab of the NN Web UI. Contributed by Haohui Mai.) into HDFS-7285-merge
@@ -22,5 +22,6 @@ hadoop-common-project/hadoop-common/src/test/resources/contract-test-options.xml
hadoop-tools/hadoop-openstack/src/test/resources/contract-test-options.xml
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/tla/yarnregistry.toolbox
yarnregistry.pdf
hadoop-tools/hadoop-aws/src/test/resources/auth-keys.xml
hadoop-tools/hadoop-aws/src/test/resources/contract-test-options.xml
patchprocess/

39
BUILDING.txt
@@ -10,15 +10,17 @@ Requirements:
* ProtocolBuffer 2.5.0
* CMake 2.6 or newer (if compiling native code), must be 3.0 or newer on Mac
* Zlib devel (if compiling native code)
* openssl devel ( if compiling native hadoop-pipes and to get the best HDFS encryption performance )
* Jansson C XML parsing library ( if compiling libwebhdfs )
* Linux FUSE (Filesystem in Userspace) version 2.6 or above ( if compiling fuse_dfs )
* openssl devel (if compiling native hadoop-pipes and to get the best HDFS encryption performance)
* Jansson C XML parsing library (if compiling libwebhdfs)
* Linux FUSE (Filesystem in Userspace) version 2.6 or above (if compiling fuse_dfs)
* Internet connection for first build (to fetch all Maven and Hadoop dependencies)
* python (for releasedocs)
* bats (for shell code testing)

----------------------------------------------------------------------------------
The easiest way to get an environment with all the appropriate tools is by means
of the provided Docker config.
This requires a recent version of docker ( 1.4.1 and higher are known to work ).
This requires a recent version of docker (1.4.1 and higher are known to work).

On Linux:
Install Docker and run this command:
@@ -49,8 +51,8 @@ Known issues:
This issue has been resolved as a duplicate, and they point to a new feature for utilizing NFS mounts
as the proposed solution:
https://github.com/boot2docker/boot2docker/issues/64
An alternative solution to this problem is when you install Linux native inside a virtual machine
and run your IDE and Docker etc in side that VM.
An alternative solution to this problem is to install Linux native inside a virtual machine
and run your IDE and Docker etc inside that VM.

----------------------------------------------------------------------------------
Installing required packages for clean install of Ubuntu 14.04 LTS Desktop:
@@ -106,7 +108,7 @@ Maven build goals:

* Clean : mvn clean [-Preleasedocs]
* Compile : mvn compile [-Pnative]
* Run tests : mvn test [-Pnative]
* Run tests : mvn test [-Pnative] [-Pshelltest]
* Create JAR : mvn package
* Run findbugs : mvn compile findbugs:findbugs
* Run checkstyle : mvn compile checkstyle:checkstyle
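For illustration only (not part of the patch), the goals above are usually combined on one command line; the profiles shown are the ones this file already lists, and their availability may vary by module:

    mvn clean package -Pnative -DskipTests
    mvn test -Pnative -Pshelltest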
@@ -255,7 +257,7 @@ Handling out of memory errors in builds
----------------------------------------------------------------------------------

If the build process fails with an out of memory error, you should be able to fix
it by increasing the memory used by maven -which can be done via the environment
it by increasing the memory used by maven which can be done via the environment
variable MAVEN_OPTS.

Here is an example setting to allocate between 256 and 512 MB of heap space to
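The hunk ends before the example itself; a minimal sketch of such a setting, assuming the usual JVM heap flags for the 256-512 MB range mentioned above, is:

    export MAVEN_OPTS="-Xms256m -Xmx512m"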
@@ -282,9 +284,10 @@ Requirements:
* Internet connection for first build (to fetch all Maven and Hadoop dependencies)
* Unix command-line tools from GnuWin32: sh, mkdir, rm, cp, tar, gzip. These
tools must be present on your PATH.
* Python ( for generation of docs using 'mvn site')

Unix command-line tools are also included with the Windows Git package which
can be downloaded from http://git-scm.com/download/win.
can be downloaded from http://git-scm.com/downloads

If using Visual Studio, it must be Visual Studio 2010 Professional (not 2012).
Do not use Visual Studio Express. It does not support compiling for 64-bit,
@@ -303,19 +306,19 @@ Cygwin is neither required nor supported.
Building:

Keep the source code tree in a short path to avoid running into problems related
to Windows maximum path length limitation. (For example, C:\hdc).
to Windows maximum path length limitation (for example, C:\hdc).

Run builds from a Windows SDK Command Prompt. (Start, All Programs,
Microsoft Windows SDK v7.1, Windows SDK 7.1 Command Prompt.)
Run builds from a Windows SDK Command Prompt. (Start, All Programs,
Microsoft Windows SDK v7.1, Windows SDK 7.1 Command Prompt).

JAVA_HOME must be set, and the path must not contain spaces. If the full path
JAVA_HOME must be set, and the path must not contain spaces. If the full path
would contain spaces, then use the Windows short path instead.

You must set the Platform environment variable to either x64 or Win32 depending
on whether you're running a 64-bit or 32-bit system. Note that this is
case-sensitive. It must be "Platform", not "PLATFORM" or "platform".
on whether you're running a 64-bit or 32-bit system. Note that this is
case-sensitive. It must be "Platform", not "PLATFORM" or "platform".
Environment variables on Windows are usually case-insensitive, but Maven treats
them as case-sensitive. Failure to set this environment variable correctly will
them as case-sensitive. Failure to set this environment variable correctly will
cause msbuild to fail while building the native code in hadoop-common.

set Platform=x64 (when building on a 64-bit system)
@@ -330,12 +333,12 @@ is enabled by default when building on Windows since the native components
are required (not optional) on Windows.

If native code bindings for zlib are required, then the zlib headers must be
deployed on the build machine. Set the ZLIB_HOME environment variable to the
deployed on the build machine. Set the ZLIB_HOME environment variable to the
directory containing the headers.

set ZLIB_HOME=C:\zlib-1.2.7

At runtime, zlib1.dll must be accessible on the PATH. Hadoop has been tested
At runtime, zlib1.dll must be accessible on the PATH. Hadoop has been tested
with zlib 1.2.7, built using Visual Studio 2010 out of contrib\vstudio\vc10 in
the zlib 1.2.7 source tree.

@ -62,12 +62,19 @@ import time
|
|||
DEFAULT_JENKINS_URL = "https://builds.apache.org"
|
||||
DEFAULT_JOB_NAME = "Hadoop-Common-trunk"
|
||||
DEFAULT_NUM_PREVIOUS_DAYS = 14
|
||||
DEFAULT_TOP_NUM_FAILED_TEST = -1
|
||||
|
||||
SECONDS_PER_DAY = 86400
|
||||
|
||||
# total number of runs to examine
|
||||
numRunsToExamine = 0
|
||||
|
||||
#summary mode
|
||||
summary_mode = False
|
||||
|
||||
#total number of errors
|
||||
error_count = 0
|
||||
|
||||
""" Parse arguments """
|
||||
def parse_args():
|
||||
parser = OptionParser()
|
||||
|
@ -80,6 +87,10 @@ def parse_args():
|
|||
parser.add_option("-n", "--num-days", type="int",
|
||||
dest="num_prev_days", help="Number of days to examine",
|
||||
default=DEFAULT_NUM_PREVIOUS_DAYS)
|
||||
parser.add_option("-t", "--top", type="int",
|
||||
dest="num_failed_tests",
|
||||
help="Summary Mode, only show top number of failed tests",
|
||||
default=DEFAULT_TOP_NUM_FAILED_TEST)
|
||||
|
||||
(options, args) = parser.parse_args()
|
||||
if args:
|
||||
|
@ -100,6 +111,7 @@ def load_url_data(url):
|
|||
|
||||
""" List all builds of the target project. """
|
||||
def list_builds(jenkins_url, job_name):
|
||||
global summary_mode
|
||||
url = "%(jenkins)s/job/%(job_name)s/api/json?tree=builds[url,result,timestamp]" % dict(
|
||||
jenkins=jenkins_url,
|
||||
job_name=job_name)
|
||||
|
@ -108,19 +120,25 @@ def list_builds(jenkins_url, job_name):
|
|||
data = load_url_data(url)
|
||||
|
||||
except:
|
||||
logging.error("Could not fetch: %s" % url)
|
||||
if not summary_mode:
|
||||
logging.error("Could not fetch: %s" % url)
|
||||
error_count += 1
|
||||
raise
|
||||
return data['builds']
|
||||
|
||||
""" Find the names of any tests which failed in the given build output URL. """
|
||||
def find_failing_tests(testReportApiJson, jobConsoleOutput):
|
||||
global summary_mode
|
||||
global error_count
|
||||
ret = set()
|
||||
try:
|
||||
data = load_url_data(testReportApiJson)
|
||||
|
||||
except:
|
||||
logging.error(" Could not open testReport, check " +
|
||||
if not summary_mode:
|
||||
logging.error(" Could not open testReport, check " +
|
||||
jobConsoleOutput + " for why it was reported failed")
|
||||
error_count += 1
|
||||
return ret
|
||||
|
||||
for suite in data['suites']:
|
||||
|
@ -130,7 +148,7 @@ def find_failing_tests(testReportApiJson, jobConsoleOutput):
|
|||
if (status == 'REGRESSION' or status == 'FAILED' or (errDetails is not None)):
|
||||
ret.add(cs['className'] + "." + cs['name'])
|
||||
|
||||
if len(ret) == 0:
|
||||
if len(ret) == 0 and (not summary_mode):
|
||||
logging.info(" No failed tests in testReport, check " +
|
||||
jobConsoleOutput + " for why it was reported failed.")
|
||||
return ret
|
||||
|
@ -138,6 +156,7 @@ def find_failing_tests(testReportApiJson, jobConsoleOutput):
|
|||
""" Iterate runs of specfied job within num_prev_days and collect results """
|
||||
def find_flaky_tests(jenkins_url, job_name, num_prev_days):
|
||||
global numRunsToExamine
|
||||
global summary_mode
|
||||
all_failing = dict()
|
||||
# First list all builds
|
||||
builds = list_builds(jenkins_url, job_name)
|
||||
|
@ -153,7 +172,8 @@ def find_flaky_tests(jenkins_url, job_name, num_prev_days):
|
|||
tnum = len(builds)
|
||||
num = len(failing_build_urls)
|
||||
numRunsToExamine = tnum
|
||||
logging.info(" THERE ARE " + str(num) + " builds (out of " + str(tnum)
|
||||
if not summary_mode:
|
||||
logging.info(" THERE ARE " + str(num) + " builds (out of " + str(tnum)
|
||||
+ ") that have failed tests in the past " + str(num_prev_days) + " days"
|
||||
+ ((".", ", as listed below:\n")[num > 0]))
|
||||
|
||||
|
@ -165,17 +185,20 @@ def find_flaky_tests(jenkins_url, job_name, num_prev_days):
|
|||
|
||||
ts = float(failed_build_with_time[1]) / 1000.
|
||||
st = datetime.datetime.fromtimestamp(ts).strftime('%Y-%m-%d %H:%M:%S')
|
||||
logging.info("===>%s" % str(testReport) + " (" + st + ")")
|
||||
if not summary_mode:
|
||||
logging.info("===>%s" % str(testReport) + " (" + st + ")")
|
||||
failing = find_failing_tests(testReportApiJson, jobConsoleOutput)
|
||||
if failing:
|
||||
for ftest in failing:
|
||||
logging.info(" Failed test: %s" % ftest)
|
||||
if not summary_mode:
|
||||
logging.info(" Failed test: %s" % ftest)
|
||||
all_failing[ftest] = all_failing.get(ftest,0)+1
|
||||
|
||||
return all_failing
|
||||
|
||||
def main():
|
||||
global numRunsToExamine
|
||||
global summary_mode
|
||||
logging.basicConfig(format='%(levelname)s:%(message)s', level=logging.INFO)
|
||||
|
||||
# set up logger to write to stdout
|
||||
|
@ -189,16 +212,34 @@ def main():
|
|||
logging.info("****Recently FAILED builds in url: " + opts.jenkins_url
|
||||
+ "/job/" + opts.job_name + "")
|
||||
|
||||
if opts.num_failed_tests != -1:
|
||||
summary_mode = True
|
||||
|
||||
all_failing = find_flaky_tests(opts.jenkins_url, opts.job_name,
|
||||
opts.num_prev_days)
|
||||
if len(all_failing) == 0:
|
||||
raise SystemExit(0)
|
||||
logging.info("\nAmong " + str(numRunsToExamine) + " runs examined, all failed "
|
||||
+ "tests <#failedRuns: testName>:")
|
||||
|
||||
if summary_mode and opts.num_failed_tests < len(all_failing):
|
||||
logging.info("\nAmong " + str(numRunsToExamine) +
|
||||
" runs examined, top " + str(opts.num_failed_tests) +
|
||||
" failed tests <#failedRuns: testName>:")
|
||||
else:
|
||||
logging.info("\nAmong " + str(numRunsToExamine) +
|
||||
" runs examined, all failed tests <#failedRuns: testName>:")
|
||||
|
||||
# print summary section: all failed tests sorted by how many times they failed
|
||||
line_count = 0
|
||||
for tn in sorted(all_failing, key=all_failing.get, reverse=True):
|
||||
logging.info(" " + str(all_failing[tn])+ ": " + tn)
|
||||
if summary_mode:
|
||||
line_count += 1
|
||||
if line_count == opts.num_failed_tests:
|
||||
break
|
||||
|
||||
if summary_mode and error_count > 0:
|
||||
logging.info("\n" + str(error_count) + " errors found, you may "
|
||||
+ "re-run in non summary mode to see error details.");
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
|
|
|
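As a hedged illustration of the summary mode introduced above (the script path is assumed here; this hunk does not name the file), the new -t/--top option limits the report to the most frequently failing tests:

    # hypothetical invocation: report the 10 most frequent failures over the last 7 days
    python dev-support/determine-flaky-tests-hadoop.py --num-days 7 --top 10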
@@ -63,6 +63,14 @@ ENV FINDBUGS_HOME /opt/findbugs
RUN apt-get install -y cabal-install
RUN cabal update && cabal install shellcheck --global

#####
# bats
#####

RUN add-apt-repository ppa:duggan/bats --yes
RUN apt-get update -qq
RUN apt-get install -qq bats

# Fixing the Apache commons / Maven dependency problem under Ubuntu:
# See http://wiki.apache.org/commons/VfsProblems
RUN cd /usr/share/maven/lib && ln -s ../../java/commons-lang.jar .
|
@ -19,10 +19,12 @@
|
|||
from glob import glob
|
||||
from optparse import OptionParser
|
||||
from time import gmtime, strftime
|
||||
import pprint
|
||||
import os
|
||||
import re
|
||||
import sys
|
||||
import urllib
|
||||
import urllib2
|
||||
try:
|
||||
import json
|
||||
except ImportError:
|
||||
|
@ -87,28 +89,56 @@ def notableclean(str):
|
|||
str=str.rstrip()
|
||||
return str
|
||||
|
||||
# clean output dir
|
||||
def cleanOutputDir(dir):
|
||||
files = os.listdir(dir)
|
||||
for name in files:
|
||||
os.remove(os.path.join(dir,name))
|
||||
os.rmdir(dir)
|
||||
|
||||
def mstr(obj):
|
||||
if (obj == None):
|
||||
if (obj is None):
|
||||
return ""
|
||||
return unicode(obj)
|
||||
|
||||
def buildindex(master):
|
||||
def buildindex(title,license):
|
||||
versions=reversed(sorted(glob("[0-9]*.[0-9]*.[0-9]*")))
|
||||
with open("index.md","w") as indexfile:
|
||||
if license is True:
|
||||
indexfile.write(asflicense)
|
||||
for v in versions:
|
||||
indexfile.write("* Apache Hadoop v%s\n" % (v))
|
||||
indexfile.write("* %s v%s\n" % (title,v))
|
||||
for k in ("Changes","Release Notes"):
|
||||
indexfile.write(" * %s\n" %(k))
|
||||
indexfile.write(" * [Combined %s](%s/%s.%s.html)\n" \
|
||||
indexfile.write(" * %s (%s/%s.%s.html)\n" \
|
||||
% (k,v,k.upper().replace(" ",""),v))
|
||||
if not master:
|
||||
indexfile.write(" * [Hadoop Common %s](%s/%s.HADOOP.%s.html)\n" \
|
||||
% (k,v,k.upper().replace(" ",""),v))
|
||||
for p in ("HDFS","MapReduce","YARN"):
|
||||
indexfile.write(" * [%s %s](%s/%s.%s.%s.html)\n" \
|
||||
% (p,k,v,k.upper().replace(" ",""),p.upper(),v))
|
||||
indexfile.close()
|
||||
|
||||
class GetVersions:
|
||||
""" yo """
|
||||
def __init__(self,versions, projects):
|
||||
versions = versions
|
||||
projects = projects
|
||||
self.newversions = []
|
||||
pp = pprint.PrettyPrinter(indent=4)
|
||||
at=0
|
||||
end=1
|
||||
count=100
|
||||
versions.sort()
|
||||
print "Looking for %s through %s"%(versions[0],versions[-1])
|
||||
for p in projects:
|
||||
resp = urllib2.urlopen("https://issues.apache.org/jira/rest/api/2/project/%s/versions"%p)
|
||||
data = json.loads(resp.read())
|
||||
for d in data:
|
||||
if d['name'][0].isdigit and versions[0] <= d['name'] and d['name'] <= versions[-1]:
|
||||
print "Adding %s to the list" % d['name']
|
||||
self.newversions.append(d['name'])
|
||||
newlist=list(set(self.newversions))
|
||||
self.newversions=newlist
|
||||
|
||||
def getlist(self):
|
||||
pp = pprint.PrettyPrinter(indent=4)
|
||||
return(self.newversions)
|
||||
|
||||
class Version:
|
||||
"""Represents a version number"""
|
||||
def __init__(self, data):
|
||||
|
@ -148,7 +178,7 @@ class Jira:
|
|||
return mstr(self.fields['description'])
|
||||
|
||||
def getReleaseNote(self):
|
||||
if (self.notes == None):
|
||||
if (self.notes is None):
|
||||
field = self.parent.fieldIdMap['Release Note']
|
||||
if (self.fields.has_key(field)):
|
||||
self.notes=mstr(self.fields[field])
|
||||
|
@ -159,14 +189,14 @@ class Jira:
|
|||
def getPriority(self):
|
||||
ret = ""
|
||||
pri = self.fields['priority']
|
||||
if(pri != None):
|
||||
if(pri is not None):
|
||||
ret = pri['name']
|
||||
return mstr(ret)
|
||||
|
||||
def getAssignee(self):
|
||||
ret = ""
|
||||
mid = self.fields['assignee']
|
||||
if(mid != None):
|
||||
if(mid is not None):
|
||||
ret = mid['displayName']
|
||||
return mstr(ret)
|
||||
|
||||
|
@ -182,21 +212,21 @@ class Jira:
|
|||
def getType(self):
|
||||
ret = ""
|
||||
mid = self.fields['issuetype']
|
||||
if(mid != None):
|
||||
if(mid is not None):
|
||||
ret = mid['name']
|
||||
return mstr(ret)
|
||||
|
||||
def getReporter(self):
|
||||
ret = ""
|
||||
mid = self.fields['reporter']
|
||||
if(mid != None):
|
||||
if(mid is not None):
|
||||
ret = mid['displayName']
|
||||
return mstr(ret)
|
||||
|
||||
def getProject(self):
|
||||
ret = ""
|
||||
mid = self.fields['project']
|
||||
if(mid != None):
|
||||
if(mid is not None):
|
||||
ret = mid['key']
|
||||
return mstr(ret)
|
||||
|
||||
|
@ -214,7 +244,7 @@ class Jira:
|
|||
return False
|
||||
|
||||
def getIncompatibleChange(self):
|
||||
if (self.incompat == None):
|
||||
if (self.incompat is None):
|
||||
field = self.parent.fieldIdMap['Hadoop Flags']
|
||||
self.reviewed=False
|
||||
self.incompat=False
|
||||
|
@ -227,6 +257,24 @@ class Jira:
|
|||
self.reviewed=True
|
||||
return self.incompat
|
||||
|
||||
def checkMissingComponent(self):
|
||||
if (len(self.fields['components'])>0):
|
||||
return False
|
||||
return True
|
||||
|
||||
def checkMissingAssignee(self):
|
||||
if (self.fields['assignee'] is not None):
|
||||
return False
|
||||
return True
|
||||
|
||||
def checkVersionString(self):
|
||||
field = self.parent.fieldIdMap['Fix Version/s']
|
||||
for h in self.fields[field]:
|
||||
found = re.match('^((\d+)(\.\d+)*).*$|^(\w+\-\d+)$', h['name'])
|
||||
if not found:
|
||||
return True
|
||||
return False
|
||||
|
||||
def getReleaseDate(self,version):
|
||||
for j in range(len(self.fields['fixVersions'])):
|
||||
if self.fields['fixVersions'][j]==version:
|
||||
|
@ -236,10 +284,12 @@ class Jira:
|
|||
class JiraIter:
|
||||
"""An Iterator of JIRAs"""
|
||||
|
||||
def __init__(self, versions):
|
||||
self.versions = versions
|
||||
def __init__(self, version, projects):
|
||||
self.version = version
|
||||
self.projects = projects
|
||||
v=str(version).replace("-SNAPSHOT","")
|
||||
|
||||
resp = urllib.urlopen("https://issues.apache.org/jira/rest/api/2/field")
|
||||
resp = urllib2.urlopen("https://issues.apache.org/jira/rest/api/2/field")
|
||||
data = json.loads(resp.read())
|
||||
|
||||
self.fieldIdMap = {}
|
||||
|
@ -251,8 +301,8 @@ class JiraIter:
|
|||
end=1
|
||||
count=100
|
||||
while (at < end):
|
||||
params = urllib.urlencode({'jql': "project in (HADOOP,HDFS,MAPREDUCE,YARN) and fixVersion in ('"+"' , '".join([str(v).replace("-SNAPSHOT","") for v in versions])+"') and resolution = Fixed", 'startAt':at, 'maxResults':count})
|
||||
resp = urllib.urlopen("https://issues.apache.org/jira/rest/api/2/search?%s"%params)
|
||||
params = urllib.urlencode({'jql': "project in ('"+"' , '".join(projects)+"') and fixVersion in ('"+v+"') and resolution = Fixed", 'startAt':at, 'maxResults':count})
|
||||
resp = urllib2.urlopen("https://issues.apache.org/jira/rest/api/2/search?%s"%params)
|
||||
data = json.loads(resp.read())
|
||||
if (data.has_key('errorMessages')):
|
||||
raise Exception(data['errorMessages'])
|
||||
|
@ -261,10 +311,8 @@ class JiraIter:
|
|||
self.jiras.extend(data['issues'])
|
||||
|
||||
needaversion=False
|
||||
for j in versions:
|
||||
v=str(j).replace("-SNAPSHOT","")
|
||||
if v not in releaseVersion:
|
||||
needaversion=True
|
||||
if v not in releaseVersion:
|
||||
needaversion=True
|
||||
|
||||
if needaversion is True:
|
||||
for i in range(len(data['issues'])):
|
||||
|
@ -326,22 +374,32 @@ class Outputs:
|
|||
self.writeKeyRaw(jira.getProject(), line)
|
||||
|
||||
def main():
|
||||
parser = OptionParser(usage="usage: %prog --version VERSION [--version VERSION2 ...]",
|
||||
parser = OptionParser(usage="usage: %prog --project PROJECT [--project PROJECT] --version VERSION [--version VERSION2 ...]",
|
||||
epilog=
|
||||
"Markdown-formatted CHANGES and RELEASENOTES files will be stored in a directory"
|
||||
" named after the highest version provided.")
|
||||
parser.add_option("-i","--index", dest="index", action="store_true",
|
||||
default=False, help="build an index file")
|
||||
parser.add_option("-l","--license", dest="license", action="store_false",
|
||||
default=True, help="Add an ASF license")
|
||||
parser.add_option("-n","--lint", dest="lint", action="store_true",
|
||||
help="use lint flag to exit on failures")
|
||||
parser.add_option("-p", "--project", dest="projects",
|
||||
action="append", type="string",
|
||||
help="projects in JIRA to include in releasenotes", metavar="PROJECT")
|
||||
parser.add_option("-r", "--range", dest="range", action="store_true",
|
||||
default=False, help="Given versions are a range")
|
||||
parser.add_option("-t", "--projecttitle", dest="title",
|
||||
type="string",
|
||||
help="Title to use for the project (default is Apache PROJECT)")
|
||||
parser.add_option("-u","--usetoday", dest="usetoday", action="store_true",
|
||||
default=False, help="use current date for unreleased versions")
|
||||
parser.add_option("-v", "--version", dest="versions",
|
||||
action="append", type="string",
|
||||
help="versions in JIRA to include in releasenotes", metavar="VERSION")
|
||||
parser.add_option("-m","--master", dest="master", action="store_true",
|
||||
help="only create the master, merged project files")
|
||||
parser.add_option("-i","--index", dest="index", action="store_true",
|
||||
help="build an index file")
|
||||
parser.add_option("-u","--usetoday", dest="usetoday", action="store_true",
|
||||
help="use current date for unreleased versions")
|
||||
(options, args) = parser.parse_args()
|
||||
|
||||
if (options.versions == None):
|
||||
if (options.versions is None):
|
||||
options.versions = []
|
||||
|
||||
if (len(args) > 2):
|
||||
|
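A hedged example invocation of the reworked option set above (the script name is assumed; project keys and the version are placeholders):

    # hypothetical invocation combining --project, --version, --projecttitle, --index and --lint
    python relnotes.py --project HADOOP --project HDFS --version 2.7.1 --projecttitle "Apache Hadoop" --index --lint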
@ -350,140 +408,173 @@ def main():
|
|||
if (len(options.versions) <= 0):
|
||||
parser.error("At least one version needs to be supplied")
|
||||
|
||||
versions = [ Version(v) for v in options.versions ];
|
||||
proxy = urllib2.ProxyHandler()
|
||||
opener = urllib2.build_opener(proxy)
|
||||
urllib2.install_opener(opener)
|
||||
|
||||
projects = options.projects
|
||||
|
||||
if (options.range is True):
|
||||
versions = [ Version(v) for v in GetVersions(options.versions, projects).getlist() ]
|
||||
else:
|
||||
versions = [ Version(v) for v in options.versions ]
|
||||
versions.sort();
|
||||
|
||||
maxVersion = str(versions[-1])
|
||||
|
||||
jlist = JiraIter(versions)
|
||||
version = maxVersion
|
||||
|
||||
if version in releaseVersion:
|
||||
reldate=releaseVersion[version]
|
||||
elif options.usetoday:
|
||||
reldate=strftime("%Y-%m-%d", gmtime())
|
||||
if (options.title is None):
|
||||
title=projects[0]
|
||||
else:
|
||||
reldate="Unreleased"
|
||||
title=options.title
|
||||
|
||||
if not os.path.exists(version):
|
||||
os.mkdir(version)
|
||||
haderrors=False
|
||||
|
||||
if options.master:
|
||||
reloutputs = Outputs("%(ver)s/RELEASENOTES.%(ver)s.md",
|
||||
"%(ver)s/RELEASENOTES.%(key)s.%(ver)s.md",
|
||||
[], {"ver":maxVersion, "date":reldate})
|
||||
choutputs = Outputs("%(ver)s/CHANGES.%(ver)s.md",
|
||||
"%(ver)s/CHANGES.%(key)s.%(ver)s.md",
|
||||
[], {"ver":maxVersion, "date":reldate})
|
||||
else:
|
||||
reloutputs = Outputs("%(ver)s/RELEASENOTES.%(ver)s.md",
|
||||
"%(ver)s/RELEASENOTES.%(key)s.%(ver)s.md",
|
||||
["HADOOP","HDFS","MAPREDUCE","YARN"], {"ver":maxVersion, "date":reldate})
|
||||
choutputs = Outputs("%(ver)s/CHANGES.%(ver)s.md",
|
||||
"%(ver)s/CHANGES.%(key)s.%(ver)s.md",
|
||||
["HADOOP","HDFS","MAPREDUCE","YARN"], {"ver":maxVersion, "date":reldate})
|
||||
for v in versions:
|
||||
vstr=str(v)
|
||||
jlist = JiraIter(vstr,projects)
|
||||
|
||||
reloutputs.writeAll(asflicense)
|
||||
choutputs.writeAll(asflicense)
|
||||
|
||||
relhead = '# Hadoop %(key)s %(ver)s Release Notes\n\n' \
|
||||
'These release notes cover new developer and user-facing incompatibilities, features, and major improvements.\n\n'
|
||||
|
||||
chhead = '# Hadoop Changelog\n\n' \
|
||||
'## Release %(ver)s - %(date)s\n'\
|
||||
'\n'
|
||||
|
||||
reloutputs.writeAll(relhead)
|
||||
choutputs.writeAll(chhead)
|
||||
|
||||
incompatlist=[]
|
||||
buglist=[]
|
||||
improvementlist=[]
|
||||
newfeaturelist=[]
|
||||
subtasklist=[]
|
||||
tasklist=[]
|
||||
testlist=[]
|
||||
otherlist=[]
|
||||
|
||||
for jira in sorted(jlist):
|
||||
if jira.getIncompatibleChange():
|
||||
incompatlist.append(jira)
|
||||
elif jira.getType() == "Bug":
|
||||
buglist.append(jira)
|
||||
elif jira.getType() == "Improvement":
|
||||
improvementlist.append(jira)
|
||||
elif jira.getType() == "New Feature":
|
||||
newfeaturelist.append(jira)
|
||||
elif jira.getType() == "Sub-task":
|
||||
subtasklist.append(jira)
|
||||
elif jira.getType() == "Task":
|
||||
tasklist.append(jira)
|
||||
elif jira.getType() == "Test":
|
||||
testlist.append(jira)
|
||||
if vstr in releaseVersion:
|
||||
reldate=releaseVersion[vstr]
|
||||
elif options.usetoday:
|
||||
reldate=strftime("%Y-%m-%d", gmtime())
|
||||
else:
|
||||
otherlist.append(jira)
|
||||
reldate="Unreleased"
|
||||
|
||||
line = '* [%s](https://issues.apache.org/jira/browse/%s) | *%s* | **%s**\n' \
|
||||
% (notableclean(jira.getId()), notableclean(jira.getId()), notableclean(jira.getPriority()),
|
||||
notableclean(jira.getSummary()))
|
||||
if not os.path.exists(vstr):
|
||||
os.mkdir(vstr)
|
||||
|
||||
if (jira.getIncompatibleChange()) and (len(jira.getReleaseNote())==0):
|
||||
reloutputs.writeKeyRaw(jira.getProject(),"\n---\n\n")
|
||||
reloutputs.writeKeyRaw(jira.getProject(), line)
|
||||
line ='\n**WARNING: No release note provided for this incompatible change.**\n\n'
|
||||
print 'WARNING: incompatible change %s lacks release notes.' % (notableclean(jira.getId()))
|
||||
reloutputs.writeKeyRaw(jira.getProject(), line)
|
||||
reloutputs = Outputs("%(ver)s/RELEASENOTES.%(ver)s.md",
|
||||
"%(ver)s/RELEASENOTES.%(key)s.%(ver)s.md",
|
||||
[], {"ver":v, "date":reldate, "title":title})
|
||||
choutputs = Outputs("%(ver)s/CHANGES.%(ver)s.md",
|
||||
"%(ver)s/CHANGES.%(key)s.%(ver)s.md",
|
||||
[], {"ver":v, "date":reldate, "title":title})
|
||||
|
||||
if (len(jira.getReleaseNote())>0):
|
||||
reloutputs.writeKeyRaw(jira.getProject(),"\n---\n\n")
|
||||
reloutputs.writeKeyRaw(jira.getProject(), line)
|
||||
line ='\n%s\n\n' % (tableclean(jira.getReleaseNote()))
|
||||
reloutputs.writeKeyRaw(jira.getProject(), line)
|
||||
if (options.license is True):
|
||||
reloutputs.writeAll(asflicense)
|
||||
choutputs.writeAll(asflicense)
|
||||
|
||||
reloutputs.writeAll("\n\n")
|
||||
reloutputs.close()
|
||||
relhead = '# %(title)s %(key)s %(ver)s Release Notes\n\n' \
|
||||
'These release notes cover new developer and user-facing incompatibilities, features, and major improvements.\n\n'
|
||||
chhead = '# %(title)s Changelog\n\n' \
|
||||
'## Release %(ver)s - %(date)s\n'\
|
||||
'\n'
|
||||
|
||||
choutputs.writeAll("### INCOMPATIBLE CHANGES:\n\n")
|
||||
choutputs.writeAll("| JIRA | Summary | Priority | Component | Reporter | Contributor |\n")
|
||||
choutputs.writeAll("|:---- |:---- | :--- |:---- |:---- |:---- |\n")
|
||||
choutputs.writeList(incompatlist)
|
||||
reloutputs.writeAll(relhead)
|
||||
choutputs.writeAll(chhead)
|
||||
errorCount=0
|
||||
warningCount=0
|
||||
lintMessage=""
|
||||
incompatlist=[]
|
||||
buglist=[]
|
||||
improvementlist=[]
|
||||
newfeaturelist=[]
|
||||
subtasklist=[]
|
||||
tasklist=[]
|
||||
testlist=[]
|
||||
otherlist=[]
|
||||
|
||||
choutputs.writeAll("\n\n### NEW FEATURES:\n\n")
|
||||
choutputs.writeAll("| JIRA | Summary | Priority | Component | Reporter | Contributor |\n")
|
||||
choutputs.writeAll("|:---- |:---- | :--- |:---- |:---- |:---- |\n")
|
||||
choutputs.writeList(newfeaturelist)
|
||||
for jira in sorted(jlist):
|
||||
if jira.getIncompatibleChange():
|
||||
incompatlist.append(jira)
|
||||
elif jira.getType() == "Bug":
|
||||
buglist.append(jira)
|
||||
elif jira.getType() == "Improvement":
|
||||
improvementlist.append(jira)
|
||||
elif jira.getType() == "New Feature":
|
||||
newfeaturelist.append(jira)
|
||||
elif jira.getType() == "Sub-task":
|
||||
subtasklist.append(jira)
|
||||
elif jira.getType() == "Task":
|
||||
tasklist.append(jira)
|
||||
elif jira.getType() == "Test":
|
||||
testlist.append(jira)
|
||||
else:
|
||||
otherlist.append(jira)
|
||||
|
||||
choutputs.writeAll("\n\n### IMPROVEMENTS:\n\n")
|
||||
choutputs.writeAll("| JIRA | Summary | Priority | Component | Reporter | Contributor |\n")
|
||||
choutputs.writeAll("|:---- |:---- | :--- |:---- |:---- |:---- |\n")
|
||||
choutputs.writeList(improvementlist)
|
||||
line = '* [%s](https://issues.apache.org/jira/browse/%s) | *%s* | **%s**\n' \
|
||||
% (notableclean(jira.getId()), notableclean(jira.getId()), notableclean(jira.getPriority()),
|
||||
notableclean(jira.getSummary()))
|
||||
|
||||
choutputs.writeAll("\n\n### BUG FIXES:\n\n")
|
||||
choutputs.writeAll("| JIRA | Summary | Priority | Component | Reporter | Contributor |\n")
|
||||
choutputs.writeAll("|:---- |:---- | :--- |:---- |:---- |:---- |\n")
|
||||
choutputs.writeList(buglist)
|
||||
if (jira.getIncompatibleChange()) and (len(jira.getReleaseNote())==0):
|
||||
warningCount+=1
|
||||
reloutputs.writeKeyRaw(jira.getProject(),"\n---\n\n")
|
||||
reloutputs.writeKeyRaw(jira.getProject(), line)
|
||||
line ='\n**WARNING: No release note provided for this incompatible change.**\n\n'
|
||||
lintMessage += "\nWARNING: incompatible change %s lacks release notes." % (notableclean(jira.getId()))
|
||||
reloutputs.writeKeyRaw(jira.getProject(), line)
|
||||
|
||||
choutputs.writeAll("\n\n### TESTS:\n\n")
|
||||
choutputs.writeAll("| JIRA | Summary | Priority | Component | Reporter | Contributor |\n")
|
||||
choutputs.writeAll("|:---- |:---- | :--- |:---- |:---- |:---- |\n")
|
||||
choutputs.writeList(testlist)
|
||||
if jira.checkVersionString():
|
||||
warningCount+=1
|
||||
lintMessage += "\nWARNING: Version string problem for %s " % jira.getId()
|
||||
|
||||
choutputs.writeAll("\n\n### SUB-TASKS:\n\n")
|
||||
choutputs.writeAll("| JIRA | Summary | Priority | Component | Reporter | Contributor |\n")
|
||||
choutputs.writeAll("|:---- |:---- | :--- |:---- |:---- |:---- |\n")
|
||||
choutputs.writeList(subtasklist)
|
||||
if (jira.checkMissingComponent() or jira.checkMissingAssignee()):
|
||||
errorCount+=1
|
||||
errorMessage=[]
|
||||
jira.checkMissingComponent() and errorMessage.append("component")
|
||||
jira.checkMissingAssignee() and errorMessage.append("assignee")
|
||||
lintMessage += "\nERROR: missing %s for %s " % (" and ".join(errorMessage) , jira.getId())
|
||||
|
||||
choutputs.writeAll("\n\n### OTHER:\n\n")
|
||||
choutputs.writeAll("| JIRA | Summary | Priority | Component | Reporter | Contributor |\n")
|
||||
choutputs.writeAll("|:---- |:---- | :--- |:---- |:---- |:---- |\n")
|
||||
choutputs.writeList(otherlist)
|
||||
choutputs.writeList(tasklist)
|
||||
if (len(jira.getReleaseNote())>0):
|
||||
reloutputs.writeKeyRaw(jira.getProject(),"\n---\n\n")
|
||||
reloutputs.writeKeyRaw(jira.getProject(), line)
|
||||
line ='\n%s\n\n' % (tableclean(jira.getReleaseNote()))
|
||||
reloutputs.writeKeyRaw(jira.getProject(), line)
|
||||
|
||||
choutputs.writeAll("\n\n")
|
||||
choutputs.close()
|
||||
if (options.lint is True):
|
||||
print lintMessage
|
||||
print "======================================="
|
||||
print "%s: Error:%d, Warning:%d \n" % (vstr, errorCount, warningCount)
|
||||
if (errorCount>0):
|
||||
haderrors=True
|
||||
cleanOutputDir(vstr)
|
||||
continue
|
||||
|
||||
reloutputs.writeAll("\n\n")
|
||||
reloutputs.close()
|
||||
|
||||
choutputs.writeAll("### INCOMPATIBLE CHANGES:\n\n")
|
||||
choutputs.writeAll("| JIRA | Summary | Priority | Component | Reporter | Contributor |\n")
|
||||
choutputs.writeAll("|:---- |:---- | :--- |:---- |:---- |:---- |\n")
|
||||
choutputs.writeList(incompatlist)
|
||||
|
||||
choutputs.writeAll("\n\n### NEW FEATURES:\n\n")
|
||||
choutputs.writeAll("| JIRA | Summary | Priority | Component | Reporter | Contributor |\n")
|
||||
choutputs.writeAll("|:---- |:---- | :--- |:---- |:---- |:---- |\n")
|
||||
choutputs.writeList(newfeaturelist)
|
||||
|
||||
choutputs.writeAll("\n\n### IMPROVEMENTS:\n\n")
|
||||
choutputs.writeAll("| JIRA | Summary | Priority | Component | Reporter | Contributor |\n")
|
||||
choutputs.writeAll("|:---- |:---- | :--- |:---- |:---- |:---- |\n")
|
||||
choutputs.writeList(improvementlist)
|
||||
|
||||
choutputs.writeAll("\n\n### BUG FIXES:\n\n")
|
||||
choutputs.writeAll("| JIRA | Summary | Priority | Component | Reporter | Contributor |\n")
|
||||
choutputs.writeAll("|:---- |:---- | :--- |:---- |:---- |:---- |\n")
|
||||
choutputs.writeList(buglist)
|
||||
|
||||
choutputs.writeAll("\n\n### TESTS:\n\n")
|
||||
choutputs.writeAll("| JIRA | Summary | Priority | Component | Reporter | Contributor |\n")
|
||||
choutputs.writeAll("|:---- |:---- | :--- |:---- |:---- |:---- |\n")
|
||||
choutputs.writeList(testlist)
|
||||
|
||||
choutputs.writeAll("\n\n### SUB-TASKS:\n\n")
|
||||
choutputs.writeAll("| JIRA | Summary | Priority | Component | Reporter | Contributor |\n")
|
||||
choutputs.writeAll("|:---- |:---- | :--- |:---- |:---- |:---- |\n")
|
||||
choutputs.writeList(subtasklist)
|
||||
|
||||
choutputs.writeAll("\n\n### OTHER:\n\n")
|
||||
choutputs.writeAll("| JIRA | Summary | Priority | Component | Reporter | Contributor |\n")
|
||||
choutputs.writeAll("|:---- |:---- | :--- |:---- |:---- |:---- |\n")
|
||||
choutputs.writeList(otherlist)
|
||||
choutputs.writeList(tasklist)
|
||||
|
||||
choutputs.writeAll("\n\n")
|
||||
choutputs.close()
|
||||
|
||||
if options.index:
|
||||
buildindex(options.master)
|
||||
buildindex(title,options.license)
|
||||
|
||||
if haderrors is True:
|
||||
sys.exit(1)
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
|
|
|
@ -12,7 +12,7 @@
|
|||
# limitations under the License.
|
||||
|
||||
#
|
||||
# Determine if the patch file is a git diff file with prefixes.
|
||||
# Determine if the git diff patch file has prefixes.
|
||||
# These files are generated via "git diff" *without* the --no-prefix option.
|
||||
#
|
||||
# We can apply these patches more easily because we know that the a/ and b/
|
||||
|
@ -21,28 +21,13 @@
|
|||
# And of course, we know that the patch file was generated using git, so we
|
||||
# know git apply can handle it properly.
|
||||
#
|
||||
# Arguments: file name.
|
||||
# Return: 0 if it is a git diff; 1 otherwise.
|
||||
# Arguments: git diff file name.
|
||||
# Return: 0 if it is a git diff with prefix; 1 otherwise.
|
||||
#
|
||||
is_git_diff_with_prefix() {
|
||||
DIFF_TYPE="unknown"
|
||||
while read -r line; do
|
||||
if [[ "$line" =~ ^diff\ ]]; then
|
||||
if [[ "$line" =~ ^diff\ \-\-git ]]; then
|
||||
DIFF_TYPE="git"
|
||||
else
|
||||
return 1 # All diff lines must be diff --git lines.
|
||||
fi
|
||||
fi
|
||||
if [[ "$line" =~ ^\+\+\+\ ]] ||
|
||||
[[ "$line" =~ ^\-\-\-\ ]]; then
|
||||
if ! [[ "$line" =~ ^....[ab]/ || "$line" =~ ^..../dev/null ]]; then
|
||||
return 1 # All +++ and --- lines must start with a/ or b/ or be /dev/null.
|
||||
fi
|
||||
fi
|
||||
done < $1
|
||||
[ x$DIFF_TYPE == x"git" ] || return 1
|
||||
return 0 # return true (= 0 in bash)
|
||||
has_prefix() {
|
||||
awk '/^diff --git / { if ($3 !~ "^a/" || $4 !~ "^b/") { exit 1 } }
|
||||
/^\+{3}|-{3} / { if ($2 !~ "^[ab]/" && $2 !~ "^/dev/null") { exit 1 } }' "$1"
|
||||
return $?
|
||||
}
|
||||
|
||||
PATCH_FILE=$1
|
||||
|
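For reference, the prefix check above distinguishes the two header styles git can emit; this reflects standard git behavior rather than anything defined by this patch:

    # "git diff" (default) keeps a/ and b/ prefixes, so the patch applies with -p1:
    #   diff --git a/BUILDING.txt b/BUILDING.txt
    #   --- a/BUILDING.txt
    #   +++ b/BUILDING.txt
    # "git diff --no-prefix" drops them, so the patch must apply with -p0:
    #   diff --git BUILDING.txt BUILDING.txt
    #   --- BUILDING.txt
    #   +++ BUILDING.txt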
@ -100,17 +85,26 @@ if [[ ${PATCH_FILE} =~ ^http || ${PATCH_FILE} =~ ${ISSUE_RE} ]]; then
|
|||
PATCH_FILE="${PFILE}"
|
||||
fi
|
||||
|
||||
# Special case for git-diff patches without --no-prefix
|
||||
if is_git_diff_with_prefix "$PATCH_FILE"; then
|
||||
GIT_FLAGS="--binary -p1 -v"
|
||||
if [[ -z $DRY_RUN ]]; then
|
||||
GIT_FLAGS="$GIT_FLAGS --stat --apply "
|
||||
echo Going to apply git patch with: git apply "${GIT_FLAGS}"
|
||||
# Case for git-diff patches
|
||||
if grep -q "^diff --git" "${PATCH_FILE}"; then
|
||||
GIT_FLAGS="--binary -v"
|
||||
if has_prefix "$PATCH_FILE"; then
|
||||
GIT_FLAGS="$GIT_FLAGS -p1"
|
||||
else
|
||||
GIT_FLAGS="$GIT_FLAGS --check "
|
||||
GIT_FLAGS="$GIT_FLAGS -p0"
|
||||
fi
|
||||
if [[ -z $DRY_RUN ]]; then
|
||||
GIT_FLAGS="$GIT_FLAGS --stat --apply"
|
||||
echo Going to apply git patch with: git apply "${GIT_FLAGS}"
|
||||
else
|
||||
GIT_FLAGS="$GIT_FLAGS --check"
|
||||
fi
|
||||
# shellcheck disable=SC2086
|
||||
git apply ${GIT_FLAGS} "${PATCH_FILE}"
|
||||
exit $?
|
||||
if [[ $? == 0 ]]; then
|
||||
cleanup 0
|
||||
fi
|
||||
echo "git apply failed. Going to apply the patch with: ${PATCH}"
|
||||
fi
|
||||
|
||||
# Come up with a list of changed files into $TMP
|
||||
|
|
|
@ -72,7 +72,7 @@ function shellcheck_preapply
|
|||
start_clock
|
||||
|
||||
# shellcheck disable=SC2016
|
||||
SHELLCHECK_VERSION=$(shellcheck --version | ${GREP} version: | ${AWK} '{print $NF}')
|
||||
SHELLCHECK_VERSION=$(${SHELLCHECK} --version | ${GREP} version: | ${AWK} '{print $NF}')
|
||||
|
||||
echo "Running shellcheck against all identifiable shell scripts"
|
||||
pushd "${BASEDIR}" >/dev/null
|
||||
|
|
|
@ -33,6 +33,8 @@ function setup_defaults
|
|||
else
|
||||
MVN=${MAVEN_HOME}/bin/mvn
|
||||
fi
|
||||
# This parameter needs to be kept as an array
|
||||
MAVEN_ARGS=()
|
||||
|
||||
PROJECT_NAME=hadoop
|
||||
HOW_TO_CONTRIBUTE="https://wiki.apache.org/hadoop/HowToContribute"
|
||||
|
@ -44,6 +46,7 @@ function setup_defaults
|
|||
LOAD_SYSTEM_PLUGINS=true
|
||||
|
||||
FINDBUGS_HOME=${FINDBUGS_HOME:-}
|
||||
FINDBUGS_WARNINGS_FAIL_PRECHECK=false
|
||||
ECLIPSE_HOME=${ECLIPSE_HOME:-}
|
||||
BUILD_NATIVE=${BUILD_NATIVE:-true}
|
||||
PATCH_BRANCH=""
|
||||
|
@ -585,6 +588,7 @@ function hadoop_usage
|
|||
echo "--debug If set, then output some extra stuff to stderr"
|
||||
echo "--dirty-workspace Allow the local git workspace to have uncommitted changes"
|
||||
echo "--findbugs-home=<path> Findbugs home directory (default FINDBUGS_HOME environment variable)"
|
||||
echo "--findbugs-strict-precheck If there are Findbugs warnings during precheck, fail"
|
||||
echo "--issue-re=<expr> Bash regular expression to use when trying to find a jira ref in the patch name (default '^(HADOOP|YARN|MAPREDUCE|HDFS)-[0-9]+$')"
|
||||
echo "--modulelist=<list> Specify additional modules to test (comma delimited)"
|
||||
echo "--offline Avoid connecting to the Internet"
|
||||
|
@ -666,6 +670,9 @@ function parse_args
|
|||
--findbugs-home=*)
|
||||
FINDBUGS_HOME=${i#*=}
|
||||
;;
|
||||
--findbugs-strict-precheck)
|
||||
FINDBUGS_WARNINGS_FAIL_PRECHECK=true
|
||||
;;
|
||||
--git-cmd=*)
|
||||
GIT=${i#*=}
|
||||
;;
|
||||
|
@ -751,6 +758,11 @@ function parse_args
|
|||
esac
|
||||
done
|
||||
|
||||
# if we requested offline, pass that to mvn
|
||||
if [[ ${OFFLINE} == "true" ]] ; then
|
||||
MAVEN_ARGS=(${MAVEN_ARGS[@]} --offline)
|
||||
fi
|
||||
|
||||
# we need absolute dir for ${BASEDIR}
|
||||
cd "${CWD}"
|
||||
BASEDIR=$(cd -P -- "${BASEDIR}" >/dev/null && pwd -P)
|
||||
|
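A minimal sketch (illustrative, not part of the patch) of why MAVEN_ARGS is kept as a bash array and expanded in the mvn calls below: quoted array expansion keeps each element as a single word:

    MAVEN_ARGS=()
    MAVEN_ARGS=("${MAVEN_ARGS[@]}" --offline)      # append an option, as the --offline handling above does
    mvn "${MAVEN_ARGS[@]}" clean test -DskipTests  # expands to: mvn --offline clean test -DskipTests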
@ -935,6 +947,12 @@ function git_checkout
|
|||
# we need to explicitly fetch in case the
|
||||
# git ref hasn't been brought in tree yet
|
||||
if [[ ${OFFLINE} == false ]]; then
|
||||
|
||||
if [[ -f .git/rebase-apply ]]; then
|
||||
hadoop_error "ERROR: previous rebase failed. Aborting it."
|
||||
${GIT} rebase --abort
|
||||
fi
|
||||
|
||||
${GIT} pull --rebase
|
||||
if [[ $? != 0 ]]; then
|
||||
hadoop_error "ERROR: git pull is failing"
|
||||
|
@ -1022,7 +1040,7 @@ function precheck_without_patch
|
|||
|
||||
if [[ $? == 1 ]]; then
|
||||
echo "Compiling ${mypwd}"
|
||||
echo_and_redirect "${PATCH_DIR}/${PATCH_BRANCH}JavacWarnings.txt" "${MVN}" clean test -DskipTests -D${PROJECT_NAME}PatchProcess -Ptest-patch
|
||||
echo_and_redirect "${PATCH_DIR}/${PATCH_BRANCH}JavacWarnings.txt" "${MVN}" "${MAVEN_ARGS[@]}" clean test -DskipTests -D${PROJECT_NAME}PatchProcess -Ptest-patch
|
||||
if [[ $? != 0 ]] ; then
|
||||
echo "${PATCH_BRANCH} compilation is broken?"
|
||||
add_jira_table -1 pre-patch "${PATCH_BRANCH} compilation may be broken."
|
||||
|
@ -1036,7 +1054,7 @@ function precheck_without_patch
|
|||
|
||||
if [[ $? == 1 ]]; then
|
||||
echo "Javadoc'ing ${mypwd}"
|
||||
echo_and_redirect "${PATCH_DIR}/${PATCH_BRANCH}JavadocWarnings.txt" "${MVN}" clean test javadoc:javadoc -DskipTests -Pdocs -D${PROJECT_NAME}PatchProcess
|
||||
echo_and_redirect "${PATCH_DIR}/${PATCH_BRANCH}JavadocWarnings.txt" "${MVN}" "${MAVEN_ARGS[@]}" clean test javadoc:javadoc -DskipTests -Pdocs -D${PROJECT_NAME}PatchProcess
|
||||
if [[ $? != 0 ]] ; then
|
||||
echo "Pre-patch ${PATCH_BRANCH} javadoc compilation is broken?"
|
||||
add_jira_table -1 pre-patch "Pre-patch ${PATCH_BRANCH} JavaDoc compilation may be broken."
|
||||
|
@ -1050,7 +1068,7 @@ function precheck_without_patch
|
|||
|
||||
if [[ $? == 1 ]]; then
|
||||
echo "site creation for ${mypwd}"
|
||||
echo_and_redirect "${PATCH_DIR}/${PATCH_BRANCH}SiteWarnings.txt" "${MVN}" clean site site:stage -DskipTests -Dmaven.javadoc.skip=true -D${PROJECT_NAME}PatchProcess
|
||||
echo_and_redirect "${PATCH_DIR}/${PATCH_BRANCH}SiteWarnings.txt" "${MVN}" "${MAVEN_ARGS[@]}" clean site site:stage -DskipTests -Dmaven.javadoc.skip=true -D${PROJECT_NAME}PatchProcess
|
||||
if [[ $? != 0 ]] ; then
|
||||
echo "Pre-patch ${PATCH_BRANCH} site compilation is broken?"
|
||||
add_jira_table -1 pre-patch "Pre-patch ${PATCH_BRANCH} site compilation may be broken."
|
||||
|
@ -1060,6 +1078,12 @@ function precheck_without_patch
|
|||
echo "Patch does not appear to need site tests."
|
||||
fi
|
||||
|
||||
precheck_findbugs
|
||||
|
||||
if [[ $? != 0 ]] ; then
|
||||
return 1
|
||||
fi
|
||||
|
||||
add_jira_table 0 pre-patch "Pre-patch ${PATCH_BRANCH} compilation is healthy."
|
||||
return 0
|
||||
}
|
||||
|
@ -1455,7 +1479,8 @@ function apply_patch_file
|
|||
}
|
||||
|
||||
|
||||
## @description If this patches actually patches test-patch.sh, then
|
||||
## @description If this actually patches the files used for the QA process
|
||||
## @description under dev-support and its subdirectories, then
|
||||
## @description run with the patched version for the test.
|
||||
## @audience private
|
||||
## @stability evolving
|
||||
|
@ -1471,7 +1496,7 @@ function check_reexec
|
|||
fi
|
||||
|
||||
if [[ ! ${CHANGED_FILES} =~ dev-support/test-patch
|
||||
|| ${CHANGED_FILES} =~ dev-support/smart-apply ]] ; then
|
||||
&& ! ${CHANGED_FILES} =~ dev-support/smart-apply ]] ; then
|
||||
return
|
||||
fi
|
||||
|
||||
|
@ -1492,7 +1517,7 @@ function check_reexec
|
|||
|
||||
rm "${commentfile}" 2>/dev/null
|
||||
|
||||
echo "(!) A patch to test-patch or smart-apply-patch has been detected. " > "${commentfile}"
|
||||
echo "(!) A patch to the files used for the QA process has been detected. " > "${commentfile}"
|
||||
echo "Re-executing against the patched versions to perform further tests. " >> "${commentfile}"
|
||||
echo "The console is at ${BUILD_URL}console in case of problems." >> "${commentfile}"
|
||||
|
||||
|
@ -1502,14 +1527,14 @@ function check_reexec
|
|||
|
||||
cd "${CWD}"
|
||||
mkdir -p "${PATCH_DIR}/dev-support-test"
|
||||
cp -pr "${BASEDIR}"/dev-support/test-patch* "${PATCH_DIR}/dev-support-test"
|
||||
cp -pr "${BASEDIR}"/dev-support/smart-apply* "${PATCH_DIR}/dev-support-test"
|
||||
(cd "${BINDIR}"; tar cpf - . ) \
|
||||
| (cd "${PATCH_DIR}/dev-support-test"; tar xpf - )
|
||||
|
||||
big_console_header "exec'ing test-patch.sh now..."
|
||||
|
||||
exec "${PATCH_DIR}/dev-support-test/test-patch.sh" \
|
||||
--reexec \
|
||||
--branch "${PATCH_BRANCH}" \
|
||||
--branch="${PATCH_BRANCH}" \
|
||||
--patch-dir="${PATCH_DIR}" \
|
||||
"${USER_PARAMS[@]}"
|
||||
}
|
||||
|
@ -1622,12 +1647,12 @@ function check_javadoc
|
|||
start_clock
|
||||
|
||||
if [[ -d hadoop-project ]]; then
|
||||
(cd hadoop-project; "${MVN}" install > /dev/null 2>&1)
|
||||
(cd hadoop-project; "${MVN}" "${MAVEN_ARGS[@]}" install > /dev/null 2>&1)
|
||||
fi
|
||||
if [[ -d hadoop-common-project/hadoop-annotations ]]; then
|
||||
(cd hadoop-common-project/hadoop-annotations; "${MVN}" install > /dev/null 2>&1)
|
||||
(cd hadoop-common-project/hadoop-annotations; "${MVN}" "${MAVEN_ARGS[@]}" install > /dev/null 2>&1)
|
||||
fi
|
||||
echo_and_redirect "${PATCH_DIR}/patchJavadocWarnings.txt" "${MVN}" clean test javadoc:javadoc -DskipTests -Pdocs -D${PROJECT_NAME}PatchProcess
|
||||
echo_and_redirect "${PATCH_DIR}/patchJavadocWarnings.txt" "${MVN}" "${MAVEN_ARGS[@]}" clean test javadoc:javadoc -DskipTests -Pdocs -D${PROJECT_NAME}PatchProcess
|
||||
count_javadoc_warns "${PATCH_DIR}/${PATCH_BRANCH}JavadocWarnings.txt"
|
||||
numBranchJavadocWarnings=$?
|
||||
count_javadoc_warns "${PATCH_DIR}/patchJavadocWarnings.txt"
|
||||
|
@ -1677,7 +1702,7 @@ function check_site
|
|||
start_clock
|
||||
|
||||
echo "site creation for ${mypwd}"
|
||||
echo_and_redirect "${PATCH_DIR}/patchSiteWarnings.txt" "${MVN}" clean site site:stage -DskipTests -Dmaven.javadoc.skip=true -D${PROJECT_NAME}PatchProcess
|
||||
echo_and_redirect "${PATCH_DIR}/patchSiteWarnings.txt" "${MVN}" "${MAVEN_ARGS[@]}" clean site site:stage -DskipTests -Dmaven.javadoc.skip=true -D${PROJECT_NAME}PatchProcess
|
||||
if [[ $? != 0 ]] ; then
|
||||
echo "Site compilation is broken"
|
||||
add_jira_table -1 site "Site compilation is broken."
|
||||
|
@ -1723,7 +1748,7 @@ function check_javac
|
|||
|
||||
start_clock
|
||||
|
||||
echo_and_redirect "${PATCH_DIR}/patchJavacWarnings.txt" "${MVN}" clean test -DskipTests -D${PROJECT_NAME}PatchProcess ${NATIVE_PROFILE} -Ptest-patch
|
||||
echo_and_redirect "${PATCH_DIR}/patchJavacWarnings.txt" "${MVN}" "${MAVEN_ARGS[@]}" clean test -DskipTests -D${PROJECT_NAME}PatchProcess ${NATIVE_PROFILE} -Ptest-patch
|
||||
if [[ $? != 0 ]] ; then
|
||||
add_jira_table -1 javac "The patch appears to cause the build to fail."
|
||||
return 2
|
||||
|
@ -1773,7 +1798,7 @@ function check_apachelicense
|
|||
|
||||
start_clock
|
||||
|
||||
echo_and_redirect "${PATCH_DIR}/patchReleaseAuditOutput.txt" "${MVN}" apache-rat:check -D${PROJECT_NAME}PatchProcess
|
||||
echo_and_redirect "${PATCH_DIR}/patchReleaseAuditOutput.txt" "${MVN}" "${MAVEN_ARGS[@]}" apache-rat:check -D${PROJECT_NAME}PatchProcess
|
||||
#shellcheck disable=SC2038
|
||||
find "${BASEDIR}" -name rat.txt | xargs cat > "${PATCH_DIR}/patchReleaseAuditWarnings.txt"
|
||||
|
||||
|
@ -1828,7 +1853,7 @@ function check_mvn_install
|
|||
big_console_header "Installing all of the jars"
|
||||
|
||||
start_clock
|
||||
echo_and_redirect "${PATCH_DIR}/jarinstall.txt" "${MVN}" install -Dmaven.javadoc.skip=true -DskipTests -D${PROJECT_NAME}PatchProcess
|
||||
echo_and_redirect "${PATCH_DIR}/jarinstall.txt" "${MVN}" "${MAVEN_ARGS[@]}" install -Dmaven.javadoc.skip=true -DskipTests -D${PROJECT_NAME}PatchProcess
|
||||
retval=$?
|
||||
if [[ ${retval} != 0 ]]; then
|
||||
add_jira_table -1 install "The patch causes mvn install to fail."
|
||||
|
@ -1838,6 +1863,137 @@ function check_mvn_install
|
|||
return ${retval}
|
||||
}
|
||||
|
||||
## @description are the needed bits for findbugs present?
|
||||
## @audience private
|
||||
## @stability evolving
|
||||
## @replaceable no
|
||||
## @return 0 findbugs will work for our use
|
||||
## @return 1 findbugs is missing some component
|
||||
function findbugs_is_installed
|
||||
{
|
||||
if [[ ! -e "${FINDBUGS_HOME}/bin/findbugs" ]]; then
|
||||
printf "\n\n%s is not executable.\n\n" "${FINDBUGS_HOME}/bin/findbugs"
|
||||
add_jira_table -1 findbugs "Findbugs is not installed."
|
||||
return 1
|
||||
fi
|
||||
return 0
|
||||
}
|
||||
|
||||
## @description Run the maven findbugs plugin and record found issues in a bug database
|
||||
## @audience private
|
||||
## @stability evolving
|
||||
## @replaceable no
|
||||
## @return 0 on success
|
||||
## @return 1 on failure
|
||||
function findbugs_mvnrunner
|
||||
{
|
||||
local name=$1
|
||||
local logfile=$2
|
||||
local warnings_file=$3
|
||||
|
||||
echo_and_redirect "${logfile}" "${MVN}" "${MAVEN_ARGS[@]}" clean test findbugs:findbugs -DskipTests \
|
||||
"-D${PROJECT_NAME}PatchProcess" < /dev/null
|
||||
if [[ $? != 0 ]]; then
|
||||
return 1
|
||||
fi
|
||||
cp target/findbugsXml.xml "${warnings_file}.xml"
|
||||
|
||||
"${FINDBUGS_HOME}/bin/setBugDatabaseInfo" -name "${name}" \
|
||||
"${warnings_file}.xml" "${warnings_file}.xml"
|
||||
if [[ $? != 0 ]]; then
|
||||
return 1
|
||||
fi
|
||||
|
||||
"${FINDBUGS_HOME}/bin/convertXmlToText" -html "${warnings_file}.xml" \
|
||||
"${warnings_file}.html"
|
||||
if [[ $? != 0 ]]; then
|
||||
return 1
|
||||
fi
|
||||
|
||||
return 0
|
||||
}
|
||||
|
||||
## @description Track pre-existing findbugs warnings
|
||||
## @audience private
|
||||
## @stability evolving
|
||||
## @replaceable no
|
||||
## @return 0 on success
|
||||
## @return 1 on failure
|
||||
function precheck_findbugs
|
||||
{
|
||||
local -r mypwd=$(pwd)
|
||||
local module_suffix
|
||||
local modules=${CHANGED_MODULES}
|
||||
local module
|
||||
local findbugs_version
|
||||
local rc=0
|
||||
local module_findbugs_warnings
|
||||
local findbugs_warnings=0
|
||||
|
||||
verify_needed_test findbugs
|
||||
|
||||
if [[ $? == 0 ]]; then
|
||||
echo "Patch does not appear to need findbugs tests."
|
||||
return 0
|
||||
fi
|
||||
|
||||
echo "findbugs baseline for ${mypwd}"
|
||||
|
||||
findbugs_is_installed
|
||||
if [[ $? != 0 ]]; then
|
||||
return 1
|
||||
fi
|
||||
|
||||
for module in ${modules}
|
||||
do
|
||||
pushd "${module}" >/dev/null
|
||||
echo " Running findbugs in ${module}"
|
||||
module_suffix=$(basename "${module}")
|
||||
findbugs_mvnrunner "${PATCH_BRANCH}" \
|
||||
"${PATCH_DIR}/${PATCH_BRANCH}FindBugsOutput${module_suffix}.txt" \
|
||||
"${PATCH_DIR}/${PATCH_BRANCH}FindbugsWarnings${module_suffix}"
|
||||
(( rc = rc + $? ))
|
||||
|
||||
if [[ "${FINDBUGS_WARNINGS_FAIL_PRECHECK}" == "true" ]]; then
|
||||
#shellcheck disable=SC2016
|
||||
module_findbugs_warnings=$("${FINDBUGS_HOME}/bin/filterBugs" -first \
|
||||
"${PATCH_BRANCH}" \
|
||||
"${PATCH_DIR}/${PATCH_BRANCH}FindbugsWarnings${module_suffix}".xml \
|
||||
"${PATCH_DIR}/${PATCH_BRANCH}FindbugsWarnings${module_suffix}".xml \
|
||||
| ${AWK} '{print $1}')
|
||||
if [[ $? != 0 ]]; then
|
||||
popd >/dev/null
|
||||
return 1
|
||||
fi
|
||||
|
||||
findbugs_warnings=$((findbugs_warnings+module_findbugs_warnings))
|
||||
|
||||
if [[ ${module_findbugs_warnings} -gt 0 ]] ; then
|
||||
add_jira_footer "Pre-patch Findbugs warnings" "@@BASE@@/${PATCH_BRANCH}FindbugsWarnings${module_suffix}.html"
|
||||
fi
|
||||
fi
|
||||
popd >/dev/null
|
||||
done
|
||||
|
||||
#shellcheck disable=SC2016
|
||||
findbugs_version=$(${AWK} 'match($0, /findbugs-maven-plugin:[^:]*:findbugs/) { print substr($0, RSTART + 22, RLENGTH - 31); exit }' "${PATCH_DIR}/${PATCH_BRANCH}FindBugsOutput${module_suffix}.txt")
|
||||
|
||||
if [[ ${rc} -ne 0 ]]; then
|
||||
echo "Pre-patch ${PATCH_BRANCH} findbugs is broken?"
|
||||
add_jira_table -1 pre-patch "Findbugs (version ${findbugs_version}) appears to be broken on ${PATCH_BRANCH}."
|
||||
return 1
|
||||
fi
|
||||
|
||||
if [[ "${FINDBUGS_WARNINGS_FAIL_PRECHECK}" == "true" && \
|
||||
${findbugs_warnings} -gt 0 ]] ; then
|
||||
echo "Pre-patch ${PATCH_BRANCH} findbugs has ${findbugs_warnings} warnings."
|
||||
add_jira_table -1 pre-patch "Pre-patch ${PATCH_BRANCH} has ${findbugs_warnings} extant Findbugs (version ${findbugs_version}) warnings."
|
||||
return 1
|
||||
fi
|
||||
|
||||
return 0
|
||||
}
|
||||
|
||||
## @description Verify patch does not trigger any findbugs warnings
|
||||
## @audience private
|
||||
## @stability evolving
|
||||
|
@ -1846,32 +2002,33 @@ function check_mvn_install
|
|||
## @return 1 on failure
|
||||
function check_findbugs
|
||||
{
|
||||
local findbugs_version
|
||||
local modules=${CHANGED_MODULES}
|
||||
local rc=0
|
||||
local module
|
||||
local modules=${CHANGED_MODULES}
|
||||
local module_suffix
|
||||
local findbugsWarnings=0
|
||||
local relative_file
|
||||
local newFindbugsWarnings
|
||||
local findbugsWarnings
|
||||
local combined_xml
|
||||
local newBugs
|
||||
local new_findbugs_warnings
|
||||
local new_findbugs_fixed_warnings
|
||||
local findbugs_warnings=0
|
||||
local findbugs_fixed_warnings=0
|
||||
local line
|
||||
local firstpart
|
||||
local secondpart
|
||||
|
||||
big_console_header "Determining number of patched Findbugs warnings."
|
||||
|
||||
local findbugs_version
|
||||
|
||||
verify_needed_test findbugs
|
||||
|
||||
if [[ $? == 0 ]]; then
|
||||
echo "Patch does not touch any java files. Skipping findbugs."
|
||||
return 0
|
||||
fi
|
||||
|
||||
big_console_header "Determining number of patched Findbugs warnings."
|
||||
|
||||
start_clock
|
||||
|
||||
if [[ ! -e "${FINDBUGS_HOME}/bin/findbugs" ]]; then
|
||||
printf "\n\n%s is not executable.\n\n" "${FINDBUGS_HOME}/bin/findbugs"
|
||||
add_jira_table -1 findbugs "Findbugs is not installed."
|
||||
findbugs_is_installed
|
||||
if [[ $? != 0 ]]; then
|
||||
return 1
|
||||
fi
|
||||
|
||||
|
@ -1880,67 +2037,82 @@ function check_findbugs
|
|||
pushd "${module}" >/dev/null
|
||||
echo " Running findbugs in ${module}"
|
||||
module_suffix=$(basename "${module}")
|
||||
echo_and_redirect "${PATCH_DIR}/patchFindBugsOutput${module_suffix}.txt" "${MVN}" clean test findbugs:findbugs -DskipTests -D${PROJECT_NAME}PatchProcess \
|
||||
< /dev/null
|
||||
(( rc = rc + $? ))
|
||||
|
||||
findbugs_mvnrunner patch \
|
||||
"${PATCH_DIR}/patchFindBugsOutput${module_suffix}.txt" \
|
||||
"${PATCH_DIR}/patchFindbugsWarnings${module_suffix}"
|
||||
|
||||
if [[ $? != 0 ]] ; then
|
||||
((rc = rc +1))
|
||||
echo "Post-patch findbugs compilation is broken."
|
||||
add_jira_table -1 findbugs "Post-patch findbugs ${module} compilation is broken."
|
||||
continue
|
||||
fi
|
||||
|
||||
combined_xml="$PATCH_DIR/combinedFindbugsWarnings${module_suffix}.xml"
|
||||
newBugs="${PATCH_DIR}/newPatchFindbugsWarnings${module_suffix}"
|
||||
"${FINDBUGS_HOME}/bin/computeBugHistory" -useAnalysisTimes -withMessages \
|
||||
-output "${combined_xml}" \
|
||||
"${PATCH_DIR}/${PATCH_BRANCH}FindbugsWarnings${module_suffix}.xml" \
|
||||
"${PATCH_DIR}/patchFindbugsWarnings${module_suffix}.xml"
|
||||
if [[ $? != 0 ]]; then
|
||||
popd >/dev/null
|
||||
return 1
|
||||
fi
|
||||
|
||||
#shellcheck disable=SC2016
|
||||
new_findbugs_warnings=$("${FINDBUGS_HOME}/bin/filterBugs" -first patch \
|
||||
"${combined_xml}" "${newBugs}.xml" | ${AWK} '{print $1}')
|
||||
if [[ $? != 0 ]]; then
|
||||
popd >/dev/null
|
||||
return 1
|
||||
fi
|
||||
#shellcheck disable=SC2016
|
||||
new_findbugs_fixed_warnings=$("${FINDBUGS_HOME}/bin/filterBugs" -fixed patch \
|
||||
"${combined_xml}" "${newBugs}.xml" | ${AWK} '{print $1}')
|
||||
if [[ $? != 0 ]]; then
|
||||
popd >/dev/null
|
||||
return 1
|
||||
fi
|
||||
|
||||
echo "Found ${new_findbugs_warnings} new Findbugs warnings and ${new_findbugs_fixed_warnings} newly fixed warnings."
|
||||
findbugs_warnings=$((findbugs_warnings+new_findbugs_warnings))
|
||||
findbugs_fixed_warnings=$((findbugs_fixed_warnings+new_findbugs_fixed_warnings))
|
||||
|
||||
"${FINDBUGS_HOME}/bin/convertXmlToText" -html "${newBugs}.xml" \
|
||||
"${newBugs}.html"
|
||||
if [[ $? != 0 ]]; then
|
||||
popd >/dev/null
|
||||
return 1
|
||||
fi
|
||||
|
||||
if [[ ${new_findbugs_warnings} -gt 0 ]] ; then
|
||||
populate_test_table FindBugs "module:${module_suffix}"
|
||||
while read line; do
|
||||
firstpart=$(echo "${line}" | cut -f2 -d:)
|
||||
secondpart=$(echo "${line}" | cut -f9- -d' ')
|
||||
add_jira_test_table "" "${firstpart}:${secondpart}"
|
||||
done < <("${FINDBUGS_HOME}/bin/convertXmlToText" "${newBugs}.xml")
|
||||
|
||||
add_jira_footer "Findbugs warnings" "@@BASE@@/newPatchFindbugsWarnings${module_suffix}.html"
|
||||
fi
|
||||
|
||||
popd >/dev/null
|
||||
done
|
||||
|
||||
#shellcheck disable=SC2016
|
||||
findbugs_version=$(${AWK} 'match($0, /findbugs-maven-plugin:[^:]*:findbugs/) { print substr($0, RSTART + 22, RLENGTH - 31); exit }' "${PATCH_DIR}/patchFindBugsOutput${module_suffix}.txt")
|
||||
|
||||
if [[ ${rc} -ne 0 ]]; then
|
||||
add_jira_table -1 findbugs "The patch appears to cause Findbugs (version ${findbugs_version}) to fail."
|
||||
if [[ ${findbugs_warnings} -gt 0 ]] ; then
|
||||
add_jira_table -1 findbugs "The patch appears to introduce ${findbugs_warnings} new Findbugs (version ${findbugs_version}) warnings."
|
||||
return 1
|
||||
fi
|
||||
|
||||
while read file
|
||||
do
|
||||
relative_file=${file#${BASEDIR}/} # strip leading ${BASEDIR} prefix
|
||||
if [[ ${relative_file} != "target/findbugsXml.xml" ]]; then
|
||||
module_suffix=${relative_file%/target/findbugsXml.xml} # strip trailing path
|
||||
module_suffix=$(basename "${module_suffix}")
|
||||
fi
|
||||
|
||||
cp "${file}" "${PATCH_DIR}/patchFindbugsWarnings${module_suffix}.xml"
|
||||
|
||||
"${FINDBUGS_HOME}/bin/setBugDatabaseInfo" -timestamp "01/01/2000" \
|
||||
"${PATCH_DIR}/patchFindbugsWarnings${module_suffix}.xml" \
|
||||
"${PATCH_DIR}/patchFindbugsWarnings${module_suffix}.xml"
|
||||
|
||||
#shellcheck disable=SC2016
|
||||
newFindbugsWarnings=$("${FINDBUGS_HOME}/bin/filterBugs" \
|
||||
-first "01/01/2000" "${PATCH_DIR}/patchFindbugsWarnings${module_suffix}.xml" \
|
||||
"${PATCH_DIR}/newPatchFindbugsWarnings${module_suffix}.xml" \
|
||||
| ${AWK} '{print $1}')
|
||||
|
||||
echo "Found $newFindbugsWarnings Findbugs warnings ($file)"
|
||||
|
||||
findbugsWarnings=$((findbugsWarnings+newFindbugsWarnings))
|
||||
|
||||
"${FINDBUGS_HOME}/bin/convertXmlToText" -html \
|
||||
"${PATCH_DIR}/newPatchFindbugsWarnings${module_suffix}.xml" \
|
||||
"${PATCH_DIR}/newPatchFindbugsWarnings${module_suffix}.html"
|
||||
|
||||
if [[ ${newFindbugsWarnings} -gt 0 ]] ; then
|
||||
populate_test_table FindBugs "module:${module_suffix}"
|
||||
while read line; do
|
||||
firstpart=$(echo "${line}" | cut -f2 -d:)
|
||||
secondpart=$(echo "${line}" | cut -f9- -d' ')
|
||||
add_jira_test_table "" "${firstpart}:${secondpart}"
|
||||
done < <("${FINDBUGS_HOME}/bin/convertXmlToText" \
|
||||
"${PATCH_DIR}/newPatchFindbugsWarnings${module_suffix}.xml")
|
||||
|
||||
add_jira_footer "Findbugs warnings" "@@BASE@@/newPatchFindbugsWarnings${module_suffix}.html"
|
||||
fi
|
||||
done < <(find "${BASEDIR}" -name findbugsXml.xml)
|
||||
|
||||
if [[ ${findbugsWarnings} -gt 0 ]] ; then
|
||||
add_jira_table -1 findbugs "The patch appears to introduce ${findbugsWarnings} new Findbugs (version ${findbugs_version}) warnings."
|
||||
return 1
|
||||
if [[ ${findbugs_fixed_warnings} -gt 0 ]] ; then
|
||||
add_jira_table +1 findbugs "The patch does not introduce any new Findbugs (version ${findbugs_version}) warnings, and fixes ${findbugs_fixed_warnings} pre-existing warnings."
|
||||
else
|
||||
add_jira_table +1 findbugs "The patch does not introduce any new Findbugs (version ${findbugs_version}) warnings."
|
||||
fi
|
||||
|
||||
add_jira_table +1 findbugs "The patch does not introduce any new Findbugs (version ${findbugs_version}) warnings."
|
||||
return 0
|
||||
}
|
||||
|
||||
|
@ -1962,7 +2134,7 @@ function check_mvn_eclipse
|
|||
|
||||
start_clock
|
||||
|
||||
echo_and_redirect "${PATCH_DIR}/patchEclipseOutput.txt" "${MVN}" eclipse:eclipse -D${PROJECT_NAME}PatchProcess
|
||||
echo_and_redirect "${PATCH_DIR}/patchEclipseOutput.txt" "${MVN}" "${MAVEN_ARGS[@]}" eclipse:eclipse -D${PROJECT_NAME}PatchProcess
|
||||
if [[ $? != 0 ]] ; then
|
||||
add_jira_table -1 eclipse:eclipse "The patch failed to build with eclipse:eclipse."
|
||||
return 1
|
||||
|
@ -2053,7 +2225,7 @@ function check_unittests
|
|||
ordered_modules="${ordered_modules} ${hdfs_modules}"
|
||||
if [[ ${building_common} -eq 0 ]]; then
|
||||
echo " Building hadoop-common with -Pnative in order to provide libhadoop.so to the hadoop-hdfs unit tests."
|
||||
echo_and_redirect "${PATCH_DIR}/testrun_native.txt" "${MVN}" compile ${NATIVE_PROFILE} "-D${PROJECT_NAME}PatchProcess"
|
||||
echo_and_redirect "${PATCH_DIR}/testrun_native.txt" "${MVN}" "${MAVEN_ARGS[@]}" compile ${NATIVE_PROFILE} "-D${PROJECT_NAME}PatchProcess"
|
||||
if [[ $? != 0 ]]; then
|
||||
add_jira_table -1 "native" "Failed to build the native portion " \
|
||||
"of hadoop-common prior to running the unit tests in ${ordered_modules}"
|
||||
|
@ -2073,7 +2245,7 @@ function check_unittests
|
|||
|
||||
test_logfile=${PATCH_DIR}/testrun_${module_suffix}.txt
|
||||
echo " Running tests in ${module_suffix}"
|
||||
echo_and_redirect "${test_logfile}" "${MVN}" clean install -fae ${NATIVE_PROFILE} ${REQUIRE_TEST_LIB_HADOOP} -D${PROJECT_NAME}PatchProcess
|
||||
echo_and_redirect "${test_logfile}" "${MVN}" "${MAVEN_ARGS[@]}" clean install -fae ${NATIVE_PROFILE} ${REQUIRE_TEST_LIB_HADOOP} -D${PROJECT_NAME}PatchProcess
|
||||
test_build_result=$?
|
||||
|
||||
add_jira_footer "${module_suffix} test log" "@@BASE@@/testrun_${module_suffix}.txt"
|
||||
|
|
|
@ -95,6 +95,10 @@
|
|||
<groupId>com.jcraft</groupId>
|
||||
<artifactId>jsch</artifactId>
|
||||
</exclusion>
|
||||
<exclusion>
|
||||
<groupId>org.apache.zookeeper</groupId>
|
||||
<artifactId>zookeeper</artifactId>
|
||||
</exclusion>
|
||||
</exclusions>
|
||||
</dependency>
|
||||
|
||||
|
@ -171,6 +175,10 @@
|
|||
<groupId>io.netty</groupId>
|
||||
<artifactId>netty</artifactId>
|
||||
</exclusion>
|
||||
<exclusion>
|
||||
<groupId>org.apache.zookeeper</groupId>
|
||||
<artifactId>zookeeper</artifactId>
|
||||
</exclusion>
|
||||
</exclusions>
|
||||
</dependency>
|
||||
|
||||
|
|
|
@ -160,6 +160,12 @@ public class AuthenticationFilter implements Filter {
|
|||
*/
|
||||
public static final String COOKIE_PATH = "cookie.path";
|
||||
|
||||
/**
|
||||
* Constant for the configuration property
|
||||
* that indicates the persistence of the HTTP cookie.
|
||||
*/
|
||||
public static final String COOKIE_PERSISTENT = "cookie.persistent";
|
||||
|
||||
/**
|
||||
* Constant for the configuration property that indicates the name of the
|
||||
* SignerSecretProvider class to use.
|
||||
|
@ -187,6 +193,7 @@ public class AuthenticationFilter implements Filter {
|
|||
private long validity;
|
||||
private String cookieDomain;
|
||||
private String cookiePath;
|
||||
private boolean isCookiePersistent;
|
||||
private boolean isInitializedByTomcat;
|
||||
|
||||
/**
|
||||
|
@ -228,6 +235,9 @@ public class AuthenticationFilter implements Filter {
|
|||
|
||||
cookieDomain = config.getProperty(COOKIE_DOMAIN, null);
|
||||
cookiePath = config.getProperty(COOKIE_PATH, null);
|
||||
isCookiePersistent = Boolean.parseBoolean(
|
||||
config.getProperty(COOKIE_PERSISTENT, "false"));
|
||||
|
||||
}
|
||||
|
||||
protected void initializeAuthHandler(String authHandlerClassName, FilterConfig filterConfig)
|
||||
|
@ -371,6 +381,15 @@ public class AuthenticationFilter implements Filter {
|
|||
return cookiePath;
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns the cookie persistence to use for the HTTP cookie.
|
||||
*
|
||||
* @return the cookie persistence to use for the HTTP cookie.
|
||||
*/
|
||||
protected boolean isCookiePersistent() {
|
||||
return isCookiePersistent;
|
||||
}
|
||||
|
||||
/**
|
||||
* Destroys the filter.
|
||||
* <p>
|
||||
|
@ -549,7 +568,8 @@ public class AuthenticationFilter implements Filter {
|
|||
if (newToken && !token.isExpired() && token != AuthenticationToken.ANONYMOUS) {
|
||||
String signedToken = signer.sign(token.toString());
|
||||
createAuthCookie(httpResponse, signedToken, getCookieDomain(),
|
||||
getCookiePath(), token.getExpires(), isHttps);
|
||||
getCookiePath(), token.getExpires(),
|
||||
isCookiePersistent(), isHttps);
|
||||
}
|
||||
doFilter(filterChain, httpRequest, httpResponse);
|
||||
}
|
||||
|
@ -569,7 +589,7 @@ public class AuthenticationFilter implements Filter {
|
|||
if (unauthorizedResponse) {
|
||||
if (!httpResponse.isCommitted()) {
|
||||
createAuthCookie(httpResponse, "", getCookieDomain(),
|
||||
getCookiePath(), 0, isHttps);
|
||||
getCookiePath(), 0, isCookiePersistent(), isHttps);
|
||||
// If the response code is 401, then the WWW-Authenticate header should be
|
||||
// present; reset to 403 if not found.
|
||||
if ((errCode == HttpServletResponse.SC_UNAUTHORIZED)
|
||||
|
@ -614,6 +634,7 @@ public class AuthenticationFilter implements Filter {
|
|||
* @param isSecure is the cookie secure?
|
||||
* @param token the token.
|
||||
* @param expires the cookie expiration time.
|
||||
* @param isCookiePersistent whether the cookie is persistent or not.
|
||||
*
|
||||
* XXX the following code duplicates some logic in Jetty / Servlet API,
|
||||
* because of the fact that Hadoop is stuck at servlet 2.5 and jetty 6
|
||||
|
@ -621,6 +642,7 @@ public class AuthenticationFilter implements Filter {
|
|||
*/
|
||||
public static void createAuthCookie(HttpServletResponse resp, String token,
|
||||
String domain, String path, long expires,
|
||||
boolean isCookiePersistent,
|
||||
boolean isSecure) {
|
||||
StringBuilder sb = new StringBuilder(AuthenticatedURL.AUTH_COOKIE)
|
||||
.append("=");
|
||||
|
@ -636,7 +658,7 @@ public class AuthenticationFilter implements Filter {
|
|||
sb.append("; Domain=").append(domain);
|
||||
}
|
||||
|
||||
if (expires >= 0) {
|
||||
if (expires >= 0 && isCookiePersistent) {
|
||||
Date date = new Date(expires);
|
||||
SimpleDateFormat df = new SimpleDateFormat("EEE, " +
|
||||
"dd-MMM-yyyy HH:mm:ss zzz");
|
||||
|
|
|
@ -73,12 +73,13 @@ To use Kerberos SPNEGO as the authentication mechanism, the authentication filte
|
|||
|
||||
**Example**:
|
||||
|
||||
```xml
|
||||
<web-app version="2.5" xmlns="http://java.sun.com/xml/ns/javaee">
|
||||
...
|
||||
|
||||
<filter>
|
||||
<filter-name>kerberosFilter</filter-name>
|
||||
<filter-class>org.apache.hadoop.security.auth.server.AuthenticationFilter</filter-class>
|
||||
<filter-class>org.apache.hadoop.security.authentication.server.AuthenticationFilter</filter-class>
|
||||
<init-param>
|
||||
<param-name>type</param-name>
|
||||
<param-value>kerberos</param-value>
|
||||
|
@ -112,6 +113,7 @@ To use Kerberos SPNEGO as the authentication mechanism, the authentication filte
|
|||
|
||||
...
|
||||
</web-app>
|
||||
```
|
||||
|
||||
### Pseudo/Simple Configuration
|
||||
|
||||
|
@ -125,12 +127,13 @@ To use Pseudo/Simple as the authentication mechanism (trusting the value of the
|
|||
|
||||
**Example**:
|
||||
|
||||
```xml
|
||||
<web-app version="2.5" xmlns="http://java.sun.com/xml/ns/javaee">
|
||||
...
|
||||
|
||||
<filter>
|
||||
<filter-name>simpleFilter</filter-name>
|
||||
<filter-class>org.apache.hadoop.security.auth.server.AuthenticationFilter</filter-class>
|
||||
<filter-class>org.apache.hadoop.security.authentication.server.AuthenticationFilter</filter-class>
|
||||
<init-param>
|
||||
<param-name>type</param-name>
|
||||
<param-value>simple</param-value>
|
||||
|
@ -160,6 +163,7 @@ To use Pseudo/Simple as the authentication mechanism (trusting the value of the
|
|||
|
||||
...
|
||||
</web-app>
|
||||
```
|
||||
|
||||
### AltKerberos Configuration
|
||||
|
||||
|
@ -175,12 +179,13 @@ The AltKerberos authentication mechanism is a partially implemented derivative o
|
|||
|
||||
**Example**:
|
||||
|
||||
```xml
|
||||
<web-app version="2.5" xmlns="http://java.sun.com/xml/ns/javaee">
|
||||
...
|
||||
|
||||
<filter>
|
||||
<filter-name>kerberosFilter</filter-name>
|
||||
<filter-class>org.apache.hadoop.security.auth.server.AuthenticationFilter</filter-class>
|
||||
<filter-class>org.apache.hadoop.security.authentication.server.AuthenticationFilter</filter-class>
|
||||
<init-param>
|
||||
<param-name>type</param-name>
|
||||
<param-value>org.my.subclass.of.AltKerberosAuthenticationHandler</param-value>
|
||||
|
@ -218,6 +223,7 @@ The AltKerberos authentication mechanism is a partially implemented derivative o
|
|||
|
||||
...
|
||||
</web-app>
|
||||
```
|
||||
|
||||
### SignerSecretProvider Configuration
|
||||
|
||||
|
@ -262,6 +268,7 @@ The following configuration properties are specific to the `zookeeper` implement
|
|||
|
||||
**Example**:
|
||||
|
||||
```xml
|
||||
<web-app version="2.5" xmlns="http://java.sun.com/xml/ns/javaee">
|
||||
...
|
||||
|
||||
|
@ -279,9 +286,11 @@ The following configuration properties are specific to the `zookeeper` implement
|
|||
|
||||
...
|
||||
</web-app>
|
||||
```
|
||||
|
||||
**Example**:
|
||||
|
||||
```xml
|
||||
<web-app version="2.5" xmlns="http://java.sun.com/xml/ns/javaee">
|
||||
...
|
||||
|
||||
|
@ -299,9 +308,11 @@ The following configuration properties are specific to the `zookeeper` implement
|
|||
|
||||
...
|
||||
</web-app>
|
||||
```
|
||||
|
||||
**Example**:
|
||||
|
||||
```xml
|
||||
<web-app version="2.5" xmlns="http://java.sun.com/xml/ns/javaee">
|
||||
...
|
||||
|
||||
|
@ -339,3 +350,4 @@ The following configuration properties are specific to the `zookeeper` implement
|
|||
|
||||
...
|
||||
</web-app>
|
||||
```
|
||||
|
|
|
@ -48,8 +48,16 @@ Trunk (Unreleased)
|
|||
HADOOP-9642. Configuration to resolve environment variables via
|
||||
${env.VARIABLE} references (Kengo Seki via aw)
|
||||
|
||||
HADOOP-7947. Validate XMLs if a relevant tool is available, when using
|
||||
scripts (Kengo Seki via aw)
|
||||
|
||||
HADOOP-10854. unit tests for the shell scripts (aw)
|
||||
|
||||
IMPROVEMENTS
|
||||
|
||||
HADOOP-11203. Allow ditscp to accept bandwitdh in fraction MegaBytes
|
||||
(Raju Bairishetti via amareshwari)
|
||||
|
||||
HADOOP-8017. Configure hadoop-main pom to get rid of M2E plugin execution
|
||||
not covered (Eric Charles via bobby)
|
||||
|
||||
|
@ -221,6 +229,15 @@ Trunk (Unreleased)
|
|||
HADOOP-12016. Typo in FileSystem::listStatusIterator
|
||||
(Arthur Vigil via jghoman)
|
||||
|
||||
HADOOP-11142. Remove hdfs dfs reference from file system shell
|
||||
documentation (Kengo Seki via aw)
|
||||
|
||||
HADOOP-12149. copy all of test-patch BINDIR prior to re-exec (aw)
|
||||
|
||||
HADOOP-10979. Auto-entries in hadoop_usage (aw)
|
||||
|
||||
HADOOP-12249. pull argument parsing into a function (aw)
|
||||
|
||||
BUG FIXES
|
||||
|
||||
HADOOP-11473. test-patch says "-1 overall" even when all checks are +1
|
||||
|
@ -463,6 +480,32 @@ Trunk (Unreleased)
|
|||
HADOOP-11775. Fix Javadoc typos in hadoop-openstack module (Yanjun Wang
|
||||
via aw)
|
||||
|
||||
HADOOP-9891. CLIMiniCluster instructions fail with MiniYarnCluster
|
||||
ClassNotFoundException (Darrell Taylor via aw)
|
||||
|
||||
HADOOP-11406. xargs -P is not portable (Kengo Seki via aw)
|
||||
|
||||
HADOOP-11983. HADOOP_USER_CLASSPATH_FIRST works the opposite of what it is
|
||||
supposed to do (Sangjin Lee via aw)
|
||||
|
||||
HADOOP-12022. fix site -Pdocs -Pdist in hadoop-project-dist; cleanout
|
||||
remaining forrest bits (aw)
|
||||
|
||||
HADOOP-9905. remove dependency of zookeeper for hadoop-client (vinayakumarb)
|
||||
|
||||
HADOOP-11347. RawLocalFileSystem#mkdir and create should honor umask (Varun
|
||||
Saxena via Colin P. McCabe)
|
||||
|
||||
HADOOP-12107. long running apps may have a huge number of StatisticsData
|
||||
instances under FileSystem (Sangjin Lee via Ming Ma)
|
||||
|
||||
HADOOP-11762. Enable swift distcp to secure HDFS (Chen He via aw)
|
||||
|
||||
HADOOP-12009. Clarify FileSystem.listStatus() sorting order & fix
|
||||
FileSystemContractBaseTest:testListStatus. (J.Andreina via jghoman)
|
||||
|
||||
HADOOP-12244. recover broken rebase during precommit (aw)
|
||||
|
||||
OPTIMIZATIONS
|
||||
|
||||
HADOOP-7761. Improve the performance of raw comparisons. (todd)
|
||||
|
@ -494,8 +537,14 @@ Release 2.8.0 - UNRELEASED
|
|||
HADOOP-10971. Add -C flag to make `hadoop fs -ls` print filenames only.
|
||||
(Kengo Seki via aajisaka)
|
||||
|
||||
HADOOP-5732. Add SFTP FileSystem. (Ramtin Boustani and Inigo Goiri via
|
||||
cdouglas)
|
||||
|
||||
IMPROVEMENTS
|
||||
|
||||
HADOOP-12271. Hadoop Jar Error Should Be More Explanatory
|
||||
(Josh Elser via harsh)
|
||||
|
||||
HADOOP-6842. "hadoop fs -text" does not give a useful text representation
|
||||
of MapWritable objects (Akira Ajisaka via bobby)
|
||||
|
||||
|
@ -603,6 +652,104 @@ Release 2.8.0 - UNRELEASED
|
|||
HADOOP-11594. Improve the readability of site index of documentation.
|
||||
(Masatake Iwasaki via aajisaka)
|
||||
|
||||
HADOOP-12030. test-patch should only report on newly introduced
|
||||
findbugs warnings. (Sean Busbey via aw)
|
||||
|
||||
HADOOP-11894. Bump the version of Apache HTrace to 3.2.0-incubating
|
||||
(Masatake Iwasaki via Colin P. McCabe)
|
||||
|
||||
HADOOP-12043. Display warning if defaultFs is not set when running fs
|
||||
commands. (Lei Xu via wang)
|
||||
|
||||
HADOOP-12037. Fix wrong classname in example configuration of hadoop-auth
|
||||
documentation. (Masatake Iwasaki via wang)
|
||||
|
||||
HADOOP-12059. S3Credentials should support use of CredentialProvider.
|
||||
(Sean Busbey via wang)
|
||||
|
||||
HADOOP-12056. Use DirectoryStream in DiskChecker#checkDirs to detect
|
||||
errors when listing a directory. (Zhihai Xu via wang)
|
||||
|
||||
HADOOP-12055. Deprecate usage of NativeIO#link. (Andrew Wang via cnauroth)
|
||||
|
||||
HADOOP-11971. Move test utilities for tracing from hadoop-hdfs to
|
||||
hadoop-common. (Masatake Iwasaki via aajisaka)
|
||||
|
||||
HADOOP-11965. determine-flaky-tests needs a summary mode.
|
||||
(Yufei Gu via Yongjun Zhang)
|
||||
|
||||
HADOOP-11958. MetricsSystemImpl fails to show backtrace when an error
|
||||
occurs (Jason Lowe via jeagles)
|
||||
|
||||
HADOOP-12158. Improve error message in
|
||||
TestCryptoStreamsWithOpensslAesCtrCryptoCodec when OpenSSL is not
|
||||
installed. (wang)
|
||||
|
||||
HADOOP-12124. Add HTrace support for FsShell (cmccabe)
|
||||
|
||||
HADOOP-12171. Shorten overly-long htrace span names for server (cmccabe)
|
||||
|
||||
HADOOP-12045. Enable LocalFileSystem#setTimes to change atime.
|
||||
(Kazuho Fujii via cnauroth)
|
||||
|
||||
HADOOP-11974. Fix FIONREAD #include on Solaris (Alan Burlison via Colin P.
|
||||
McCabe)
|
||||
|
||||
HADOOP-12193. Rename Touchz.java to Touch.java. (wang)
|
||||
|
||||
HADOOP-12195. Add annotation to package-info.java file to workaround
|
||||
MCOMPILER-205. (wang)
|
||||
|
||||
HADOOP-12201. Add tracing to FileSystem#createFileSystem and Globber#glob
|
||||
(cmccabe)
|
||||
|
||||
HADOOP-12180. Move ResourceCalculatorPlugin from YARN to Common.
|
||||
(Chris Douglas via kasha)
|
||||
|
||||
HADOOP-12210. Collect network usage on the node (Robert Grandl via cdouglas)
|
||||
|
||||
HADOOP-12211. Collect disk usage on the node (Robert Grandl via cdouglas)
|
||||
|
||||
HADOOP-12153. ByteBufferReadable doesn't declare @InterfaceAudience and
|
||||
@InterfaceStability. (Brahma Reddy Battula via ozawa)
|
||||
|
||||
HADOOP-11893. Mark org.apache.hadoop.security.token.Token as
|
||||
@InterfaceAudience.Public. (Brahma Reddy Battula via stevel)
|
||||
|
||||
HADOOP-12081. Fix UserGroupInformation.java to support 64-bit zLinux.
|
||||
(aajisaka)
|
||||
|
||||
HADOOP-12214. Parse 'HadoopArchive' commandline using cli Options.
|
||||
(vinayakumarb)
|
||||
|
||||
HADOOP-12184. Remove unused Linux-specific constants in NativeIO (Martin
|
||||
Walsh via Colin P. McCabe)
|
||||
|
||||
HADOOP-12161. Add getStoragePolicy API to the FileSystem interface.
|
||||
(Brahma Reddy Battula via Arpit Agarwal)
|
||||
|
||||
HADOOP-12189. Improve CallQueueManager#swapQueue to make queue elements
|
||||
drop nearly impossible. (Zhihai Xu via wang)
|
||||
|
||||
HADOOP-12259. Utility to Dynamic port allocation (brahmareddy via rkanter)
|
||||
|
||||
HADOOP-12170. hadoop-common's JNIFlags.cmake is redundant and can be
|
||||
removed (Alan Burlison via Colin P. McCabe)
|
||||
|
||||
HADOOP-11807. add a lint mode to releasedocmaker (ramtin via aw)
|
||||
|
||||
HADOOP-12183. Annotate the HTrace span created by FsShell with the
|
||||
command-line arguments passed by the user (Masatake Iwasaki via Colin P.
|
||||
McCabe)
|
||||
|
||||
HADOOP-12280. Skip unit tests based on maven profile rather than
|
||||
NativeCodeLoader.isNativeCodeLoaded (Masatake Iwasaki via Colin P. McCabe)
|
||||
|
||||
HADOOP-12318. Expose underlying LDAP exceptions in SaslPlainServer. (Mike
|
||||
Yoder via atm)
|
||||
|
||||
HADOOP-12295. Improve NetworkTopology#InnerNode#remove logic. (yliu)
|
||||
|
||||
OPTIMIZATIONS
|
||||
|
||||
HADOOP-11785. Reduce the number of listStatus operation in distcp
|
||||
|
@ -617,7 +764,32 @@ Release 2.8.0 - UNRELEASED
|
|||
HADOOP-11772. RPC Invoker relies on static ClientCache which has
|
||||
synchronized(this) blocks. (wheat9)
|
||||
|
||||
HADOOP-11242. Record the time of calling in tracing span of
|
||||
IPC server. (Masatake Iwasaki via aajisaka)
|
||||
|
||||
HADOOP-11885. hadoop-dist dist-layout-stitching.sh does not work with dash.
|
||||
(wang)
|
||||
|
||||
HADOOP-12104. Migrate Hadoop Pipes native build to new CMake framework
|
||||
(alanburlison via cmccabe)
|
||||
|
||||
HADOOP-12036. Consolidate all of the cmake extensions in one directory
|
||||
(alanburlison via cmccabe)
|
||||
|
||||
HADOOP-12112. Make hadoop-common-project Native code -Wall-clean
|
||||
(alanburlison via cmccabe)
|
||||
|
||||
HADOOP-12172. FsShell mkdir -p makes an unnecessary check for the existence
|
||||
of the parent. (cnauroth)
|
||||
|
||||
HADOOP-12194. Support for incremental generation in the protoc plugin.
|
||||
(wang)
|
||||
|
||||
HADOOP-11878. FileContext#fixRelativePart should check for not null for a
|
||||
more informative exception. (Brahma Reddy Battula via kasha)
|
||||
|
||||
BUG FIXES
|
||||
|
||||
HADOOP-11802: DomainSocketWatcher thread terminates sometimes after there
|
||||
is an I/O error during requestShortCircuitShm (cmccabe)
|
||||
|
||||
|
@ -755,7 +927,143 @@ Release 2.8.0 - UNRELEASED
|
|||
HADOOP-8751. NPE in Token.toString() when Token is constructed using null
|
||||
identifier. (kanaka kumar avvaru via aajisaka)
|
||||
|
||||
Release 2.7.1 - UNRELEASED
|
||||
HADOOP-12004. test-patch breaks with reexec in certain situations (Sean
|
||||
Busbey via aw)
|
||||
|
||||
HADOOP-12035. shellcheck plugin displays a wrong version potentially
|
||||
(Kengo Seki via aw)
|
||||
|
||||
HDFS-8429. Avoid stuck threads if there is an error in DomainSocketWatcher
|
||||
that stops the thread. (zhouyingchao via cmccabe)
|
||||
|
||||
HADOOP-11930. test-patch in offline mode should tell maven to be in
|
||||
offline mode (Sean Busbey via aw)
|
||||
|
||||
HADOOP-11959. WASB should configure client side socket timeout in storage
|
||||
client blob request options. (Ivan Mitic via cnauroth)
|
||||
|
||||
HADOOP-12042. Users may see TrashPolicy if hdfs dfs -rm is run
|
||||
(Andreina J via vinayakumarb)
|
||||
|
||||
HADOOP-11991. test-patch.sh isn't re-executed even if smart-apply-patch.sh
|
||||
is modified. (Kengo Seki via aajisaka)
|
||||
|
||||
HADOOP-12018. smart-apply-patch.sh fails if the patch edits CR+LF files
|
||||
and is created by 'git diff --no-prefix'. (Kengo Seki via aajisaka)
|
||||
|
||||
HADOOP-12019. update BUILDING.txt to include python for 'mvn site'
|
||||
in windows (vinayakumarb)
|
||||
|
||||
HADOOP-11994. smart-apply-patch wrongly assumes that git is infallible.
|
||||
(Kengo Seki via Arpit Agarwal)
|
||||
|
||||
HADOOP-11924. Tolerate JDK-8047340-related exceptions in
|
||||
Shell#isSetSidAvailable preventing class init. (Tsuyoshi Ozawa via gera)
|
||||
|
||||
HADOOP-12052 IPC client downgrades all exception types to IOE, breaks
|
||||
callers trying to use them. (Brahma Reddy Battula via stevel)
|
||||
|
||||
HADOOP-12054. RPC client should not retry for InvalidToken exceptions.
|
||||
(Varun Saxena via Arpit Agarwal)
|
||||
|
||||
HADOOP-12073. Azure FileSystem PageBlobInputStream does not return -1 on
|
||||
EOF. (Ivan Mitic via cnauroth)
|
||||
|
||||
HADOOP-7817. RawLocalFileSystem.append() should give FSDataOutputStream
|
||||
with accurate .getPos() (kanaka kumar avvaru via vinayakumarb)
|
||||
|
||||
HADOOP-12074. in Shell.java#runCommand() rethrow InterruptedException as
|
||||
InterruptedIOException (Lavkesh Lahngir via vinayakumarb)
|
||||
|
||||
HADOOP-12001. Fixed LdapGroupsMapping to include configurable Posix UID and
|
||||
GID attributes during the search. (Patrick White via vinodkv)
|
||||
|
||||
HADOOP-12095. org.apache.hadoop.fs.shell.TestCount fails.
|
||||
(Brahma Reddy Battula via aajisaka)
|
||||
|
||||
HADOOP-12076. Incomplete Cache Mechanism in CredentialProvider API.
|
||||
(Larry McCay via cnauroth)
|
||||
|
||||
HADOOP-12119. hadoop fs -expunge does not work for federated namespace
|
||||
(J.Andreina via vinayakumarb)
|
||||
|
||||
HADOOP-12089. StorageException complaining " no lease ID" when updating
|
||||
FolderLastModifiedTime in WASB. (Duo Xu via cnauroth)
|
||||
|
||||
HADOOP-12154. FileSystem#getUsed() returns the file length only from root '/'
|
||||
(J.Andreina via vinayakumarb)
|
||||
|
||||
HADOOP-10798. globStatus() should always return a sorted list of files
|
||||
(cmccabe)
|
||||
|
||||
HADOOP-12159. Move DistCpUtils#compareFs() to org.apache.hadoop.fs.FileUtil
|
||||
and fix for HA namespaces (rchiang via rkanter)
|
||||
|
||||
HADOOP-12116. Fix unrecommended syntax usages in hadoop/hdfs/yarn script for
|
||||
cygwin in branch-2. (Li Lu via cnauroth)
|
||||
|
||||
HADOOP-12164. Fix TestMove and TestFsShellReturnCode failed to get command
|
||||
name using reflection. (Lei (Eddy) Xu)
|
||||
|
||||
HADOOP-12117. Potential NPE from Configuration#loadProperty with
|
||||
allowNullValueProperties set. (zhihai xu via vinayakumarb)
|
||||
|
||||
HADOOP-12200. TestCryptoStreamsWithOpensslAesCtrCryptoCodec should be
|
||||
skipped in non-native profile. (Masatake Iwasaki via aajisaka)
|
||||
|
||||
HADOOP-10615. FileInputStream in JenkinsHash#main() is never closed.
|
||||
(Chen He via ozawa)
|
||||
|
||||
HADOOP-12240. Fix tests requiring native library to be skipped in non-native
|
||||
profile. (Masatake Iwasaki via ozawa)
|
||||
|
||||
HADOOP-12235 hadoop-openstack junit & mockito dependencies should be
|
||||
"provided". (Ted Yu via stevel)
|
||||
|
||||
HADOOP-12209 Comparable type should be in FileStatus.
|
||||
(Yong Zhang via stevel)
|
||||
|
||||
HADOOP-12088. KMSClientProvider uses equalsIgnoreCase("application/json").
|
||||
(Brahma Reddy Battula via stevel)
|
||||
|
||||
HADOOP-12051. ProtobufRpcEngine.invoke() should use Exception.toString()
|
||||
over getMessage() in logging/span events. (Varun Saxena via stevel)
|
||||
|
||||
HADOOP-12017. Hadoop archives command should use configurable replication
|
||||
factor when closing (Bibin A Chundatt via vinayakumarb)
|
||||
|
||||
HADOOP-12239. StorageException complaining " no lease ID" when updating
|
||||
FolderLastModifiedTime in WASB. (Duo Xu via cnauroth)
|
||||
|
||||
HADOOP-12245. References to misspelled REMAINING_QUATA in
|
||||
FileSystemShell.md. (Gabor Liptak via aajisaka)
|
||||
|
||||
HADOOP-12175. FsShell must load SpanReceierHost to support tracing
|
||||
(Masatake Iwasaki via Colin P. McCabe)
|
||||
|
||||
HADOOP-10945. 4-digit octal umask permissions throws a parse error (Chang
|
||||
Li via jlowe)
|
||||
|
||||
HADOOP-7824. NativeIO.java flags and identifiers must be set correctly for
|
||||
each platform, not hardcoded to their Linux values (Martin Walsh via Colin
|
||||
P. McCabe)
|
||||
|
||||
HADOOP-12268. AbstractContractAppendTest#testRenameFileBeingAppended
|
||||
misses rename operation. (Zhihai Xu)
|
||||
|
||||
HADOOP-12274. Remove direct download link from BUILDING.txt.
|
||||
(Caleb Severn via aajisaka)
|
||||
|
||||
HADOOP-12302. Fix native compilation on Windows after HADOOP-7824
|
||||
(Vinayakumar B via Colin P. McCabe)
|
||||
|
||||
HADOOP-12258. Need to translate java.nio.file.NoSuchFileException to
|
||||
FileNotFoundException to avoid regression. (Zhihai Xu via cnauroth)
|
||||
|
||||
HADOOP-12322. typos in rpcmetrics.java. (Anu Engineer via
|
||||
Arpit Agarwal)
|
||||
|
||||
Release 2.7.2 - UNRELEASED
|
||||
|
||||
INCOMPATIBLE CHANGES
|
||||
|
||||
|
@ -763,6 +1071,38 @@ Release 2.7.1 - UNRELEASED
|
|||
|
||||
IMPROVEMENTS
|
||||
|
||||
HADOOP-12232. Upgrade Tomcat dependency to 6.0.44. (cnauroth)
|
||||
|
||||
OPTIMIZATIONS
|
||||
|
||||
BUG FIXES
|
||||
|
||||
HADOOP-12186. ActiveStandbyElector shouldn't call monitorLockNodeAsync
|
||||
multiple times (zhihai xu via vinayakumarb)
|
||||
|
||||
HADOOP-12191. Bzip2Factory is not thread safe. (Brahma Reddy Battula
|
||||
via ozawa)
|
||||
|
||||
HDFS-8767. RawLocalFileSystem.listStatus() returns null for UNIX pipefile.
|
||||
(kanaka kumar avvaru via wheat9)
|
||||
|
||||
HADOOP-12304. Applications using FileContext fail with the default file
|
||||
system configured to be wasb/s3/etc. (cnauroth)
|
||||
|
||||
HADOOP-11932. MetricsSinkAdapter may hang when being stopped.
|
||||
(Brahma Reddy Battula via jianhe)
|
||||
|
||||
Release 2.7.1 - 2015-07-06
|
||||
|
||||
INCOMPATIBLE CHANGES
|
||||
|
||||
NEW FEATURES
|
||||
|
||||
IMPROVEMENTS
|
||||
|
||||
HADOOP-12103. Small refactoring of DelegationTokenAuthenticationFilter to
|
||||
allow code sharing. (Yongjun Zhang)
|
||||
|
||||
OPTIMIZATIONS
|
||||
|
||||
BUG FIXES
|
||||
|
@ -790,6 +1130,19 @@ Release 2.7.1 - UNRELEASED
|
|||
HADOOP-11973. Ensure ZkDelegationTokenSecretManager namespace znodes get
|
||||
created with ACLs. (Gregory Chanan via asuresh)
|
||||
|
||||
HADOOP-11934. Use of JavaKeyStoreProvider in LdapGroupsMapping causes
|
||||
infinite loop. (Larry McCay via cnauroth)
|
||||
|
||||
HADOOP-12058. Fix dead links to DistCp and Hadoop Archives pages.
|
||||
(Kazuho Fujii via aajisaka)
|
||||
|
||||
HADOOP-12078. The default retry policy does not handle RetriableException
|
||||
correctly. (Arpit Agarwal)
|
||||
|
||||
HADOOP-12100. ImmutableFsPermission should not override applyUmask since
|
||||
that method doesn't modify the FsPermission (Bibin A Chundatt via Colin P.
|
||||
McCabe)
|
||||
|
||||
Release 2.7.0 - 2015-04-20
|
||||
|
||||
INCOMPATIBLE CHANGES
|
||||
|
@ -843,8 +1196,6 @@ Release 2.7.0 - 2015-04-20
|
|||
|
||||
HADOOP-10563. Remove the dependency of jsp in trunk. (wheat9)
|
||||
|
||||
HADOOP-10786. Fix UGI#reloginFromKeytab on Java 8. (Stephen Chu via wheat9)
|
||||
|
||||
HADOOP-11291. Log the cause of SASL connection failures.
|
||||
(Stephen Chu via cnauroth)
|
||||
|
||||
|
@ -1145,9 +1496,6 @@ Release 2.7.0 - 2015-04-20
|
|||
HADOOP-11300. KMS startup scripts must not display the keystore /
|
||||
truststore passwords. (Arun Suresh via wang)
|
||||
|
||||
HADOOP-11333. Fix deadlock in DomainSocketWatcher when the notification
|
||||
pipe is full (zhaoyunjiong via cmccabe)
|
||||
|
||||
HADOOP-11337. KeyAuthorizationKeyProvider access checks need to be done
|
||||
atomically. (Dian Fu via wang)
|
||||
|
||||
|
@ -1521,6 +1869,9 @@ Release 2.6.1 - UNRELEASED
|
|||
|
||||
IMPROVEMENTS
|
||||
|
||||
HADOOP-7139. Allow appending to existing SequenceFiles
|
||||
(kanaka kumar avvaru via vinayakumarb)
|
||||
|
||||
OPTIMIZATIONS
|
||||
|
||||
BUG FIXES
|
||||
|
@ -1529,6 +1880,11 @@ Release 2.6.1 - UNRELEASED
|
|||
architecture because it is slower there (Suman Somasundar via Colin P.
|
||||
McCabe)
|
||||
|
||||
HADOOP-10786. Fix UGI#reloginFromKeytab on Java 8. (Stephen Chu via wheat9)
|
||||
|
||||
HADOOP-11333. Fix deadlock in DomainSocketWatcher when the notification
|
||||
pipe is full (zhaoyunjiong via cmccabe)
|
||||
|
||||
Release 2.6.0 - 2014-11-18
|
||||
|
||||
INCOMPATIBLE CHANGES
|
||||
|
|
|
@ -0,0 +1,207 @@
|
|||
#
|
||||
# Licensed to the Apache Software Foundation (ASF) under one
|
||||
# or more contributor license agreements. See the NOTICE file
|
||||
# distributed with this work for additional information
|
||||
# regarding copyright ownership. The ASF licenses this file
|
||||
# to you under the Apache License, Version 2.0 (the
|
||||
# "License"); you may not use this file except in compliance
|
||||
# with the License. You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
|
||||
#
|
||||
# Common CMake utilities and configuration, shared by all Native components.
|
||||
#
|
||||
|
||||
#
|
||||
# Platform-specific prerequisite checks.
|
||||
#
|
||||
|
||||
if(CMAKE_SYSTEM_NAME STREQUAL "SunOS")
|
||||
# Only 64-bit Java is supported.
|
||||
if(NOT JVM_ARCH_DATA_MODEL EQUAL 64)
|
||||
message(FATAL_ERROR "Unrecognised JVM_ARCH_DATA_MODEL '${JVM_ARCH_DATA_MODEL}'. "
|
||||
"A 64-bit JVM must be used on Solaris, make sure that one is installed and, "
|
||||
"if necessary, the MAVEN_OPTS environment variable includes '-d64'")
|
||||
endif()
|
||||
|
||||
# Only gcc is supported for now.
|
||||
if(NOT(CMAKE_COMPILER_IS_GNUCC AND CMAKE_COMPILER_IS_GNUCXX))
|
||||
message(FATAL_ERROR "Only gcc is supported on Solaris")
|
||||
endif()
|
||||
endif()
|
||||
|
||||
#
|
||||
# Helper functions and macros.
|
||||
#
|
||||
|
||||
# Add flags to all the CMake compiler variables
|
||||
macro(hadoop_add_compiler_flags FLAGS)
|
||||
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${FLAGS}")
|
||||
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${FLAGS}")
|
||||
endmacro()
|
||||
|
||||
# Add flags to all the CMake linker variables
|
||||
macro(hadoop_add_linker_flags FLAGS)
|
||||
set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} ${FLAGS}")
|
||||
set(CMAKE_SHARED_LINKER_FLAGS "${CMAKE_SHARED_LINKER_FLAGS} ${FLAGS}")
|
||||
set(CMAKE_STATIC_LINKER_FLAGS "${CMAKE_STATIC_LINKER_FLAGS} ${FLAGS}")
|
||||
endmacro()
|
||||
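# A brief illustration (not part of this change): the two helpers above are the
# mechanism used later in this file to apply shared flags, for example:
#   hadoop_add_compiler_flags("-D_GNU_SOURCE")
#   hadoop_add_linker_flags("-m32")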
|
||||
# Compile a library with both shared and static variants.
|
||||
function(hadoop_add_dual_library LIBNAME)
|
||||
add_library(${LIBNAME} SHARED ${ARGN})
|
||||
add_library(${LIBNAME}_static STATIC ${ARGN})
|
||||
set_target_properties(${LIBNAME}_static PROPERTIES OUTPUT_NAME ${LIBNAME})
|
||||
endfunction()
|
||||
|
||||
# Link both a static and a dynamic target against some libraries.
|
||||
function(hadoop_target_link_dual_libraries LIBNAME)
|
||||
target_link_libraries(${LIBNAME} ${ARGN})
|
||||
target_link_libraries(${LIBNAME}_static ${ARGN})
|
||||
endfunction()
|
||||
|
||||
# Set all the output directories to the same place.
|
||||
function(hadoop_output_directory TGT DIR)
|
||||
set_target_properties(${TGT} PROPERTIES RUNTIME_OUTPUT_DIRECTORY "${CMAKE_BINARY_DIR}/${DIR}")
|
||||
set_target_properties(${TGT} PROPERTIES ARCHIVE_OUTPUT_DIRECTORY "${CMAKE_BINARY_DIR}/${DIR}")
|
||||
set_target_properties(${TGT} PROPERTIES LIBRARY_OUTPUT_DIRECTORY "${CMAKE_BINARY_DIR}/${DIR}")
|
||||
endfunction()
|
||||
|
||||
# Set the target directories for dynamic and static builds.
|
||||
function(hadoop_dual_output_directory TGT DIR)
|
||||
hadoop_output_directory(${TGT} "${DIR}")
|
||||
hadoop_output_directory(${TGT}_static "${DIR}")
|
||||
endfunction()
|
||||
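# Illustrative sketch (not part of this change): a native component that wants
# matching shared and static artifacts could combine the helpers above roughly
# as follows; the target name "example", its source list, and the output
# directory are hypothetical.
#   hadoop_add_dual_library(example src/example.c)
#   hadoop_target_link_dual_libraries(example ${JAVA_JVM_LIBRARY})
#   hadoop_dual_output_directory(example target/usr/local/lib)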
|
||||
# Alter the behavior of find_package and find_library so that we find only
|
||||
# shared libraries with a given version suffix. You should save
|
||||
# CMAKE_FIND_LIBRARY_SUFFIXES before calling this function and restore it
|
||||
# afterwards. On Windows this function is a no-op. Windows does not encode
|
||||
# version number information into library path names.
|
||||
macro(hadoop_set_find_shared_library_version LVERS)
|
||||
if(${CMAKE_SYSTEM_NAME} MATCHES "Darwin")
|
||||
# Mac OS uses .dylib
|
||||
set(CMAKE_FIND_LIBRARY_SUFFIXES ".${LVERS}.dylib")
|
||||
elseif(${CMAKE_SYSTEM_NAME} MATCHES "FreeBSD")
|
||||
# FreeBSD has always .so installed.
|
||||
set(CMAKE_FIND_LIBRARY_SUFFIXES ".so")
|
||||
elseif(${CMAKE_SYSTEM_NAME} MATCHES "Windows")
|
||||
# Windows doesn't support finding shared libraries by version.
|
||||
else()
|
||||
# Most UNIX variants use .so
|
||||
set(CMAKE_FIND_LIBRARY_SUFFIXES ".so.${LVERS}")
|
||||
endif()
|
||||
endmacro()
|
||||
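# Typical call pattern, as used by the native CMakeLists.txt later in this
# change: save CMAKE_FIND_LIBRARY_SUFFIXES, restrict the search to a versioned
# shared library, run the find, then restore the saved suffixes.
#   set(STORED_CMAKE_FIND_LIBRARY_SUFFIXES ${CMAKE_FIND_LIBRARY_SUFFIXES})
#   hadoop_set_find_shared_library_version("1")
#   find_package(ZLIB REQUIRED)
#   set(CMAKE_FIND_LIBRARY_SUFFIXES ${STORED_CMAKE_FIND_LIBRARY_SUFFIXES})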
|
||||
# Alter the behavior of find_package and find_library so that we find only
|
||||
# shared libraries without any version suffix. You should save
|
||||
# CMAKE_FIND_LIBRARY_SUFFIXES before calling this function and restore it
|
||||
# afterwards. On Windows this function is a no-op. Windows does not encode
|
||||
# version number information into library path names.
|
||||
macro(hadoop_set_find_shared_library_without_version)
|
||||
if(${CMAKE_SYSTEM_NAME} MATCHES "Darwin")
|
||||
# Mac OS uses .dylib
|
||||
set(CMAKE_FIND_LIBRARY_SUFFIXES ".dylib")
|
||||
elseif(${CMAKE_SYSTEM_NAME} MATCHES "Windows")
|
||||
# No effect
|
||||
else()
|
||||
# Most UNIX variants use .so
|
||||
set(CMAKE_FIND_LIBRARY_SUFFIXES ".so")
|
||||
endif()
|
||||
endmacro()
|
||||
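# The unversioned variant follows the same save/restore pattern; the libcrypto
# lookup later in this change is one example (paths abbreviated here):
#   set(STORED_CMAKE_FIND_LIBRARY_SUFFIXES ${CMAKE_FIND_LIBRARY_SUFFIXES})
#   hadoop_set_find_shared_library_without_version()
#   find_library(OPENSSL_LIBRARY NAMES crypto PATHS ${CUSTOM_OPENSSL_PREFIX}/lib)
#   set(CMAKE_FIND_LIBRARY_SUFFIXES ${STORED_CMAKE_FIND_LIBRARY_SUFFIXES})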
|
||||
#
|
||||
# Configuration.
|
||||
#
|
||||
|
||||
# Initialise the shared gcc/g++ flags if they aren't already defined.
|
||||
if(NOT DEFINED GCC_SHARED_FLAGS)
|
||||
set(GCC_SHARED_FLAGS "-g -O2 -Wall -pthread -D_FILE_OFFSET_BITS=64")
|
||||
endif()
|
||||
|
||||
# Add support for other compilers here, if necessary;
|
||||
# the assumption is that GCC or a GCC-compatible compiler is being used.
|
||||
|
||||
# Set the shared GCC-compatible compiler and linker flags.
|
||||
hadoop_add_compiler_flags("${GCC_SHARED_FLAGS}")
|
||||
hadoop_add_linker_flags("${LINKER_SHARED_FLAGS}")
|
||||
|
||||
#
|
||||
# Linux-specific configuration.
|
||||
#
|
||||
if(CMAKE_SYSTEM_NAME STREQUAL "Linux")
|
||||
# Make GNU extensions available.
|
||||
hadoop_add_compiler_flags("-D_GNU_SOURCE")
|
||||
|
||||
# If JVM_ARCH_DATA_MODEL is 32, compile all binaries as 32-bit.
|
||||
if(JVM_ARCH_DATA_MODEL EQUAL 32)
|
||||
# Force 32-bit code generation on amd64/x86_64, ppc64, sparc64
|
||||
if(CMAKE_COMPILER_IS_GNUCC AND CMAKE_SYSTEM_PROCESSOR MATCHES ".*64")
|
||||
hadoop_add_compiler_flags("-m32")
|
||||
hadoop_add_linker_flags("-m32")
|
||||
endif()
|
||||
# Set CMAKE_SYSTEM_PROCESSOR to ensure that find_package(JNI) will use 32-bit libraries
|
||||
if(CMAKE_SYSTEM_PROCESSOR STREQUAL "x86_64" OR CMAKE_SYSTEM_PROCESSOR STREQUAL "amd64")
|
||||
set(CMAKE_SYSTEM_PROCESSOR "i686")
|
||||
endif()
|
||||
endif()
|
||||
|
||||
# Determine float ABI of JVM on ARM.
|
||||
if(CMAKE_SYSTEM_PROCESSOR MATCHES "^arm")
|
||||
find_program(READELF readelf)
|
||||
if(READELF MATCHES "NOTFOUND")
|
||||
message(WARNING "readelf not found; JVM float ABI detection disabled")
|
||||
else(READELF MATCHES "NOTFOUND")
|
||||
execute_process(
|
||||
COMMAND ${READELF} -A ${JAVA_JVM_LIBRARY}
|
||||
OUTPUT_VARIABLE JVM_ELF_ARCH
|
||||
ERROR_QUIET)
|
||||
if(NOT JVM_ELF_ARCH MATCHES "Tag_ABI_VFP_args: VFP registers")
|
||||
# Test compilation with -mfloat-abi=softfp using an arbitrary libc function
|
||||
# (typically fails with "fatal error: bits/predefs.h: No such file or directory"
|
||||
# if soft-float dev libraries are not installed)
|
||||
message("Soft-float JVM detected")
|
||||
include(CMakePushCheckState)
|
||||
cmake_push_check_state()
|
||||
set(CMAKE_REQUIRED_FLAGS "${CMAKE_REQUIRED_FLAGS} -mfloat-abi=softfp")
|
||||
include(CheckSymbolExists)
|
||||
check_symbol_exists(exit stdlib.h SOFTFP_AVAILABLE)
|
||||
if(NOT SOFTFP_AVAILABLE)
|
||||
message(FATAL_ERROR "Soft-float dev libraries required (e.g. 'apt-get install libc6-dev-armel' on Debian/Ubuntu)")
|
||||
endif()
|
||||
cmake_pop_check_state()
|
||||
hadoop_add_compiler_flags("-mfloat-abi=softfp")
|
||||
endif()
|
||||
endif()
|
||||
endif()
|
||||
|
||||
#
|
||||
# Solaris-specific configuration.
|
||||
#
|
||||
elseif(CMAKE_SYSTEM_NAME STREQUAL "SunOS")
|
||||
# Solaris flags. 64-bit compilation is mandatory, and is checked earlier.
|
||||
hadoop_add_compiler_flags("-m64 -D__EXTENSIONS__ -D_POSIX_PTHREAD_SEMANTICS -D_XOPEN_SOURCE=500")
|
||||
hadoop_add_linker_flags("-m64")
|
||||
|
||||
# CMAKE_SYSTEM_PROCESSOR is set to the output of 'uname -p', which on Solaris is
|
||||
# the 'lowest' ISA supported, i.e. 'i386' or 'sparc'. However in order for the
|
||||
# standard CMake modules to look in the right places it needs to reflect the required
|
||||
# compilation mode, i.e. 64 bit. We therefore force it to either 'amd64' or 'sparcv9'.
|
||||
if(CMAKE_SYSTEM_PROCESSOR STREQUAL "i386")
|
||||
set(CMAKE_SYSTEM_PROCESSOR "amd64")
|
||||
set(CMAKE_LIBRARY_ARCHITECTURE "amd64")
|
||||
elseif(CMAKE_SYSTEM_PROCESSOR STREQUAL "sparc")
|
||||
set(CMAKE_SYSTEM_PROCESSOR "sparcv9")
|
||||
set(CMAKE_LIBRARY_ARCHITECTURE "sparcv9")
|
||||
else()
|
||||
message(FATAL_ERROR "Unrecognised CMAKE_SYSTEM_PROCESSOR ${CMAKE_SYSTEM_PROCESSOR}")
|
||||
endif()
|
||||
endif()
|
|
@ -0,0 +1,97 @@
|
|||
#
|
||||
# Licensed to the Apache Software Foundation (ASF) under one
|
||||
# or more contributor license agreements. See the NOTICE file
|
||||
# distributed with this work for additional information
|
||||
# regarding copyright ownership. The ASF licenses this file
|
||||
# to you under the Apache License, Version 2.0 (the
|
||||
# "License"); you may not use this file except in compliance
|
||||
# with the License. You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
|
||||
#
|
||||
# Common JNI detection for CMake, shared by all Native components.
|
||||
#
|
||||
|
||||
# Check that the JVM_ARCH_DATA_MODEL variable has been set to 32 or 64 by maven.
|
||||
if(NOT DEFINED JVM_ARCH_DATA_MODEL)
|
||||
message(FATAL_ERROR "JVM_ARCH_DATA_MODEL is not defined")
|
||||
elseif(NOT (JVM_ARCH_DATA_MODEL EQUAL 32 OR JVM_ARCH_DATA_MODEL EQUAL 64))
|
||||
message(FATAL_ERROR "JVM_ARCH_DATA_MODEL is not 32 or 64")
|
||||
endif()
|
||||
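# Note: the maven native build normally supplies this value; when running
# CMake by hand for debugging, it can be passed explicitly (hypothetical
# invocation):
#   cmake -DJVM_ARCH_DATA_MODEL=64 <path-to-native-source>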
|
||||
#
|
||||
# Linux-specific JNI configuration.
|
||||
#
|
||||
if(CMAKE_SYSTEM_NAME STREQUAL "Linux")
|
||||
# Locate JNI_INCLUDE_DIRS and JNI_LIBRARIES.
|
||||
# Since we were invoked from Maven, we know that the JAVA_HOME environment
|
||||
# variable is valid. So we ignore system paths here and just use JAVA_HOME.
|
||||
file(TO_CMAKE_PATH "$ENV{JAVA_HOME}" _java_home)
|
||||
if(CMAKE_SYSTEM_PROCESSOR MATCHES "^i.86$")
|
||||
set(_java_libarch "i386")
|
||||
elseif(CMAKE_SYSTEM_PROCESSOR STREQUAL "x86_64" OR CMAKE_SYSTEM_PROCESSOR STREQUAL "amd64")
|
||||
set(_java_libarch "amd64")
|
||||
elseif(CMAKE_SYSTEM_PROCESSOR MATCHES "^arm")
|
||||
set(_java_libarch "arm")
|
||||
elseif(CMAKE_SYSTEM_PROCESSOR MATCHES "^(powerpc|ppc)64le")
|
||||
if(EXISTS "${_java_home}/jre/lib/ppc64le")
|
||||
set(_java_libarch "ppc64le")
|
||||
else()
|
||||
set(_java_libarch "ppc64")
|
||||
endif()
|
||||
else()
|
||||
set(_java_libarch ${CMAKE_SYSTEM_PROCESSOR})
|
||||
endif()
|
||||
set(_JDK_DIRS "${_java_home}/jre/lib/${_java_libarch}/*"
|
||||
"${_java_home}/jre/lib/${_java_libarch}"
|
||||
"${_java_home}/jre/lib/*"
|
||||
"${_java_home}/jre/lib"
|
||||
"${_java_home}/lib/*"
|
||||
"${_java_home}/lib"
|
||||
"${_java_home}/include/*"
|
||||
"${_java_home}/include"
|
||||
"${_java_home}"
|
||||
)
|
||||
find_path(JAVA_INCLUDE_PATH
|
||||
NAMES jni.h
|
||||
PATHS ${_JDK_DIRS}
|
||||
NO_DEFAULT_PATH)
|
||||
# In IBM Java, it's jniport.h instead of jni_md.h
|
||||
find_path(JAVA_INCLUDE_PATH2
|
||||
NAMES jni_md.h jniport.h
|
||||
PATHS ${_JDK_DIRS}
|
||||
NO_DEFAULT_PATH)
|
||||
set(JNI_INCLUDE_DIRS ${JAVA_INCLUDE_PATH} ${JAVA_INCLUDE_PATH2})
|
||||
find_library(JAVA_JVM_LIBRARY
|
||||
NAMES jvm JavaVM
|
||||
PATHS ${_JDK_DIRS}
|
||||
NO_DEFAULT_PATH)
|
||||
set(JNI_LIBRARIES ${JAVA_JVM_LIBRARY})
|
||||
unset(_java_libarch)
|
||||
unset(_java_home)
|
||||
|
||||
message("JAVA_HOME=${JAVA_HOME}, JAVA_JVM_LIBRARY=${JAVA_JVM_LIBRARY}")
|
||||
message("JAVA_INCLUDE_PATH=${JAVA_INCLUDE_PATH}, JAVA_INCLUDE_PATH2=${JAVA_INCLUDE_PATH2}")
|
||||
if(JAVA_JVM_LIBRARY AND JAVA_INCLUDE_PATH AND JAVA_INCLUDE_PATH2)
|
||||
message("Located all JNI components successfully.")
|
||||
else()
|
||||
message(FATAL_ERROR "Failed to find a viable JVM installation under JAVA_HOME.")
|
||||
endif()
|
||||
|
||||
# Use the standard FindJNI module to locate the JNI components.
|
||||
find_package(JNI REQUIRED)
|
||||
|
||||
#
|
||||
# Otherwise, use the standard FindJNI module to locate the JNI components.
|
||||
#
|
||||
else()
|
||||
find_package(JNI REQUIRED)
|
||||
endif()
|
|
@ -238,6 +238,11 @@
|
|||
<artifactId>jsr305</artifactId>
|
||||
<scope>compile</scope>
|
||||
</dependency>
|
||||
<dependency>
|
||||
<groupId>org.apache.sshd</groupId>
|
||||
<artifactId>sshd-core</artifactId>
|
||||
<scope>test</scope>
|
||||
</dependency>
|
||||
|
||||
<dependency>
|
||||
<groupId>org.apache.htrace</groupId>
|
||||
|
@ -488,7 +493,6 @@
|
|||
<exclude>CHANGES.txt</exclude>
|
||||
<exclude>.idea/**</exclude>
|
||||
<exclude>src/main/conf/*</exclude>
|
||||
<exclude>src/main/docs/**</exclude>
|
||||
<exclude>dev-support/jdiff/**</exclude>
|
||||
<exclude>src/main/native/*</exclude>
|
||||
<exclude>src/main/native/config/*</exclude>
|
||||
|
@ -667,7 +671,7 @@
|
|||
<goals><goal>run</goal></goals>
|
||||
<configuration>
|
||||
<target>
|
||||
<exec executable="sh" failonerror="true" dir="${project.build.directory}/native">
|
||||
<exec executable="${shell-executable}" failonerror="true" dir="${project.build.directory}/native">
|
||||
<arg value="-c"/>
|
||||
<arg value="[ x$SKIPTESTS = xtrue ] || ${project.build.directory}/native/test_bulk_crc32"/>
|
||||
<env key="SKIPTESTS" value="${skipTests}"/>
|
||||
|
@ -954,6 +958,39 @@
|
|||
</build>
|
||||
</profile>
|
||||
|
||||
<!-- profile to test shell code -->
|
||||
<profile>
|
||||
<id>shelltest</id>
|
||||
<activation>
|
||||
<activeByDefault>true</activeByDefault>
|
||||
</activation>
|
||||
<build>
|
||||
<plugins>
|
||||
<plugin>
|
||||
<artifactId>maven-antrun-plugin</artifactId>
|
||||
<executions>
|
||||
<execution>
|
||||
<id>common-test-bats-driver</id>
|
||||
<phase>process-test-classes</phase>
|
||||
<goals>
|
||||
<goal>run</goal>
|
||||
</goals>
|
||||
<configuration>
|
||||
<target>
|
||||
<exec dir="src/test/scripts"
|
||||
executable="bash"
|
||||
failonerror="true">
|
||||
<arg value="./run-bats.sh" />
|
||||
</exec>
|
||||
</target>
|
||||
</configuration>
|
||||
</execution>
|
||||
</executions>
|
||||
</plugin>
|
||||
</plugins>
|
||||
</build>
|
||||
</profile>
|
||||
|
||||
</profiles>
|
||||
</project>
|
||||
|
||||
|
|
|
@ -16,170 +16,101 @@
|
|||
# limitations under the License.
|
||||
#
|
||||
|
||||
#
|
||||
# CMake configuration.
|
||||
#
|
||||
|
||||
cmake_minimum_required(VERSION 2.6 FATAL_ERROR)
|
||||
|
||||
# Default to release builds
|
||||
set(CMAKE_BUILD_TYPE, Release)
|
||||
list(APPEND CMAKE_MODULE_PATH ${CMAKE_SOURCE_DIR}/..)
|
||||
include(HadoopCommon)
|
||||
|
||||
include(JNIFlags.cmake NO_POLICY_SCOPE)
|
||||
|
||||
# Compile a library with both shared and static variants
|
||||
function(add_dual_library LIBNAME)
|
||||
add_library(${LIBNAME} SHARED ${ARGN})
|
||||
add_library(${LIBNAME}_static STATIC ${ARGN})
|
||||
set_target_properties(${LIBNAME}_static PROPERTIES OUTPUT_NAME ${LIBNAME})
|
||||
endfunction(add_dual_library)
|
||||
|
||||
# Link both a static and a dynamic target against some libraries
|
||||
function(target_link_dual_libraries LIBNAME)
|
||||
target_link_libraries(${LIBNAME} ${ARGN})
|
||||
target_link_libraries(${LIBNAME}_static ${ARGN})
|
||||
endfunction(target_link_dual_libraries)
|
||||
|
||||
function(output_directory TGT DIR)
|
||||
SET_TARGET_PROPERTIES(${TGT} PROPERTIES
|
||||
RUNTIME_OUTPUT_DIRECTORY "${CMAKE_BINARY_DIR}/${DIR}")
|
||||
SET_TARGET_PROPERTIES(${TGT} PROPERTIES
|
||||
ARCHIVE_OUTPUT_DIRECTORY "${CMAKE_BINARY_DIR}/${DIR}")
|
||||
SET_TARGET_PROPERTIES(${TGT} PROPERTIES
|
||||
LIBRARY_OUTPUT_DIRECTORY "${CMAKE_BINARY_DIR}/${DIR}")
|
||||
endfunction(output_directory TGT DIR)
|
||||
|
||||
function(dual_output_directory TGT DIR)
|
||||
output_directory(${TGT} "${DIR}")
|
||||
output_directory(${TGT}_static "${DIR}")
|
||||
endfunction(dual_output_directory TGT DIR)
|
||||
# Source and test locations.
|
||||
set(SRC main/native/src/org/apache/hadoop)
|
||||
set(TST main/native/src/test/org/apache/hadoop)
|
||||
|
||||
#
|
||||
# This macro alters the behavior of find_package and find_library.
|
||||
# It does this by setting the CMAKE_FIND_LIBRARY_SUFFIXES global variable.
|
||||
# You should save that variable before calling this function and restore it
|
||||
# after you have accomplished your goal.
|
||||
# Main configuration.
|
||||
#
|
||||
# The behavior is altered in two ways:
|
||||
# 1. We always find shared libraries, never static;
|
||||
# 2. We find shared libraries with the given version number.
|
||||
#
|
||||
# On Windows this function is a no-op. Windows does not encode
|
||||
# version number information into library path names.
|
||||
#
|
||||
macro(set_find_shared_library_version LVERS)
|
||||
IF(${CMAKE_SYSTEM_NAME} MATCHES "Darwin")
|
||||
# Mac OS uses .dylib
|
||||
SET(CMAKE_FIND_LIBRARY_SUFFIXES ".${LVERS}.dylib")
|
||||
ELSEIF(${CMAKE_SYSTEM_NAME} MATCHES "FreeBSD")
|
||||
# FreeBSD has always .so installed.
|
||||
SET(CMAKE_FIND_LIBRARY_SUFFIXES ".so")
|
||||
ELSEIF(${CMAKE_SYSTEM_NAME} MATCHES "Windows")
|
||||
# Windows doesn't support finding shared libraries by version.
|
||||
ELSE()
|
||||
# Most UNIX variants use .so
|
||||
SET(CMAKE_FIND_LIBRARY_SUFFIXES ".so.${LVERS}")
|
||||
ENDIF()
|
||||
endmacro(set_find_shared_library_version LVERS)
|
||||
|
||||
#
|
||||
# Alter the behavior of find_package and find_library so that we find only
|
||||
# shared libraries without any version suffix. You should save
|
||||
# CMAKE_FIND_LIBRARY_SUFFIXES before calling this function and restore it
|
||||
# afterwards.
|
||||
#
|
||||
macro(set_find_shared_library_without_version)
|
||||
IF(${CMAKE_SYSTEM_NAME} MATCHES "Darwin")
|
||||
# Mac OS uses .dylib
|
||||
SET(CMAKE_FIND_LIBRARY_SUFFIXES ".dylib")
|
||||
ELSEIF(${CMAKE_SYSTEM_NAME} MATCHES "Windows")
|
||||
# No effect
|
||||
ELSE()
|
||||
# Most UNIX variants use .so
|
||||
SET(CMAKE_FIND_LIBRARY_SUFFIXES ".so")
|
||||
ENDIF()
|
||||
endmacro(set_find_shared_library_without_version)
|
||||
# The caller must specify where the generated headers have been placed.
|
||||
if(NOT GENERATED_JAVAH)
|
||||
message(FATAL_ERROR "You must set the CMake variable GENERATED_JAVAH")
|
||||
endif()
|
||||
|
||||
if (NOT GENERATED_JAVAH)
|
||||
# Must identify where the generated headers have been placed
|
||||
MESSAGE(FATAL_ERROR "You must set the cmake variable GENERATED_JAVAH")
|
||||
endif (NOT GENERATED_JAVAH)
|
||||
find_package(JNI REQUIRED)
|
||||
# Configure JNI.
|
||||
include(HadoopJNI)
|
||||
|
||||
SET(STORED_CMAKE_FIND_LIBRARY_SUFFIXES ${CMAKE_FIND_LIBRARY_SUFFIXES})
|
||||
set_find_shared_library_version("1")
|
||||
# Require zlib.
|
||||
set(STORED_CMAKE_FIND_LIBRARY_SUFFIXES ${CMAKE_FIND_LIBRARY_SUFFIXES})
|
||||
hadoop_set_find_shared_library_version("1")
|
||||
find_package(ZLIB REQUIRED)
|
||||
SET(CMAKE_FIND_LIBRARY_SUFFIXES ${STORED_CMAKE_FIND_LIBRARY_SUFFIXES})
|
||||
set(CMAKE_FIND_LIBRARY_SUFFIXES ${STORED_CMAKE_FIND_LIBRARY_SUFFIXES})
|
||||
get_filename_component(HADOOP_ZLIB_LIBRARY ${ZLIB_LIBRARIES} NAME)
|
||||
|
||||
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -g -Wall -O2")
|
||||
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -D_REENTRANT -D_GNU_SOURCE")
|
||||
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -D_LARGEFILE_SOURCE -D_FILE_OFFSET_BITS=64")
|
||||
set(D main/native/src/org/apache/hadoop)
|
||||
set(T main/native/src/test/org/apache/hadoop)
|
||||
|
||||
GET_FILENAME_COMPONENT(HADOOP_ZLIB_LIBRARY ${ZLIB_LIBRARIES} NAME)
|
||||
|
||||
SET(STORED_CMAKE_FIND_LIBRARY_SUFFIXES ${CMAKE_FIND_LIBRARY_SUFFIXES})
|
||||
set_find_shared_library_version("1")
|
||||
# Look for bzip2.
|
||||
set(STORED_CMAKE_FIND_LIBRARY_SUFFIXES ${CMAKE_FIND_LIBRARY_SUFFIXES})
|
||||
hadoop_set_find_shared_library_version("1")
|
||||
find_package(BZip2 QUIET)
|
||||
if (BZIP2_INCLUDE_DIR AND BZIP2_LIBRARIES)
|
||||
GET_FILENAME_COMPONENT(HADOOP_BZIP2_LIBRARY ${BZIP2_LIBRARIES} NAME)
|
||||
if(BZIP2_INCLUDE_DIR AND BZIP2_LIBRARIES)
|
||||
get_filename_component(HADOOP_BZIP2_LIBRARY ${BZIP2_LIBRARIES} NAME)
|
||||
set(BZIP2_SOURCE_FILES
|
||||
"${D}/io/compress/bzip2/Bzip2Compressor.c"
|
||||
"${D}/io/compress/bzip2/Bzip2Decompressor.c")
|
||||
else (BZIP2_INCLUDE_DIR AND BZIP2_LIBRARIES)
|
||||
"${SRC}/io/compress/bzip2/Bzip2Compressor.c"
|
||||
"${SRC}/io/compress/bzip2/Bzip2Decompressor.c")
|
||||
set(REQUIRE_BZIP2 ${REQUIRE_BZIP2}) # Stop warning about unused variable.
|
||||
else()
|
||||
set(BZIP2_SOURCE_FILES "")
|
||||
set(BZIP2_INCLUDE_DIR "")
|
||||
IF(REQUIRE_BZIP2)
|
||||
MESSAGE(FATAL_ERROR "Required bzip2 library and/or header files could not be found.")
|
||||
ENDIF(REQUIRE_BZIP2)
|
||||
endif (BZIP2_INCLUDE_DIR AND BZIP2_LIBRARIES)
|
||||
SET(CMAKE_FIND_LIBRARY_SUFFIXES ${STORED_CMAKE_FIND_LIBRARY_SUFFIXES})
|
||||
if(REQUIRE_BZIP2)
|
||||
message(FATAL_ERROR "Required bzip2 library and/or header files could not be found.")
|
||||
endif()
|
||||
endif()
|
||||
set(CMAKE_FIND_LIBRARY_SUFFIXES ${STORED_CMAKE_FIND_LIBRARY_SUFFIXES})
|
||||
|
||||
INCLUDE(CheckFunctionExists)
|
||||
INCLUDE(CheckCSourceCompiles)
|
||||
INCLUDE(CheckLibraryExists)
|
||||
CHECK_FUNCTION_EXISTS(sync_file_range HAVE_SYNC_FILE_RANGE)
|
||||
CHECK_FUNCTION_EXISTS(posix_fadvise HAVE_POSIX_FADVISE)
|
||||
CHECK_LIBRARY_EXISTS(dl dlopen "" NEED_LINK_DL)
|
||||
|
||||
SET(STORED_CMAKE_FIND_LIBRARY_SUFFIXES ${CMAKE_FIND_LIBRARY_SUFFIXES})
|
||||
set_find_shared_library_version("1")
|
||||
# Require snappy.
|
||||
set(STORED_CMAKE_FIND_LIBRARY_SUFFIXES ${CMAKE_FIND_LIBRARY_SUFFIXES})
|
||||
hadoop_set_find_shared_library_version("1")
|
||||
find_library(SNAPPY_LIBRARY
|
||||
NAMES snappy
|
||||
PATHS ${CUSTOM_SNAPPY_PREFIX} ${CUSTOM_SNAPPY_PREFIX}/lib
|
||||
${CUSTOM_SNAPPY_PREFIX}/lib64 ${CUSTOM_SNAPPY_LIB})
|
||||
SET(CMAKE_FIND_LIBRARY_SUFFIXES ${STORED_CMAKE_FIND_LIBRARY_SUFFIXES})
|
||||
set(CMAKE_FIND_LIBRARY_SUFFIXES ${STORED_CMAKE_FIND_LIBRARY_SUFFIXES})
|
||||
find_path(SNAPPY_INCLUDE_DIR
|
||||
NAMES snappy.h
|
||||
PATHS ${CUSTOM_SNAPPY_PREFIX} ${CUSTOM_SNAPPY_PREFIX}/include
|
||||
${CUSTOM_SNAPPY_INCLUDE})
|
||||
if (SNAPPY_LIBRARY AND SNAPPY_INCLUDE_DIR)
|
||||
GET_FILENAME_COMPONENT(HADOOP_SNAPPY_LIBRARY ${SNAPPY_LIBRARY} NAME)
|
||||
if(SNAPPY_LIBRARY AND SNAPPY_INCLUDE_DIR)
|
||||
get_filename_component(HADOOP_SNAPPY_LIBRARY ${SNAPPY_LIBRARY} NAME)
|
||||
set(SNAPPY_SOURCE_FILES
|
||||
"${D}/io/compress/snappy/SnappyCompressor.c"
|
||||
"${D}/io/compress/snappy/SnappyDecompressor.c")
|
||||
else (SNAPPY_LIBRARY AND SNAPPY_INCLUDE_DIR)
|
||||
"${SRC}/io/compress/snappy/SnappyCompressor.c"
|
||||
"${SRC}/io/compress/snappy/SnappyDecompressor.c")
|
||||
set(REQUIRE_SNAPPY ${REQUIRE_SNAPPY}) # Stop warning about unused variable.
|
||||
message(STATUS "Found Snappy: ${SNAPPY_LIBRARY}")
|
||||
else()
|
||||
set(SNAPPY_INCLUDE_DIR "")
|
||||
set(SNAPPY_SOURCE_FILES "")
|
||||
IF(REQUIRE_SNAPPY)
|
||||
MESSAGE(FATAL_ERROR "Required snappy library could not be found. SNAPPY_LIBRARY=${SNAPPY_LIBRARY}, SNAPPY_INCLUDE_DIR=${SNAPPY_INCLUDE_DIR}, CUSTOM_SNAPPY_INCLUDE_DIR=${CUSTOM_SNAPPY_INCLUDE_DIR}, CUSTOM_SNAPPY_PREFIX=${CUSTOM_SNAPPY_PREFIX}, CUSTOM_SNAPPY_INCLUDE=${CUSTOM_SNAPPY_INCLUDE}")
|
||||
ENDIF(REQUIRE_SNAPPY)
|
||||
endif (SNAPPY_LIBRARY AND SNAPPY_INCLUDE_DIR)
|
||||
if(REQUIRE_SNAPPY)
|
||||
message(FATAL_ERROR "Required snappy library could not be found. SNAPPY_LIBRARY=${SNAPPY_LIBRARY}, SNAPPY_INCLUDE_DIR=${SNAPPY_INCLUDE_DIR}, CUSTOM_SNAPPY_INCLUDE_DIR=${CUSTOM_SNAPPY_INCLUDE_DIR}, CUSTOM_SNAPPY_PREFIX=${CUSTOM_SNAPPY_PREFIX}, CUSTOM_SNAPPY_INCLUDE=${CUSTOM_SNAPPY_INCLUDE}")
|
||||
endif()
|
||||
endif()
|
||||
|
||||
IF (CMAKE_SYSTEM_PROCESSOR MATCHES "^i.86$" OR CMAKE_SYSTEM_PROCESSOR STREQUAL "x86_64" OR CMAKE_SYSTEM_PROCESSOR STREQUAL "amd64")
|
||||
set(BULK_CRC_ARCH_SOURCE_FIlE "${D}/util/bulk_crc32_x86.c")
|
||||
ELSEIF (CMAKE_SYSTEM_PROCESSOR STREQUAL "aarch64")
|
||||
set(BULK_CRC_ARCH_SOURCE_FIlE "${D}/util/bulk_crc32_aarch64.c")
|
||||
ELSE()
|
||||
MESSAGE("No HW CRC acceleration for ${CMAKE_SYSTEM_PROCESSOR}, falling back to SW")
|
||||
ENDIF()
|
||||
# Build hardware CRC32 acceleration, if supported on the platform.
|
||||
if(CMAKE_SYSTEM_PROCESSOR MATCHES "^i.86$" OR CMAKE_SYSTEM_PROCESSOR STREQUAL "x86_64" OR CMAKE_SYSTEM_PROCESSOR STREQUAL "amd64")
|
||||
set(BULK_CRC_ARCH_SOURCE_FIlE "${SRC}/util/bulk_crc32_x86.c")
|
||||
elseif(CMAKE_SYSTEM_PROCESSOR STREQUAL "aarch64")
|
||||
set(BULK_CRC_ARCH_SOURCE_FIlE "${SRC}/util/bulk_crc32_aarch64.c")
|
||||
else()
|
||||
message("No HW CRC acceleration for ${CMAKE_SYSTEM_PROCESSOR}, falling back to SW")
|
||||
endif()
|
||||
|
||||
# Find the no-suffix version of libcrypto.
|
||||
# See HADOOP-11216 for details.
|
||||
SET(STORED_CMAKE_FIND_LIBRARY_SUFFIXES ${CMAKE_FIND_LIBRARY_SUFFIXES})
|
||||
set_find_shared_library_without_version()
|
||||
SET(OPENSSL_NAME "crypto")
|
||||
IF(${CMAKE_SYSTEM_NAME} MATCHES "Windows")
|
||||
# Find the no-suffix version of libcrypto/openssl. See HADOOP-11216 for details.
|
||||
set(STORED_CMAKE_FIND_LIBRARY_SUFFIXES ${CMAKE_FIND_LIBRARY_SUFFIXES})
|
||||
hadoop_set_find_shared_library_without_version()
|
||||
set(OPENSSL_NAME "crypto")
|
||||
if(${CMAKE_SYSTEM_NAME} MATCHES "Windows")
|
||||
SET(OPENSSL_NAME "eay32")
|
||||
ENDIF()
|
||||
MESSAGE("CUSTOM_OPENSSL_PREFIX = ${CUSTOM_OPENSSL_PREFIX}")
|
||||
endif()
|
||||
message("CUSTOM_OPENSSL_PREFIX = ${CUSTOM_OPENSSL_PREFIX}")
|
||||
find_library(OPENSSL_LIBRARY
|
||||
NAMES ${OPENSSL_NAME}
|
||||
PATHS ${CUSTOM_OPENSSL_PREFIX} ${CUSTOM_OPENSSL_PREFIX}/lib
|
||||
|
@ -190,35 +121,44 @@ find_path(OPENSSL_INCLUDE_DIR
|
|||
PATHS ${CUSTOM_OPENSSL_PREFIX} ${CUSTOM_OPENSSL_PREFIX}/include
|
||||
${CUSTOM_OPENSSL_INCLUDE} NO_DEFAULT_PATH)
|
||||
find_path(OPENSSL_INCLUDE_DIR NAMES openssl/evp.h)
|
||||
SET(CMAKE_FIND_LIBRARY_SUFFIXES ${STORED_CMAKE_FIND_LIBRARY_SUFFIXES})
|
||||
SET(USABLE_OPENSSL 0)
|
||||
if (OPENSSL_LIBRARY AND OPENSSL_INCLUDE_DIR)
|
||||
INCLUDE(CheckCSourceCompiles)
|
||||
SET(OLD_CMAKE_REQUIRED_INCLUDES ${CMAKE_REQUIRED_INCLUDES})
|
||||
SET(CMAKE_REQUIRED_INCLUDES ${OPENSSL_INCLUDE_DIR})
|
||||
CHECK_C_SOURCE_COMPILES("#include \"${OPENSSL_INCLUDE_DIR}/openssl/evp.h\"\nint main(int argc, char **argv) { return !EVP_aes_256_ctr; }" HAS_NEW_ENOUGH_OPENSSL)
|
||||
SET(CMAKE_REQUIRED_INCLUDES ${OLD_CMAKE_REQUIRED_INCLUDES})
|
||||
set(CMAKE_FIND_LIBRARY_SUFFIXES ${STORED_CMAKE_FIND_LIBRARY_SUFFIXES})
|
||||
set(USABLE_OPENSSL 0)
|
||||
if(OPENSSL_LIBRARY AND OPENSSL_INCLUDE_DIR)
|
||||
include(CheckCSourceCompiles)
|
||||
set(OLD_CMAKE_REQUIRED_INCLUDES ${CMAKE_REQUIRED_INCLUDES})
|
||||
set(CMAKE_REQUIRED_INCLUDES ${OPENSSL_INCLUDE_DIR})
|
||||
check_c_source_compiles("#include \"${OPENSSL_INCLUDE_DIR}/openssl/evp.h\"\nint main(int argc, char **argv) { return !EVP_aes_256_ctr; }" HAS_NEW_ENOUGH_OPENSSL)
|
||||
set(CMAKE_REQUIRED_INCLUDES ${OLD_CMAKE_REQUIRED_INCLUDES})
|
||||
if(NOT HAS_NEW_ENOUGH_OPENSSL)
|
||||
MESSAGE("The OpenSSL library installed at ${OPENSSL_LIBRARY} is too old. You need a version at least new enough to have EVP_aes_256_ctr.")
|
||||
else(NOT HAS_NEW_ENOUGH_OPENSSL)
|
||||
message("The OpenSSL library installed at ${OPENSSL_LIBRARY} is too old. You need a version at least new enough to have EVP_aes_256_ctr.")
|
||||
else()
|
||||
SET(USABLE_OPENSSL 1)
|
||||
endif(NOT HAS_NEW_ENOUGH_OPENSSL)
|
||||
endif (OPENSSL_LIBRARY AND OPENSSL_INCLUDE_DIR)
|
||||
if (USABLE_OPENSSL)
|
||||
GET_FILENAME_COMPONENT(HADOOP_OPENSSL_LIBRARY ${OPENSSL_LIBRARY} NAME)
|
||||
SET(OPENSSL_SOURCE_FILES
|
||||
"${D}/crypto/OpensslCipher.c"
|
||||
"${D}/crypto/random/OpensslSecureRandom.c")
|
||||
else (USABLE_OPENSSL)
|
||||
MESSAGE("Cannot find a usable OpenSSL library. OPENSSL_LIBRARY=${OPENSSL_LIBRARY}, OPENSSL_INCLUDE_DIR=${OPENSSL_INCLUDE_DIR}, CUSTOM_OPENSSL_LIB=${CUSTOM_OPENSSL_LIB}, CUSTOM_OPENSSL_PREFIX=${CUSTOM_OPENSSL_PREFIX}, CUSTOM_OPENSSL_INCLUDE=${CUSTOM_OPENSSL_INCLUDE}")
|
||||
IF(REQUIRE_OPENSSL)
|
||||
MESSAGE(FATAL_ERROR "Terminating build because require.openssl was specified.")
|
||||
ENDIF(REQUIRE_OPENSSL)
|
||||
SET(OPENSSL_LIBRARY "")
|
||||
SET(OPENSSL_INCLUDE_DIR "")
|
||||
SET(OPENSSL_SOURCE_FILES "")
|
||||
endif (USABLE_OPENSSL)
|
||||
endif()
|
||||
endif()
|
||||
if(USABLE_OPENSSL)
|
||||
get_filename_component(HADOOP_OPENSSL_LIBRARY ${OPENSSL_LIBRARY} NAME)
|
||||
set(OPENSSL_SOURCE_FILES
|
||||
"${SRC}/crypto/OpensslCipher.c"
|
||||
"${SRC}/crypto/random/OpensslSecureRandom.c")
|
||||
set(REQUIRE_OPENSSL ${REQUIRE_OPENSSL}) # Stop warning about unused variable.
|
||||
else()
|
||||
message("Cannot find a usable OpenSSL library. OPENSSL_LIBRARY=${OPENSSL_LIBRARY}, OPENSSL_INCLUDE_DIR=${OPENSSL_INCLUDE_DIR}, CUSTOM_OPENSSL_LIB=${CUSTOM_OPENSSL_LIB}, CUSTOM_OPENSSL_PREFIX=${CUSTOM_OPENSSL_PREFIX}, CUSTOM_OPENSSL_INCLUDE=${CUSTOM_OPENSSL_INCLUDE}")
|
||||
if(REQUIRE_OPENSSL)
|
||||
message(FATAL_ERROR "Terminating build because require.openssl was specified.")
|
||||
endif()
|
||||
set(OPENSSL_LIBRARY "")
|
||||
set(OPENSSL_INCLUDE_DIR "")
|
||||
set(OPENSSL_SOURCE_FILES "")
|
||||
endif()
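The compile probe above only passes when the installed OpenSSL already exports EVP_aes_256_ctr; a hedged command-line equivalent of that same probe (assuming cc and the OpenSSL development headers are installed) is:

  # compiles and links the same one-liner the CMake check uses
  printf '#include <openssl/evp.h>\nint main(void){ return !EVP_aes_256_ctr; }\n' \
    | cc -x c - -o /dev/null -lcrypto && echo "OpenSSL is new enough"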
|
||||
|
||||
# Check for platform-specific functions and libraries.
|
||||
include(CheckFunctionExists)
|
||||
include(CheckLibraryExists)
|
||||
check_function_exists(sync_file_range HAVE_SYNC_FILE_RANGE)
|
||||
check_function_exists(posix_fadvise HAVE_POSIX_FADVISE)
|
||||
check_library_exists(dl dlopen "" NEED_LINK_DL)
|
||||
|
||||
# Configure the build.
|
||||
include_directories(
|
||||
${GENERATED_JAVAH}
|
||||
main/native/src
|
||||
|
@ -230,66 +170,60 @@ include_directories(
|
|||
${BZIP2_INCLUDE_DIR}
|
||||
${SNAPPY_INCLUDE_DIR}
|
||||
${OPENSSL_INCLUDE_DIR}
|
||||
${D}/util
|
||||
${SRC}/util
|
||||
)
|
||||
CONFIGURE_FILE(${CMAKE_SOURCE_DIR}/config.h.cmake ${CMAKE_BINARY_DIR}/config.h)
|
||||
configure_file(${CMAKE_SOURCE_DIR}/config.h.cmake ${CMAKE_BINARY_DIR}/config.h)
|
||||
|
||||
add_executable(test_bulk_crc32
|
||||
${D}/util/bulk_crc32.c
|
||||
${BULK_CRC_ARCH_SOURCE_FIlE}
|
||||
${T}/util/test_bulk_crc32.c
|
||||
)
|
||||
|
||||
SET(CMAKE_BUILD_WITH_INSTALL_RPATH TRUE)
|
||||
add_dual_library(hadoop
|
||||
set(CMAKE_BUILD_WITH_INSTALL_RPATH TRUE)
|
||||
hadoop_add_dual_library(hadoop
|
||||
main/native/src/exception.c
|
||||
${D}/io/compress/lz4/Lz4Compressor.c
|
||||
${D}/io/compress/lz4/Lz4Decompressor.c
|
||||
${D}/io/compress/lz4/lz4.c
|
||||
${D}/io/compress/lz4/lz4hc.c
|
||||
${SRC}/io/compress/lz4/Lz4Compressor.c
|
||||
${SRC}/io/compress/lz4/Lz4Decompressor.c
|
||||
${SRC}/io/compress/lz4/lz4.c
|
||||
${SRC}/io/compress/lz4/lz4hc.c
|
||||
${SNAPPY_SOURCE_FILES}
|
||||
${OPENSSL_SOURCE_FILES}
|
||||
${D}/io/compress/zlib/ZlibCompressor.c
|
||||
${D}/io/compress/zlib/ZlibDecompressor.c
|
||||
${SRC}/io/compress/zlib/ZlibCompressor.c
|
||||
${SRC}/io/compress/zlib/ZlibDecompressor.c
|
||||
${BZIP2_SOURCE_FILES}
|
||||
${D}/io/nativeio/NativeIO.c
|
||||
${D}/io/nativeio/errno_enum.c
|
||||
${D}/io/nativeio/file_descriptor.c
|
||||
${D}/io/nativeio/SharedFileDescriptorFactory.c
|
||||
${D}/net/unix/DomainSocket.c
|
||||
${D}/net/unix/DomainSocketWatcher.c
|
||||
${D}/security/JniBasedUnixGroupsMapping.c
|
||||
${D}/security/JniBasedUnixGroupsNetgroupMapping.c
|
||||
${D}/security/hadoop_group_info.c
|
||||
${D}/security/hadoop_user_info.c
|
||||
${D}/util/NativeCodeLoader.c
|
||||
${D}/util/NativeCrc32.c
|
||||
${D}/util/bulk_crc32.c
|
||||
${SRC}/io/nativeio/NativeIO.c
|
||||
${SRC}/io/nativeio/errno_enum.c
|
||||
${SRC}/io/nativeio/file_descriptor.c
|
||||
${SRC}/io/nativeio/SharedFileDescriptorFactory.c
|
||||
${SRC}/net/unix/DomainSocket.c
|
||||
${SRC}/net/unix/DomainSocketWatcher.c
|
||||
${SRC}/security/JniBasedUnixGroupsMapping.c
|
||||
${SRC}/security/JniBasedUnixGroupsNetgroupMapping.c
|
||||
${SRC}/security/hadoop_group_info.c
|
||||
${SRC}/security/hadoop_user_info.c
|
||||
${SRC}/util/NativeCodeLoader.c
|
||||
${SRC}/util/NativeCrc32.c
|
||||
${SRC}/util/bulk_crc32.c
|
||||
${BULK_CRC_ARCH_SOURCE_FIlE}
|
||||
)
|
||||
if (NEED_LINK_DL)
|
||||
if(NEED_LINK_DL)
|
||||
set(LIB_DL dl)
|
||||
endif (NEED_LINK_DL)
|
||||
endif()
|
||||
|
||||
IF (${CMAKE_SYSTEM_NAME} MATCHES "Linux")
|
||||
#
|
||||
# By embedding '$ORIGIN' into the RPATH of libhadoop.so,
|
||||
# dlopen will look in the directory containing libhadoop.so.
|
||||
# However, $ORIGIN is not supported by all operating systems.
|
||||
#
|
||||
hadoop_target_link_dual_libraries(hadoop ${LIB_DL} ${JAVA_JVM_LIBRARY})
|
||||
set(LIBHADOOP_VERSION "1.0.0")
|
||||
set_target_properties(hadoop PROPERTIES SOVERSION ${LIBHADOOP_VERSION})
|
||||
hadoop_dual_output_directory(hadoop target/usr/local/lib)
|
||||
|
||||
# By embedding '$ORIGIN' into the RPATH of libhadoop.so, dlopen will look in
|
||||
# the directory containing libhadoop.so. However, $ORIGIN is not supported by
|
||||
# all operating systems.
|
||||
if(${CMAKE_SYSTEM_NAME} MATCHES "Linux|SunOS")
|
||||
set(RPATH "\$ORIGIN/")
|
||||
if (EXTRA_LIBHADOOP_RPATH)
|
||||
if(EXTRA_LIBHADOOP_RPATH)
|
||||
set(RPATH "${RPATH}:${EXTRA_LIBHADOOP_RPATH}/")
|
||||
endif(EXTRA_LIBHADOOP_RPATH)
|
||||
SET_TARGET_PROPERTIES(hadoop
|
||||
PROPERTIES INSTALL_RPATH "${RPATH}")
|
||||
ENDIF()
|
||||
endif()
|
||||
set_target_properties(hadoop PROPERTIES INSTALL_RPATH "${RPATH}")
|
||||
endif()
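Whether '$ORIGIN' really made it into the installed RPATH can be verified on the built artifact; an illustrative check (the exact path depends on the local build layout) is:

  # inspect the dynamic section of the freshly built library
  readelf -d target/native/target/usr/local/lib/libhadoop.so | grep -iE 'r(un)?path'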
|
||||
|
||||
target_link_dual_libraries(hadoop
|
||||
${LIB_DL}
|
||||
${JAVA_JVM_LIBRARY}
|
||||
# Build the CRC32 test executable.
|
||||
add_executable(test_bulk_crc32
|
||||
${SRC}/util/bulk_crc32.c
|
||||
${BULK_CRC_ARCH_SOURCE_FIlE}
|
||||
${TST}/util/test_bulk_crc32.c
|
||||
)
|
||||
SET(LIBHADOOP_VERSION "1.0.0")
|
||||
SET_TARGET_PROPERTIES(hadoop PROPERTIES
|
||||
SOVERSION ${LIBHADOOP_VERSION})
|
||||
dual_output_directory(hadoop target/usr/local/lib)
|
||||
|
|
|
@ -1,124 +0,0 @@
|
|||
#
|
||||
# Licensed to the Apache Software Foundation (ASF) under one
|
||||
# or more contributor license agreements. See the NOTICE file
|
||||
# distributed with this work for additional information
|
||||
# regarding copyright ownership. The ASF licenses this file
|
||||
# to you under the Apache License, Version 2.0 (the
|
||||
# "License"); you may not use this file except in compliance
|
||||
# with the License. You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
|
||||
cmake_minimum_required(VERSION 2.6 FATAL_ERROR)
|
||||
|
||||
# If JVM_ARCH_DATA_MODEL is 32, compile all binaries as 32-bit.
|
||||
# This variable is set by maven.
|
||||
if (JVM_ARCH_DATA_MODEL EQUAL 32)
|
||||
# Force 32-bit code generation on amd64/x86_64, ppc64, sparc64
|
||||
if (CMAKE_COMPILER_IS_GNUCC AND CMAKE_SYSTEM_PROCESSOR MATCHES ".*64")
|
||||
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -m32")
|
||||
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -m32")
|
||||
set(CMAKE_LD_FLAGS "${CMAKE_LD_FLAGS} -m32")
|
||||
endif ()
|
||||
if (CMAKE_SYSTEM_PROCESSOR STREQUAL "x86_64" OR CMAKE_SYSTEM_PROCESSOR STREQUAL "amd64")
|
||||
# Set CMAKE_SYSTEM_PROCESSOR to ensure that find_package(JNI) will use
|
||||
# the 32-bit version of libjvm.so.
|
||||
set(CMAKE_SYSTEM_PROCESSOR "i686")
|
||||
endif ()
|
||||
endif (JVM_ARCH_DATA_MODEL EQUAL 32)
|
||||
|
||||
# Determine float ABI of JVM on ARM Linux
|
||||
if (CMAKE_SYSTEM_PROCESSOR MATCHES "^arm" AND CMAKE_SYSTEM_NAME STREQUAL "Linux")
|
||||
find_program(READELF readelf)
|
||||
if (READELF MATCHES "NOTFOUND")
|
||||
message(WARNING "readelf not found; JVM float ABI detection disabled")
|
||||
else (READELF MATCHES "NOTFOUND")
|
||||
execute_process(
|
||||
COMMAND ${READELF} -A ${JAVA_JVM_LIBRARY}
|
||||
OUTPUT_VARIABLE JVM_ELF_ARCH
|
||||
ERROR_QUIET)
|
||||
if (NOT JVM_ELF_ARCH MATCHES "Tag_ABI_VFP_args: VFP registers")
|
||||
message("Soft-float JVM detected")
|
||||
|
||||
# Test compilation with -mfloat-abi=softfp using an arbitrary libc function
|
||||
# (typically fails with "fatal error: bits/predefs.h: No such file or directory"
|
||||
# if soft-float dev libraries are not installed)
|
||||
include(CMakePushCheckState)
|
||||
cmake_push_check_state()
|
||||
set(CMAKE_REQUIRED_FLAGS "${CMAKE_REQUIRED_FLAGS} -mfloat-abi=softfp")
|
||||
include(CheckSymbolExists)
|
||||
check_symbol_exists(exit stdlib.h SOFTFP_AVAILABLE)
|
||||
if (NOT SOFTFP_AVAILABLE)
|
||||
message(FATAL_ERROR "Soft-float dev libraries required (e.g. 'apt-get install libc6-dev-armel' on Debian/Ubuntu)")
|
||||
endif (NOT SOFTFP_AVAILABLE)
|
||||
cmake_pop_check_state()
|
||||
|
||||
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -mfloat-abi=softfp")
|
||||
endif ()
|
||||
endif (READELF MATCHES "NOTFOUND")
|
||||
endif (CMAKE_SYSTEM_PROCESSOR MATCHES "^arm" AND CMAKE_SYSTEM_NAME STREQUAL "Linux")
|
||||
|
||||
IF("${CMAKE_SYSTEM}" MATCHES "Linux")
|
||||
#
|
||||
# Locate JNI_INCLUDE_DIRS and JNI_LIBRARIES.
|
||||
# Since we were invoked from Maven, we know that the JAVA_HOME environment
|
||||
# variable is valid. So we ignore system paths here and just use JAVA_HOME.
|
||||
#
|
||||
FILE(TO_CMAKE_PATH "$ENV{JAVA_HOME}" _JAVA_HOME)
|
||||
IF(CMAKE_SYSTEM_PROCESSOR MATCHES "^i.86$")
|
||||
SET(_java_libarch "i386")
|
||||
ELSEIF (CMAKE_SYSTEM_PROCESSOR STREQUAL "x86_64" OR CMAKE_SYSTEM_PROCESSOR STREQUAL "amd64")
|
||||
SET(_java_libarch "amd64")
|
||||
ELSEIF (CMAKE_SYSTEM_PROCESSOR MATCHES "^arm")
|
||||
SET(_java_libarch "arm")
|
||||
ELSEIF (CMAKE_SYSTEM_PROCESSOR MATCHES "^(powerpc|ppc)64le")
|
||||
IF(EXISTS "${_JAVA_HOME}/jre/lib/ppc64le")
|
||||
SET(_java_libarch "ppc64le")
|
||||
ELSE()
|
||||
SET(_java_libarch "ppc64")
|
||||
ENDIF()
|
||||
ELSE()
|
||||
SET(_java_libarch ${CMAKE_SYSTEM_PROCESSOR})
|
||||
ENDIF()
|
||||
SET(_JDK_DIRS "${_JAVA_HOME}/jre/lib/${_java_libarch}/*"
|
||||
"${_JAVA_HOME}/jre/lib/${_java_libarch}"
|
||||
"${_JAVA_HOME}/jre/lib/*"
|
||||
"${_JAVA_HOME}/jre/lib"
|
||||
"${_JAVA_HOME}/lib/*"
|
||||
"${_JAVA_HOME}/lib"
|
||||
"${_JAVA_HOME}/include/*"
|
||||
"${_JAVA_HOME}/include"
|
||||
"${_JAVA_HOME}"
|
||||
)
|
||||
FIND_PATH(JAVA_INCLUDE_PATH
|
||||
NAMES jni.h
|
||||
PATHS ${_JDK_DIRS}
|
||||
NO_DEFAULT_PATH)
|
||||
#In IBM java, it's jniport.h instead of jni_md.h
|
||||
FIND_PATH(JAVA_INCLUDE_PATH2
|
||||
NAMES jni_md.h jniport.h
|
||||
PATHS ${_JDK_DIRS}
|
||||
NO_DEFAULT_PATH)
|
||||
SET(JNI_INCLUDE_DIRS ${JAVA_INCLUDE_PATH} ${JAVA_INCLUDE_PATH2})
|
||||
FIND_LIBRARY(JAVA_JVM_LIBRARY
|
||||
NAMES jvm JavaVM
|
||||
PATHS ${_JDK_DIRS}
|
||||
NO_DEFAULT_PATH)
|
||||
SET(JNI_LIBRARIES ${JAVA_JVM_LIBRARY})
|
||||
MESSAGE("JAVA_HOME=${JAVA_HOME}, JAVA_JVM_LIBRARY=${JAVA_JVM_LIBRARY}")
|
||||
MESSAGE("JAVA_INCLUDE_PATH=${JAVA_INCLUDE_PATH}, JAVA_INCLUDE_PATH2=${JAVA_INCLUDE_PATH2}")
|
||||
IF(JAVA_JVM_LIBRARY AND JAVA_INCLUDE_PATH AND JAVA_INCLUDE_PATH2)
|
||||
MESSAGE("Located all JNI components successfully.")
|
||||
ELSE()
|
||||
MESSAGE(FATAL_ERROR "Failed to find a viable JVM installation under JAVA_HOME.")
|
||||
ENDIF()
|
||||
ELSE()
|
||||
find_package(JNI REQUIRED)
|
||||
ENDIF()
|
|
@ -15,37 +15,33 @@
|
|||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
function hadoop_usage()
|
||||
{
|
||||
echo "Usage: hadoop [--config confdir] [COMMAND | CLASSNAME]"
|
||||
echo " CLASSNAME run the class named CLASSNAME"
|
||||
echo " or"
|
||||
echo " where COMMAND is one of:"
|
||||
echo " archive -archiveName NAME -p <parent path> <src>* <dest>"
|
||||
echo " create a Hadoop archive"
|
||||
echo " checknative [-a|-h] check native Hadoop and compression "
|
||||
echo " libraries availability"
|
||||
echo " classpath prints the class path needed to get the"
|
||||
echo " Hadoop jar and the required libraries"
|
||||
echo " credential interact with credential providers"
|
||||
echo " daemonlog get/set the log level for each daemon"
|
||||
echo " distch path:owner:group:permission"
|
||||
echo " distributed metadata changer"
|
||||
echo " distcp <srcurl> <desturl> "
|
||||
echo " copy file or directories recursively"
|
||||
echo " fs run a generic filesystem user client"
|
||||
echo " jar <jar> run a jar file"
|
||||
echo " note: please use \"yarn jar\" to launch"
|
||||
echo " YARN applications, not this command."
|
||||
echo " jnipath prints the java.library.path"
|
||||
echo " kerbname show auth_to_local principal conversion"
|
||||
echo " key manage keys via the KeyProvider"
|
||||
echo " trace view and modify Hadoop tracing settings"
|
||||
echo " version print the version"
|
||||
echo ""
|
||||
echo "Most commands print help when invoked w/o parameters."
|
||||
}
|
||||
MYNAME="${BASH_SOURCE-$0}"
|
||||
|
||||
function hadoop_usage
|
||||
{
|
||||
hadoop_add_option "buildpaths" "attempt to add class files from build tree"
|
||||
hadoop_add_option "hostnames list[,of,host,names]" "hosts to use in slave mode"
|
||||
hadoop_add_option "loglevel level" "set the log4j level for this command"
|
||||
hadoop_add_option "hosts filename" "list of hosts to use in slave mode"
|
||||
hadoop_add_option "slaves" "turn on slave mode"
|
||||
|
||||
hadoop_add_subcommand "archive" "create a Hadoop archive"
|
||||
hadoop_add_subcommand "checknative" "check native Hadoop and compression libraries availability"
|
||||
hadoop_add_subcommand "classpath" "prints the class path needed to get the Hadoop jar and the required libraries"
|
||||
hadoop_add_subcommand "conftest" "validate configuration XML files"
|
||||
hadoop_add_subcommand "credential" "interact with credential providers"
|
||||
hadoop_add_subcommand "daemonlog" "get/set the log level for each daemon"
|
||||
hadoop_add_subcommand "distch" "distributed metadata changer"
|
||||
hadoop_add_subcommand "distcp" "copy file or directories recursively"
|
||||
hadoop_add_subcommand "fs" "run a generic filesystem user client"
|
||||
hadoop_add_subcommand "jar <jar>" "run a jar file. NOTE: please use \"yarn jar\" to launch YARN applications, not this command."
|
||||
hadoop_add_subcommand "jnipath" "prints the java.library.path"
|
||||
hadoop_add_subcommand "kerbname" "show auth_to_local principal conversion"
|
||||
hadoop_add_subcommand "key" "manage keys via the KeyProvider"
|
||||
hadoop_add_subcommand "trace" "view and modify Hadoop tracing settings"
|
||||
hadoop_add_subcommand "version" "print the version"
|
||||
hadoop_generate_usage "${MYNAME}" true
|
||||
}
|
||||
|
||||
# This script runs the hadoop core commands.
|
||||
|
||||
|
@ -53,8 +49,7 @@ function hadoop_usage()
|
|||
if [[ -n "${HADOOP_PREFIX}" ]]; then
|
||||
DEFAULT_LIBEXEC_DIR="${HADOOP_PREFIX}/libexec"
|
||||
else
|
||||
this="${BASH_SOURCE-$0}"
|
||||
bin=$(cd -P -- "$(dirname -- "${this}")" >/dev/null && pwd -P)
|
||||
bin=$(cd -P -- "$(dirname -- "${MYNAME}")" >/dev/null && pwd -P)
|
||||
DEFAULT_LIBEXEC_DIR="${bin}/../libexec"
|
||||
fi
|
||||
|
||||
|
@ -127,6 +122,9 @@ case ${COMMAND} in
|
|||
classpath)
|
||||
hadoop_do_classpath_subcommand CLASS "$@"
|
||||
;;
|
||||
conftest)
|
||||
CLASS=org.apache.hadoop.util.ConfTest
|
||||
;;
|
||||
credential)
|
||||
CLASS=org.apache.hadoop.security.alias.CredentialShell
|
||||
;;
|
||||
|
|
|
@ -93,75 +93,8 @@ hadoop_bootstrap
|
|||
# shellcheck disable=SC2034
|
||||
HADOOP_USER_PARAMS=("$@")
|
||||
|
||||
HADOOP_DAEMON_MODE="default"
|
||||
|
||||
while [[ -z "${_hadoop_common_done}" ]]; do
|
||||
case $1 in
|
||||
--buildpaths)
|
||||
# shellcheck disable=SC2034
|
||||
HADOOP_ENABLE_BUILD_PATHS=true
|
||||
shift
|
||||
;;
|
||||
--config)
|
||||
shift
|
||||
confdir=$1
|
||||
shift
|
||||
if [[ -d "${confdir}" ]]; then
|
||||
# shellcheck disable=SC2034
|
||||
HADOOP_CONF_DIR="${confdir}"
|
||||
elif [[ -z "${confdir}" ]]; then
|
||||
hadoop_error "ERROR: No parameter provided for --config "
|
||||
hadoop_exit_with_usage 1
|
||||
else
|
||||
hadoop_error "ERROR: Cannot find configuration directory \"${confdir}\""
|
||||
hadoop_exit_with_usage 1
|
||||
fi
|
||||
;;
|
||||
--daemon)
|
||||
shift
|
||||
HADOOP_DAEMON_MODE=$1
|
||||
shift
|
||||
if [[ -z "${HADOOP_DAEMON_MODE}" || \
|
||||
! "${HADOOP_DAEMON_MODE}" =~ ^st(art|op|atus)$ ]]; then
|
||||
hadoop_error "ERROR: --daemon must be followed by either \"start\", \"stop\", or \"status\"."
|
||||
hadoop_exit_with_usage 1
|
||||
fi
|
||||
;;
|
||||
--debug)
|
||||
shift
|
||||
# shellcheck disable=SC2034
|
||||
HADOOP_SHELL_SCRIPT_DEBUG=true
|
||||
;;
|
||||
--help|-help|-h|help|--h|--\?|-\?|\?)
|
||||
hadoop_exit_with_usage 0
|
||||
;;
|
||||
--hostnames)
|
||||
shift
|
||||
# shellcheck disable=SC2034
|
||||
HADOOP_SLAVE_NAMES="$1"
|
||||
shift
|
||||
;;
|
||||
--hosts)
|
||||
shift
|
||||
hadoop_populate_slaves_file "$1"
|
||||
shift
|
||||
;;
|
||||
--loglevel)
|
||||
shift
|
||||
# shellcheck disable=SC2034
|
||||
HADOOP_LOGLEVEL="$1"
|
||||
shift
|
||||
;;
|
||||
--slaves)
|
||||
shift
|
||||
# shellcheck disable=SC2034
|
||||
HADOOP_SLAVE_MODE=true
|
||||
;;
|
||||
*)
|
||||
_hadoop_common_done=true
|
||||
;;
|
||||
esac
|
||||
done
|
||||
hadoop_parse_args "$@"
|
||||
shift "${HADOOP_PARSE_COUNTER}"
|
||||
|
||||
#
|
||||
# Setup the base-line environment
|
||||
|
|
423
hadoop-common-project/hadoop-common/src/main/bin/hadoop-functions.sh
Normal file → Executable file
|
@ -14,6 +14,11 @@
|
|||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
# we need to declare this globally as an array, which can only
|
||||
# be done outside of a function
|
||||
declare -a HADOOP_SUBCMD_USAGE
|
||||
declare -a HADOOP_OPTION_USAGE
|
||||
|
||||
## @description Print a message to stderr
|
||||
## @audience public
|
||||
## @stability stable
|
||||
|
@ -36,6 +41,163 @@ function hadoop_debug
|
|||
fi
|
||||
}
|
||||
|
||||
## @description Add a subcommand to the usage output
|
||||
## @audience private
|
||||
## @stability evolving
|
||||
## @replaceable no
|
||||
## @param subcommand
|
||||
## @param subcommanddesc
|
||||
function hadoop_add_subcommand
|
||||
{
|
||||
local subcmd=$1
|
||||
local text=$2
|
||||
|
||||
HADOOP_SUBCMD_USAGE[${HADOOP_SUBCMD_USAGE_COUNTER}]="${subcmd}@${text}"
|
||||
((HADOOP_SUBCMD_USAGE_COUNTER=HADOOP_SUBCMD_USAGE_COUNTER+1))
|
||||
}
|
||||
|
||||
## @description Add an option to the usage output
|
||||
## @audience private
|
||||
## @stability evolving
|
||||
## @replaceable no
|
||||
## @param subcommand
|
||||
## @param subcommanddesc
|
||||
function hadoop_add_option
|
||||
{
|
||||
local option=$1
|
||||
local text=$2
|
||||
|
||||
HADOOP_OPTION_USAGE[${HADOOP_OPTION_USAGE_COUNTER}]="${option}@${text}"
|
||||
((HADOOP_OPTION_USAGE_COUNTER=HADOOP_OPTION_USAGE_COUNTER+1))
|
||||
}
|
||||
|
||||
## @description Reset the usage information to blank
|
||||
## @audience private
|
||||
## @stability evolving
|
||||
## @replaceable no
|
||||
function hadoop_reset_usage
|
||||
{
|
||||
HADOOP_SUBCMD_USAGE=()
|
||||
HADOOP_OPTION_USAGE=()
|
||||
HADOOP_SUBCMD_USAGE_COUNTER=0
|
||||
HADOOP_OPTION_USAGE_COUNTER=0
|
||||
}
|
||||
|
||||
## @description Print a screen-size aware two-column output
|
||||
## @audience private
|
||||
## @stability evolving
|
||||
## @replaceable no
|
||||
## @param array
|
||||
function hadoop_generic_columnprinter
|
||||
{
|
||||
declare -a input=("$@")
|
||||
declare -i i=0
|
||||
declare -i counter=0
|
||||
declare line
|
||||
declare text
|
||||
declare option
|
||||
declare giventext
|
||||
declare -i maxoptsize
|
||||
declare -i foldsize
|
||||
declare -a tmpa
|
||||
declare numcols
|
||||
|
||||
if [[ -n "${COLUMNS}" ]]; then
|
||||
numcols=${COLUMNS}
|
||||
else
|
||||
numcols=$(tput cols) 2>/dev/null
|
||||
fi
|
||||
|
||||
if [[ -z "${numcols}"
|
||||
|| ! "${numcols}" =~ ^[0-9]+$ ]]; then
|
||||
numcols=75
|
||||
else
|
||||
((numcols=numcols-5))
|
||||
fi
|
||||
|
||||
while read -r line; do
|
||||
tmpa[${counter}]=${line}
|
||||
((counter=counter+1))
|
||||
option=$(echo "${line}" | cut -f1 -d'@')
|
||||
if [[ ${#option} -gt ${maxoptsize} ]]; then
|
||||
maxoptsize=${#option}
|
||||
fi
|
||||
done < <(for text in "${input[@]}"; do
|
||||
echo "${text}"
|
||||
done | sort)
|
||||
|
||||
i=0
|
||||
((foldsize=numcols-maxoptsize))
|
||||
|
||||
until [[ $i -eq ${#tmpa[@]} ]]; do
|
||||
option=$(echo "${tmpa[$i]}" | cut -f1 -d'@')
|
||||
giventext=$(echo "${tmpa[$i]}" | cut -f2 -d'@')
|
||||
|
||||
while read -r line; do
|
||||
printf "%-${maxoptsize}s %-s\n" "${option}" "${line}"
|
||||
option=" "
|
||||
done < <(echo "${giventext}"| fold -s -w ${foldsize})
|
||||
((i=i+1))
|
||||
done
|
||||
}
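Each record handed to the printer is a single "name@description" string that is split on '@' and folded to the detected terminal width; a minimal sketch with made-up entries:

  demo_usage=("start@Starts the daemon in the foreground" "stop@Stops the daemon and waits for it to exit")
  hadoop_generic_columnprinter "${demo_usage[@]}"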
|
||||
|
||||
## @description generate standard usage output
|
||||
## @description and optionally takes a class
|
||||
## @audience private
|
||||
## @stability evolving
|
||||
## @replaceable no
|
||||
## @param execname
|
||||
## @param true|false
|
||||
## @param [text to use in place of SUBCOMMAND]
|
||||
function hadoop_generate_usage
|
||||
{
|
||||
local cmd=$1
|
||||
local takesclass=$2
|
||||
local subcmdtext=${3:-"SUBCOMMAND"}
|
||||
local haveoptions
|
||||
local optstring
|
||||
local havesubs
|
||||
local subcmdstring
|
||||
|
||||
cmd=${cmd##*/}
|
||||
|
||||
if [[ -n "${HADOOP_OPTION_USAGE_COUNTER}"
|
||||
&& "${HADOOP_OPTION_USAGE_COUNTER}" -gt 0 ]]; then
|
||||
haveoptions=true
|
||||
optstring=" [OPTIONS]"
|
||||
fi
|
||||
|
||||
if [[ -n "${HADOOP_SUBCMD_USAGE_COUNTER}"
|
||||
&& "${HADOOP_SUBCMD_USAGE_COUNTER}" -gt 0 ]]; then
|
||||
havesubs=true
|
||||
subcmdstring=" ${subcmdtext} [${subcmdtext} OPTIONS]"
|
||||
fi
|
||||
|
||||
echo "Usage: ${cmd}${optstring}${subcmdstring}"
|
||||
if [[ ${takesclass} = true ]]; then
|
||||
echo " or ${cmd}${optstring} CLASSNAME [CLASSNAME OPTIONS]"
|
||||
echo " where CLASSNAME is a user-provided Java class"
|
||||
fi
|
||||
|
||||
if [[ "${haveoptions}" = true ]]; then
|
||||
echo ""
|
||||
echo " OPTIONS is none or any of:"
|
||||
echo ""
|
||||
|
||||
hadoop_generic_columnprinter "${HADOOP_OPTION_USAGE[@]}"
|
||||
fi
|
||||
|
||||
if [[ "${havesubs}" = true ]]; then
|
||||
echo ""
|
||||
echo " ${subcmdtext} is one of:"
|
||||
echo ""
|
||||
|
||||
hadoop_generic_columnprinter "${HADOOP_SUBCMD_USAGE[@]}"
|
||||
echo ""
|
||||
echo "${subcmdtext} may print help when invoked w/o parameters or with -h."
|
||||
fi
|
||||
}
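The rewritten bin/hadoop earlier in this diff is the real consumer of these helpers; reduced to a skeleton (the subcommand below is made up and not part of the patch), a script built on them looks roughly like:

  MYNAME="${BASH_SOURCE-$0}"
  function hadoop_usage
  {
    hadoop_add_option "--debug" "turn on shell script debug mode"
    hadoop_add_subcommand "frobnicate" "rewrite cluster metadata"
    hadoop_generate_usage "${MYNAME}" false
  }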
|
||||
|
||||
## @description Replace `oldvar` with `newvar` if `oldvar` exists.
|
||||
## @audience public
|
||||
## @stability stable
|
||||
|
@ -101,6 +263,9 @@ function hadoop_bootstrap
|
|||
# setup a default TOOL_PATH
|
||||
TOOL_PATH=${TOOL_PATH:-${HADOOP_PREFIX}/share/hadoop/tools/lib/*}
|
||||
|
||||
# usage output set to zero
|
||||
hadoop_reset_usage
|
||||
|
||||
export HADOOP_OS_TYPE=${HADOOP_OS_TYPE:-$(uname -s)}
|
||||
|
||||
# defaults
|
||||
|
@ -193,6 +358,7 @@ function hadoop_import_shellprofiles
|
|||
|
||||
if [[ -d "${HADOOP_LIBEXEC_DIR}/shellprofile.d" ]]; then
|
||||
files1=(${HADOOP_LIBEXEC_DIR}/shellprofile.d/*.sh)
|
||||
hadoop_debug "shellprofiles: ${files1[*]}"
|
||||
else
|
||||
hadoop_error "WARNING: ${HADOOP_LIBEXEC_DIR}/shellprofile.d doesn't exist. Functionality may not work."
|
||||
fi
|
||||
|
@ -203,7 +369,8 @@ function hadoop_import_shellprofiles
|
|||
|
||||
for i in "${files1[@]}" "${files2[@]}"
|
||||
do
|
||||
if [[ -n "${i}" ]]; then
|
||||
if [[ -n "${i}"
|
||||
&& -f "${i}" ]]; then
|
||||
hadoop_debug "Profiles: importing ${i}"
|
||||
. "${i}"
|
||||
fi
|
||||
|
@ -325,6 +492,26 @@ function hadoop_basic_init
|
|||
export HADOOP_MAPRED_HOME="${HADOOP_PREFIX}"
|
||||
fi
|
||||
|
||||
if [[ ! -d "${HADOOP_COMMON_HOME}" ]]; then
|
||||
hadoop_error "ERROR: Invalid HADOOP_COMMON_HOME"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
if [[ ! -d "${HADOOP_HDFS_HOME}" ]]; then
|
||||
hadoop_error "ERROR: Invalid HADOOP_HDFS_HOME"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
if [[ ! -d "${HADOOP_YARN_HOME}" ]]; then
|
||||
hadoop_error "ERROR: Invalid HADOOP_YARN_HOME"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
if [[ ! -d "${HADOOP_MAPRED_HOME}" ]]; then
|
||||
hadoop_error "ERROR: Invalid HADOOP_MAPRED_HOME"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
HADOOP_IDENT_STRING=${HADOOP_IDENT_STRING:-$USER}
|
||||
HADOOP_LOG_DIR=${HADOOP_LOG_DIR:-"${HADOOP_PREFIX}/logs"}
|
||||
HADOOP_LOGFILE=${HADOOP_LOGFILE:-hadoop.log}
|
||||
|
@ -461,27 +648,33 @@ function hadoop_connect_to_hosts
|
|||
if [[ -z "${SLAVE_NAMES}" ]]; then
|
||||
SLAVE_NAMES=$(sed 's/#.*$//;/^$/d' "${SLAVE_FILE}")
|
||||
fi
|
||||
|
||||
# quoting here gets tricky. it's easier to push it into a function
|
||||
# so that we don't have to deal with it. However...
|
||||
# xargs can't use a function so instead we'll export it out
|
||||
# and force it into a subshell
|
||||
# moral of the story: just use pdsh.
|
||||
export -f hadoop_actual_ssh
|
||||
export HADOOP_SSH_OPTS
|
||||
|
||||
# xargs is used with option -I to replace the placeholder in arguments
|
||||
# list with each hostname read from stdin/pipe. But it considers one
|
||||
# line as one argument while reading from stdin/pipe, so place each
|
||||
# hostname on a separate line when passing them via the pipe.
|
||||
SLAVE_NAMES=$(echo "$SLAVE_NAMES" | tr ' ' '\n' )
|
||||
echo "${SLAVE_NAMES}" | \
|
||||
xargs -n 1 -P"${HADOOP_SSH_PARALLEL}" \
|
||||
-I {} bash -c -- "hadoop_actual_ssh {} ${params}"
|
||||
wait
|
||||
hadoop_connect_to_hosts_without_pdsh "${params}"
|
||||
fi
|
||||
}
|
||||
|
||||
## @description Connect to ${SLAVE_NAMES} and execute command
|
||||
## @description under the environment which does not support pdsh.
|
||||
## @audience private
|
||||
## @stability evolving
|
||||
## @replaceable yes
|
||||
## @param command
|
||||
## @param [...]
|
||||
function hadoop_connect_to_hosts_without_pdsh
|
||||
{
|
||||
# shellcheck disable=SC2124
|
||||
local params="$@"
|
||||
local slaves=(${SLAVE_NAMES})
|
||||
for (( i = 0; i < ${#slaves[@]}; i++ ))
|
||||
do
|
||||
if (( i != 0 && i % HADOOP_SSH_PARALLEL == 0 )); then
|
||||
wait
|
||||
fi
|
||||
# shellcheck disable=SC2086
|
||||
hadoop_actual_ssh "${slaves[$i]}" ${params} &
|
||||
done
|
||||
wait
|
||||
}
|
||||
|
||||
## @description Utility routine to handle --slaves mode
|
||||
## @audience private
|
||||
## @stability evolving
|
||||
|
@ -499,7 +692,7 @@ function hadoop_common_slave_mode_execute
|
|||
# to prevent loops
|
||||
# Also remove --hostnames and --hosts along with arg values
|
||||
local argsSize=${#argv[@]};
|
||||
for (( i = 0; i < $argsSize; i++ ))
|
||||
for (( i = 0; i < argsSize; i++ ))
|
||||
do
|
||||
if [[ "${argv[$i]}" =~ ^--slaves$ ]]; then
|
||||
unset argv[$i]
|
||||
|
@ -510,6 +703,10 @@ function hadoop_common_slave_mode_execute
|
|||
unset argv[$i];
|
||||
fi
|
||||
done
|
||||
if [[ ${QATESTMODE} = true ]]; then
|
||||
echo "${argv[@]}"
|
||||
return
|
||||
fi
|
||||
hadoop_connect_to_hosts -- "${argv[@]}"
|
||||
}
|
||||
|
||||
|
@ -556,8 +753,12 @@ function hadoop_add_param
|
|||
# delimited
|
||||
#
|
||||
if [[ ! ${!1} =~ $2 ]] ; then
|
||||
# shellcheck disable=SC2086
|
||||
eval $1="'${!1} $3'"
|
||||
#shellcheck disable=SC2140
|
||||
eval "$1"="'${!1} $3'"
|
||||
if [[ ${!1:0:1} = ' ' ]]; then
|
||||
#shellcheck disable=SC2140
|
||||
eval "$1"="'${!1# }'"
|
||||
fi
|
||||
hadoop_debug "$1 accepted $3"
|
||||
else
|
||||
hadoop_debug "$1 declined $3"
|
||||
|
@ -595,7 +796,8 @@ function hadoop_add_classpath
|
|||
# for wildcard at end, we can
|
||||
# at least check the dir exists
|
||||
if [[ $1 =~ ^.*\*$ ]]; then
|
||||
local mp=$(dirname "$1")
|
||||
local mp
|
||||
mp=$(dirname "$1")
|
||||
if [[ ! -d "${mp}" ]]; then
|
||||
hadoop_debug "Rejected CLASSPATH: $1 (not a dir)"
|
||||
return 1
|
||||
|
@ -654,7 +856,7 @@ function hadoop_add_colonpath
|
|||
hadoop_debug "Prepend colonpath($1): $2"
|
||||
else
|
||||
# shellcheck disable=SC2086
|
||||
eval $1+="'$2'"
|
||||
eval $1+=":'$2'"
|
||||
hadoop_debug "Append colonpath($1): $2"
|
||||
fi
|
||||
return 0
|
||||
|
@ -693,11 +895,14 @@ function hadoop_add_javalibpath
|
|||
## @return 1 = failure (doesn't exist or some other reason)
|
||||
function hadoop_add_ldlibpath
|
||||
{
|
||||
local status
|
||||
# specialized function for a common use case
|
||||
hadoop_add_colonpath LD_LIBRARY_PATH "$1" "$2"
|
||||
status=$?
|
||||
|
||||
# note that we export this
|
||||
export LD_LIBRARY_PATH
|
||||
return ${status}
|
||||
}
|
||||
|
||||
## @description Add the common/core Hadoop components to the
|
||||
|
@ -705,21 +910,29 @@ function hadoop_add_ldlibpath
|
|||
## @audience private
|
||||
## @stability evolving
|
||||
## @replaceable yes
|
||||
## @returns 1 on failure, may exit
|
||||
## @returns 0 on success
|
||||
function hadoop_add_common_to_classpath
|
||||
{
|
||||
#
|
||||
# get all of the common jars+config in the path
|
||||
#
|
||||
|
||||
if [[ -z "${HADOOP_COMMON_HOME}"
|
||||
|| -z "${HADOOP_COMMON_DIR}"
|
||||
|| -z "${HADOOP_COMMON_LIB_JARS_DIR}" ]]; then
|
||||
hadoop_debug "COMMON_HOME=${HADOOP_COMMON_HOME}"
|
||||
hadoop_debug "COMMON_DIR=${HADOOP_COMMON_DIR}"
|
||||
hadoop_debug "COMMON_LIB_JARS_DIR=${HADOOP_COMMON_LIB_JARS_DIR}"
|
||||
hadoop_error "ERROR: HADOOP_COMMON_HOME or related vars are not configured."
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# developers
|
||||
if [[ -n "${HADOOP_ENABLE_BUILD_PATHS}" ]]; then
|
||||
hadoop_add_classpath "${HADOOP_COMMON_HOME}/hadoop-common/target/classes"
|
||||
fi
|
||||
|
||||
if [[ -d "${HADOOP_COMMON_HOME}/${HADOOP_COMMON_DIR}/webapps" ]]; then
|
||||
hadoop_add_classpath "${HADOOP_COMMON_HOME}/${HADOOP_COMMON_DIR}"
|
||||
fi
|
||||
|
||||
hadoop_add_classpath "${HADOOP_COMMON_HOME}/${HADOOP_COMMON_LIB_JARS_DIR}"'/*'
|
||||
hadoop_add_classpath "${HADOOP_COMMON_HOME}/${HADOOP_COMMON_DIR}"'/*'
|
||||
}
|
||||
|
@ -738,29 +951,29 @@ function hadoop_add_to_classpath_userpath
|
|||
# set env-var HADOOP_USER_CLASSPATH_FIRST
|
||||
# we'll also dedupe it, because we're cool like that.
|
||||
#
|
||||
local c
|
||||
local array
|
||||
local i
|
||||
local j
|
||||
let c=0
|
||||
declare -a array
|
||||
declare -i c=0
|
||||
declare -i j
|
||||
declare -i i
|
||||
declare idx
|
||||
|
||||
if [[ -n "${HADOOP_CLASSPATH}" ]]; then
|
||||
# I wonder if Java runs on VMS.
|
||||
for i in $(echo "${HADOOP_CLASSPATH}" | tr : '\n'); do
|
||||
array[$c]=$i
|
||||
let c+=1
|
||||
for idx in $(echo "${HADOOP_CLASSPATH}" | tr : '\n'); do
|
||||
array[${c}]=${idx}
|
||||
((c=c+1))
|
||||
done
|
||||
let j=c-1
|
||||
((j=c-1))
|
||||
|
||||
if [[ -z "${HADOOP_USE_CLIENT_CLASSLOADER}" ]]; then
|
||||
if [[ -z "${HADOOP_USER_CLASSPATH_FIRST}" ]]; then
|
||||
for ((i=j; i>=0; i--)); do
|
||||
hadoop_add_classpath "${array[$i]}" before
|
||||
done
|
||||
else
|
||||
for ((i=0; i<=j; i++)); do
|
||||
hadoop_add_classpath "${array[$i]}" after
|
||||
done
|
||||
else
|
||||
for ((i=j; i>=0; i--)); do
|
||||
hadoop_add_classpath "${array[$i]}" before
|
||||
done
|
||||
fi
|
||||
fi
|
||||
fi
|
||||
|
@ -780,18 +993,32 @@ function hadoop_os_tricks
|
|||
Darwin)
|
||||
if [[ -z "${JAVA_HOME}" ]]; then
|
||||
if [[ -x /usr/libexec/java_home ]]; then
|
||||
export JAVA_HOME="$(/usr/libexec/java_home)"
|
||||
JAVA_HOME="$(/usr/libexec/java_home)"
|
||||
export JAVA_HOME
|
||||
else
|
||||
export JAVA_HOME=/Library/Java/Home
|
||||
JAVA_HOME=/Library/Java/Home
|
||||
export JAVA_HOME
|
||||
fi
|
||||
fi
|
||||
;;
|
||||
Linux)
|
||||
bindv6only=$(/sbin/sysctl -n net.ipv6.bindv6only 2> /dev/null)
|
||||
|
||||
# Newer versions of glibc use an arena memory allocator that
|
||||
# causes virtual memory usage to explode. This interacts badly
|
||||
# with the many threads that we use in Hadoop. Tune the variable
|
||||
# down to prevent vmem explosion.
|
||||
export MALLOC_ARENA_MAX=${MALLOC_ARENA_MAX:-4}
|
||||
# we put this in QA test mode off so that non-Linux can test
|
||||
if [[ "${QATESTMODE}" = true ]]; then
|
||||
return
|
||||
fi
|
||||
|
||||
# NOTE! HADOOP_ALLOW_IPV6 is a developer hook. We leave it
|
||||
# undocumented in hadoop-env.sh because we don't want users to
|
||||
# shoot themselves in the foot while devs make IPv6 work.
|
||||
|
||||
bindv6only=$(/sbin/sysctl -n net.ipv6.bindv6only 2> /dev/null)
|
||||
|
||||
if [[ -n "${bindv6only}" ]] &&
|
||||
[[ "${bindv6only}" -eq "1" ]] &&
|
||||
[[ "${HADOOP_ALLOW_IPV6}" != "yes" ]]; then
|
||||
|
@ -800,11 +1027,6 @@ function hadoop_os_tricks
|
|||
hadoop_error "ERROR: For more info: http://wiki.apache.org/hadoop/HadoopIPv6"
|
||||
exit 1
|
||||
fi
|
||||
# Newer versions of glibc use an arena memory allocator that
|
||||
# causes virtual memory usage to explode. This interacts badly
|
||||
# with the many threads that we use in Hadoop. Tune the variable
|
||||
# down to prevent vmem explosion.
|
||||
export MALLOC_ARENA_MAX=${MALLOC_ARENA_MAX:-4}
|
||||
;;
|
||||
CYGWIN*)
|
||||
# Flag that we're running on Cygwin to trigger path translation later.
|
||||
|
@ -848,7 +1070,7 @@ function hadoop_finalize_libpaths
|
|||
if [[ -n "${JAVA_LIBRARY_PATH}" ]]; then
|
||||
hadoop_translate_cygwin_path JAVA_LIBRARY_PATH
|
||||
hadoop_add_param HADOOP_OPTS java.library.path \
|
||||
"-Djava.library.path=${JAVA_LIBRARY_PATH}"
|
||||
"-Djava.library.path=${JAVA_LIBRARY_PATH}"
|
||||
export LD_LIBRARY_PATH
|
||||
fi
|
||||
}
|
||||
|
@ -997,6 +1219,7 @@ function hadoop_exit_with_usage
|
|||
if [[ -z $exitcode ]]; then
|
||||
exitcode=1
|
||||
fi
|
||||
# shellcheck disable=SC2034
|
||||
if declare -F hadoop_usage >/dev/null ; then
|
||||
hadoop_usage
|
||||
elif [[ -x /usr/bin/cowsay ]]; then
|
||||
|
@ -1293,6 +1516,7 @@ function hadoop_start_secure_daemon
|
|||
hadoop_rotate_log "${daemonoutfile}"
|
||||
hadoop_rotate_log "${daemonerrfile}"
|
||||
|
||||
# shellcheck disable=SC2153
|
||||
jsvc="${JSVC_HOME}/jsvc"
|
||||
if [[ ! -f "${jsvc}" ]]; then
|
||||
hadoop_error "JSVC_HOME is not set or set incorrectly. jsvc is required to run secure"
|
||||
|
@ -1319,6 +1543,7 @@ function hadoop_start_secure_daemon
|
|||
hadoop_error "ERROR: Cannot write ${daemonname} pid ${privpidfile}."
|
||||
fi
|
||||
|
||||
# shellcheck disable=SC2086
|
||||
exec "${jsvc}" \
|
||||
"-Dproc_${daemonname}" \
|
||||
-outfile "${daemonoutfile}" \
|
||||
|
@ -1634,3 +1859,101 @@ function hadoop_do_classpath_subcommand
|
|||
exit 0
|
||||
fi
|
||||
}
|
||||
|
||||
## @description generic shell script option parser. sets
|
||||
## @description HADOOP_PARSE_COUNTER to the number of arguments
|
||||
## @description the caller should shift
|
||||
## @audience private
|
||||
## @stability evolving
|
||||
## @replaceable yes
|
||||
## @param [parameters, typically "$@"]
|
||||
function hadoop_parse_args
|
||||
{
|
||||
HADOOP_DAEMON_MODE="default"
|
||||
HADOOP_PARSE_COUNTER=0
|
||||
|
||||
# not all of the options supported here are supported by all commands
|
||||
# however these are:
|
||||
hadoop_add_option "--config dir" "Hadoop config directory"
|
||||
hadoop_add_option "--debug" "turn on shell script debug mode"
|
||||
hadoop_add_option "--help" "usage information"
|
||||
|
||||
while true; do
|
||||
hadoop_debug "hadoop_parse_args: processing $1"
|
||||
case $1 in
|
||||
--buildpaths)
|
||||
# shellcheck disable=SC2034
|
||||
HADOOP_ENABLE_BUILD_PATHS=true
|
||||
shift
|
||||
((HADOOP_PARSE_COUNTER=HADOOP_PARSE_COUNTER+1))
|
||||
;;
|
||||
--config)
|
||||
shift
|
||||
confdir=$1
|
||||
shift
|
||||
((HADOOP_PARSE_COUNTER=HADOOP_PARSE_COUNTER+2))
|
||||
if [[ -d "${confdir}" ]]; then
|
||||
# shellcheck disable=SC2034
|
||||
HADOOP_CONF_DIR="${confdir}"
|
||||
elif [[ -z "${confdir}" ]]; then
|
||||
hadoop_error "ERROR: No parameter provided for --config "
|
||||
hadoop_exit_with_usage 1
|
||||
else
|
||||
hadoop_error "ERROR: Cannot find configuration directory \"${confdir}\""
|
||||
hadoop_exit_with_usage 1
|
||||
fi
|
||||
;;
|
||||
--daemon)
|
||||
shift
|
||||
HADOOP_DAEMON_MODE=$1
|
||||
shift
|
||||
((HADOOP_PARSE_COUNTER=HADOOP_PARSE_COUNTER+2))
|
||||
if [[ -z "${HADOOP_DAEMON_MODE}" || \
|
||||
! "${HADOOP_DAEMON_MODE}" =~ ^st(art|op|atus)$ ]]; then
|
||||
hadoop_error "ERROR: --daemon must be followed by either \"start\", \"stop\", or \"status\"."
|
||||
hadoop_exit_with_usage 1
|
||||
fi
|
||||
;;
|
||||
--debug)
|
||||
shift
|
||||
# shellcheck disable=SC2034
|
||||
HADOOP_SHELL_SCRIPT_DEBUG=true
|
||||
((HADOOP_PARSE_COUNTER=HADOOP_PARSE_COUNTER+1))
|
||||
;;
|
||||
--help|-help|-h|help|--h|--\?|-\?|\?)
|
||||
hadoop_exit_with_usage 0
|
||||
;;
|
||||
--hostnames)
|
||||
shift
|
||||
# shellcheck disable=SC2034
|
||||
HADOOP_SLAVE_NAMES="$1"
|
||||
shift
|
||||
((HADOOP_PARSE_COUNTER=HADOOP_PARSE_COUNTER+2))
|
||||
;;
|
||||
--hosts)
|
||||
shift
|
||||
hadoop_populate_slaves_file "$1"
|
||||
shift
|
||||
((HADOOP_PARSE_COUNTER=HADOOP_PARSE_COUNTER+2))
|
||||
;;
|
||||
--loglevel)
|
||||
shift
|
||||
# shellcheck disable=SC2034
|
||||
HADOOP_LOGLEVEL="$1"
|
||||
shift
|
||||
((HADOOP_PARSE_COUNTER=HADOOP_PARSE_COUNTER+2))
|
||||
;;
|
||||
--slaves)
|
||||
shift
|
||||
# shellcheck disable=SC2034
|
||||
HADOOP_SLAVE_MODE=true
|
||||
((HADOOP_PARSE_COUNTER=HADOOP_PARSE_COUNTER+1))
|
||||
;;
|
||||
*)
|
||||
break
|
||||
;;
|
||||
esac
|
||||
done
|
||||
|
||||
hadoop_debug "hadoop_parse: asking caller to skip ${HADOOP_PARSE_COUNTER}"
|
||||
}
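Since these switches are parsed before any subcommand dispatch, they compose with every command; hedged examples only (the config path is a placeholder):

  bin/hadoop --loglevel DEBUG --config /etc/hadoop/conf fs -ls /
  bin/hdfs --slaves --daemon start datanode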
|
|
@ -27,7 +27,8 @@
|
|||
# HADOOP_SSH_OPTS Options passed to ssh when running remote commands.
|
||||
##
|
||||
|
||||
function hadoop_usage {
|
||||
function hadoop_usage
|
||||
{
|
||||
echo "Usage: slaves.sh [--config confdir] command..."
|
||||
}
|
||||
|
||||
|
|
|
@ -50,7 +50,7 @@
|
|||
#
|
||||
|
||||
#
|
||||
# Another example: finding java
|
||||
# Example: finding java
|
||||
#
|
||||
# By default, Hadoop assumes that $JAVA_HOME is always defined
|
||||
# outside of its configuration. Eons ago, Apple standardized
|
||||
|
@ -85,3 +85,30 @@
|
|||
# exit 1
|
||||
# fi
|
||||
#}
|
||||
|
||||
#
|
||||
# Example: efficient command execution for the slaves
|
||||
#
|
||||
# To improve performance, you can use xargs -P
|
||||
# instead of the for loop, if supported.
|
||||
#
|
||||
#function hadoop_connect_to_hosts_without_pdsh
|
||||
#{
|
||||
# # quoting here gets tricky. it's easier to push it into a function
|
||||
# # so that we don't have to deal with it. However...
|
||||
# # xargs can't use a function so instead we'll export it out
|
||||
# # and force it into a subshell
|
||||
# # moral of the story: just use pdsh.
|
||||
# export -f hadoop_actual_ssh
|
||||
# export HADOOP_SSH_OPTS
|
||||
#
|
||||
# # xargs is used with option -I to replace the placeholder in arguments
|
||||
# # list with each hostname read from stdin/pipe. But it considers one
|
||||
# # line as one argument while reading from stdin/pipe, so place each
|
||||
# # hostname on a separate line when passing them via the pipe.
|
||||
# SLAVE_NAMES=$(echo "$SLAVE_NAMES" | tr ' ' '\n' )
|
||||
# echo "${SLAVE_NAMES}" | \
|
||||
# xargs -n 1 -P"${HADOOP_SSH_PARALLEL}" \
|
||||
# -I {} bash -c -- "hadoop_actual_ssh {} ${params}"
|
||||
# wait
|
||||
#}
|
||||
|
|
|
@ -203,6 +203,11 @@ log4j.appender.JSA.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{2}: %m%n
|
|||
log4j.logger.org.apache.hadoop.mapred.JobInProgress$JobSummary=${hadoop.mapreduce.jobsummary.logger}
|
||||
log4j.additivity.org.apache.hadoop.mapred.JobInProgress$JobSummary=false
|
||||
|
||||
#
|
||||
# shuffle connection log from shuffleHandler
|
||||
# Uncomment the following line to enable logging of shuffle connections
|
||||
# log4j.logger.org.apache.hadoop.mapred.ShuffleHandler.audit=DEBUG
|
||||
|
||||
#
|
||||
# Yarn ResourceManager Application Summary Log
|
||||
#
|
||||
|
|
|
@ -1,170 +0,0 @@
|
|||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
* contributor license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright ownership.
|
||||
* The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
* (the "License"); you may not use this file except in compliance with
|
||||
* the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
/**
|
||||
* General
|
||||
*/
|
||||
|
||||
img { border: 0; }
|
||||
|
||||
#content table {
|
||||
border: 0;
|
||||
width: 100%;
|
||||
}
|
||||
/*Hack to get IE to render the table at 100%*/
|
||||
* html #content table { margin-left: -3px; }
|
||||
|
||||
#content th,
|
||||
#content td {
|
||||
margin: 0;
|
||||
padding: 0;
|
||||
vertical-align: top;
|
||||
}
|
||||
|
||||
.clearboth {
|
||||
clear: both;
|
||||
}
|
||||
|
||||
.note, .warning, .fixme {
|
||||
border: solid black 1px;
|
||||
margin: 1em 3em;
|
||||
}
|
||||
|
||||
.note .label {
|
||||
background: #369;
|
||||
color: white;
|
||||
font-weight: bold;
|
||||
padding: 5px 10px;
|
||||
}
|
||||
.note .content {
|
||||
background: #F0F0FF;
|
||||
color: black;
|
||||
line-height: 120%;
|
||||
font-size: 90%;
|
||||
padding: 5px 10px;
|
||||
}
|
||||
.warning .label {
|
||||
background: #C00;
|
||||
color: white;
|
||||
font-weight: bold;
|
||||
padding: 5px 10px;
|
||||
}
|
||||
.warning .content {
|
||||
background: #FFF0F0;
|
||||
color: black;
|
||||
line-height: 120%;
|
||||
font-size: 90%;
|
||||
padding: 5px 10px;
|
||||
}
|
||||
.fixme .label {
|
||||
background: #C6C600;
|
||||
color: black;
|
||||
font-weight: bold;
|
||||
padding: 5px 10px;
|
||||
}
|
||||
.fixme .content {
|
||||
padding: 5px 10px;
|
||||
}
|
||||
|
||||
/**
|
||||
* Typography
|
||||
*/
|
||||
|
||||
body {
|
||||
font-family: verdana, "Trebuchet MS", arial, helvetica, sans-serif;
|
||||
font-size: 100%;
|
||||
}
|
||||
|
||||
#content {
|
||||
font-family: Georgia, Palatino, Times, serif;
|
||||
font-size: 95%;
|
||||
}
|
||||
#tabs {
|
||||
font-size: 70%;
|
||||
}
|
||||
#menu {
|
||||
font-size: 80%;
|
||||
}
|
||||
#footer {
|
||||
font-size: 70%;
|
||||
}
|
||||
|
||||
h1, h2, h3, h4, h5, h6 {
|
||||
font-family: "Trebuchet MS", verdana, arial, helvetica, sans-serif;
|
||||
font-weight: bold;
|
||||
margin-top: 1em;
|
||||
margin-bottom: .5em;
|
||||
}
|
||||
|
||||
h1 {
|
||||
margin-top: 0;
|
||||
margin-bottom: 1em;
|
||||
font-size: 1.4em;
|
||||
background-color: 73CAFF
|
||||
}
|
||||
#content h1 {
|
||||
font-size: 160%;
|
||||
margin-bottom: .5em;
|
||||
}
|
||||
#menu h1 {
|
||||
margin: 0;
|
||||
padding: 10px;
|
||||
background: #336699;
|
||||
color: white;
|
||||
}
|
||||
h2 {
|
||||
font-size: 120%;
|
||||
background-color: 73CAFF
|
||||
}
|
||||
h3 { font-size: 100%; }
|
||||
h4 { font-size: 90%; }
|
||||
h5 { font-size: 80%; }
|
||||
h6 { font-size: 75%; }
|
||||
|
||||
p {
|
||||
line-height: 120%;
|
||||
text-align: left;
|
||||
margin-top: .5em;
|
||||
margin-bottom: 1em;
|
||||
}
|
||||
|
||||
#content li,
|
||||
#content th,
|
||||
#content td,
|
||||
#content li ul,
|
||||
#content li ol{
|
||||
margin-top: .5em;
|
||||
margin-bottom: .5em;
|
||||
}
|
||||
|
||||
|
||||
#content li li,
|
||||
#minitoc-area li{
|
||||
margin-top: 0em;
|
||||
margin-bottom: 0em;
|
||||
}
|
||||
|
||||
#content .attribution {
|
||||
text-align: right;
|
||||
font-style: italic;
|
||||
font-size: 85%;
|
||||
margin-top: 1em;
|
||||
}
|
||||
|
||||
.codefrag {
|
||||
font-family: "Courier New", Courier, monospace;
|
||||
font-size: 110%;
|
||||
}
|
|
@ -1,49 +0,0 @@
|
|||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
* contributor license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright ownership.
|
||||
* The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
* (the "License"); you may not use this file except in compliance with
|
||||
* the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
body {
|
||||
font-family: Courier New, monospace;
|
||||
font-size: 10pt;
|
||||
}
|
||||
|
||||
h1 {
|
||||
font-family: Courier New, monospace;
|
||||
font-size: 10pt;
|
||||
}
|
||||
|
||||
h2 {
|
||||
font-family: Courier New, monospace;
|
||||
font-size: 10pt;
|
||||
}
|
||||
|
||||
h3 {
|
||||
font-family: Courier New, monospace;
|
||||
font-size: 10pt;
|
||||
}
|
||||
|
||||
a:link {
|
||||
color: blue;
|
||||
}
|
||||
|
||||
a:visited {
|
||||
color: purple;
|
||||
}
|
||||
|
||||
li {
|
||||
margin-top: 1em;
|
||||
margin-bottom: 1em;
|
||||
}
|
|
@ -1,286 +0,0 @@
|
|||
#!/usr/bin/perl
|
||||
#
|
||||
# Transforms Lucene Java's CHANGES.txt into Changes.html
|
||||
#
|
||||
# Input is on STDIN, output is to STDOUT
|
||||
#
|
||||
#
|
||||
# Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
# contributor license agreements. See the NOTICE file distributed with
|
||||
# this work for additional information regarding copyright ownership.
|
||||
# The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
# (the "License"); you may not use this file except in compliance with
|
||||
# the License. You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
|
||||
use strict;
|
||||
use warnings;
|
||||
|
||||
my $jira_url_prefix = 'http://issues.apache.org/jira/browse/';
|
||||
my $title = undef;
|
||||
my $release = undef;
|
||||
my $sections = undef;
|
||||
my $items = undef;
|
||||
my $first_relid = undef;
|
||||
my $second_relid = undef;
|
||||
my @releases = ();
|
||||
|
||||
my @lines = <>; # Get all input at once
|
||||
|
||||
#
|
||||
# Parse input and build hierarchical release structure in @releases
|
||||
#
|
||||
for (my $line_num = 0 ; $line_num <= $#lines ; ++$line_num) {
|
||||
$_ = $lines[$line_num];
|
||||
next unless (/\S/); # Skip blank lines
|
||||
|
||||
unless ($title) {
|
||||
if (/\S/) {
|
||||
s/^\s+//; # Trim leading whitespace
|
||||
s/\s+$//; # Trim trailing whitespace
|
||||
}
|
||||
$title = $_;
|
||||
next;
|
||||
}
|
||||
|
||||
if (/^(Release)|(Trunk)/) { # Release headings
|
||||
$release = $_;
|
||||
$sections = [];
|
||||
push @releases, [ $release, $sections ];
|
||||
($first_relid = lc($release)) =~ s/\s+/_/g if ($#releases == 0);
|
||||
($second_relid = lc($release)) =~ s/\s+/_/g if ($#releases == 1);
|
||||
$items = undef;
|
||||
next;
|
||||
}
|
||||
|
||||
# Section heading: 2 leading spaces, words all capitalized
|
||||
if (/^ ([A-Z]+)\s*/) {
|
||||
my $heading = $_;
|
||||
$items = [];
|
||||
push @$sections, [ $heading, $items ];
|
||||
next;
|
||||
}
|
||||
|
||||
# Handle earlier releases without sections - create a headless section
|
||||
unless ($items) {
|
||||
$items = [];
|
||||
push @$sections, [ undef, $items ];
|
||||
}
|
||||
|
||||
my $type;
|
||||
if (@$items) { # A list item has been encountered in this section before
|
||||
$type = $items->[0]; # 0th position of items array is list type
|
||||
} else {
|
||||
$type = get_list_type($_);
|
||||
push @$items, $type;
|
||||
}
|
||||
|
||||
if ($type eq 'numbered') { # The modern items list style
|
||||
# List item boundary is another numbered item or an unindented line
|
||||
my $line;
|
||||
my $item = $_;
|
||||
$item =~ s/^(\s{0,2}\d+\.\s*)//; # Trim the leading item number
|
||||
my $leading_ws_width = length($1);
|
||||
$item =~ s/\s+$//; # Trim trailing whitespace
|
||||
$item .= "\n";
|
||||
|
||||
while ($line_num < $#lines
|
||||
and ($line = $lines[++$line_num]) !~ /^(?:\s{0,2}\d+\.\s*\S|\S)/) {
|
||||
$line =~ s/^\s{$leading_ws_width}//; # Trim leading whitespace
|
||||
$line =~ s/\s+$//; # Trim trailing whitespace
|
||||
$item .= "$line\n";
|
||||
}
|
||||
$item =~ s/\n+\Z/\n/; # Trim trailing blank lines
|
||||
push @$items, $item;
|
||||
--$line_num unless ($line_num == $#lines);
|
||||
} elsif ($type eq 'paragraph') { # List item boundary is a blank line
|
||||
my $line;
|
||||
my $item = $_;
|
||||
$item =~ s/^(\s+)//;
|
||||
my $leading_ws_width = defined($1) ? length($1) : 0;
|
||||
$item =~ s/\s+$//; # Trim trailing whitespace
|
||||
$item .= "\n";
|
||||
|
||||
while ($line_num < $#lines and ($line = $lines[++$line_num]) =~ /\S/) {
|
||||
$line =~ s/^\s{$leading_ws_width}//; # Trim leading whitespace
|
||||
$line =~ s/\s+$//; # Trim trailing whitespace
|
||||
$item .= "$line\n";
|
||||
}
|
||||
push @$items, $item;
|
||||
--$line_num unless ($line_num == $#lines);
|
||||
} else { # $type is one of the bulleted types
|
||||
# List item boundary is another bullet or a blank line
|
||||
my $line;
|
||||
my $item = $_;
|
||||
$item =~ s/^(\s*$type\s*)//; # Trim the leading bullet
|
||||
my $leading_ws_width = length($1);
|
||||
$item =~ s/\s+$//; # Trim trailing whitespace
|
||||
$item .= "\n";
|
||||
|
||||
while ($line_num < $#lines
|
||||
and ($line = $lines[++$line_num]) !~ /^\s*(?:$type|\Z)/) {
|
||||
$line =~ s/^\s{$leading_ws_width}//; # Trim leading whitespace
|
||||
$line =~ s/\s+$//; # Trim trailing whitespace
|
||||
$item .= "$line\n";
|
||||
}
|
||||
push @$items, $item;
|
||||
--$line_num unless ($line_num == $#lines);
|
||||
}
|
||||
}
|
||||
|
||||
#
|
||||
# Print HTML-ified version to STDOUT
|
||||
#
|
||||
print<<"__HTML_HEADER__";
|
||||
<!--
|
||||
**********************************************************
|
||||
** WARNING: This file is generated from CHANGES.txt by the
|
||||
** Perl script 'changes2html.pl'.
|
||||
** Do *not* edit this file!
|
||||
**********************************************************
|
||||
|
||||
****************************************************************************
|
||||
* Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
* contributor license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright ownership.
|
||||
* The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
* (the "License"); you may not use this file except in compliance with
|
||||
* the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
****************************************************************************
|
||||
-->
|
||||
<html>
|
||||
<head>
|
||||
<title>$title</title>
|
||||
<link rel="stylesheet" href="ChangesFancyStyle.css" title="Fancy">
|
||||
<link rel="alternate stylesheet" href="ChangesSimpleStyle.css" title="Simple">
|
||||
<META http-equiv="Content-Type" content="text/html; charset=UTF-8"/>
|
||||
<SCRIPT>
|
||||
function toggleList(e) {
|
||||
element = document.getElementById(e).style;
|
||||
element.display == 'none' ? element.display = 'block' : element.display='none';
|
||||
}
|
||||
function collapse() {
|
||||
for (var i = 0; i < document.getElementsByTagName("ul").length; i++) {
|
||||
var list = document.getElementsByTagName("ul")[i];
|
||||
if (list.id != '$first_relid' && list.id != '$second_relid') {
|
||||
list.style.display = "none";
|
||||
}
|
||||
}
|
||||
for (var i = 0; i < document.getElementsByTagName("ol").length; i++) {
|
||||
document.getElementsByTagName("ol")[i].style.display = "none";
|
||||
}
|
||||
}
|
||||
window.onload = collapse;
|
||||
</SCRIPT>
|
||||
</head>
|
||||
<body>
|
||||
|
||||
<a href="http://hadoop.apache.org/core/"><img class="logoImage" alt="Hadoop" src="images/hadoop-logo.jpg" title="Scalable Computing Platform"></a>
|
||||
<h1>$title</h1>
|
||||
|
||||
__HTML_HEADER__
|
||||
|
||||
my $heading;
|
||||
my $relcnt = 0;
|
||||
my $header = 'h2';
|
||||
for my $rel (@releases) {
|
||||
if (++$relcnt == 3) {
|
||||
$header = 'h3';
|
||||
print "<h2><a href=\"javascript:toggleList('older')\">";
|
||||
print "Older Releases";
|
||||
print "</a></h2>\n";
|
||||
print "<ul id=\"older\">\n"
|
||||
}
|
||||
|
||||
($release, $sections) = @$rel;
|
||||
|
||||
# The first section heading is undefined for the older sectionless releases
|
||||
my $has_release_sections = $sections->[0][0];
|
||||
|
||||
(my $relid = lc($release)) =~ s/\s+/_/g;
|
||||
print "<$header><a href=\"javascript:toggleList('$relid')\">";
|
||||
print "$release";
|
||||
print "</a></$header>\n";
|
||||
print "<ul id=\"$relid\">\n"
|
||||
if ($has_release_sections);
|
||||
|
||||
for my $section (@$sections) {
|
||||
($heading, $items) = @$section;
|
||||
(my $sectid = lc($heading)) =~ s/\s+/_/g;
|
||||
my $numItemsStr = $#{$items} > 0 ? "($#{$items})" : "(none)";
|
||||
|
||||
print " <li><a href=\"javascript:toggleList('$relid.$sectid')\">",
|
||||
($heading || ''), "</a> $numItemsStr\n"
|
||||
if ($has_release_sections);
|
||||
|
||||
my $list_type = $items->[0] || '';
|
||||
my $list = ($has_release_sections || $list_type eq 'numbered' ? 'ol' : 'ul');
|
||||
my $listid = $sectid ? "$relid.$sectid" : $relid;
|
||||
print " <$list id=\"$listid\">\n";
|
||||
|
||||
for my $itemnum (1..$#{$items}) {
|
||||
my $item = $items->[$itemnum];
|
||||
$item =~ s:&:&amp;:g; # Escape HTML metachars
|
||||
$item =~ s:<:&lt;:g;
|
||||
$item =~ s:>:&gt;:g;
|
||||
|
||||
$item =~ s:\s*(\([^)"]+?\))\s*$:<br />$1:; # Separate attribution
|
||||
$item =~ s:\n{2,}:\n<p/>\n:g; # Keep paragraph breaks
|
||||
$item =~ s{(?:${jira_url_prefix})?(HADOOP-\d+)} # Link to JIRA Common
|
||||
{<a href="${jira_url_prefix}$1">$1</a>}g;
|
||||
$item =~ s{(?:${jira_url_prefix})?(HDFS-\d+)} # Link to JIRA Hdfs
|
||||
{<a href="${jira_url_prefix}$1">$1</a>}g;
|
||||
$item =~ s{(?:${jira_url_prefix})?(MAPREDUCE-\d+)} # Link to JIRA MR
|
||||
{<a href="${jira_url_prefix}$1">$1</a>}g;
|
||||
print " <li>$item</li>\n";
|
||||
}
|
||||
print " </$list>\n";
|
||||
print " </li>\n" if ($has_release_sections);
|
||||
}
|
||||
print "</ul>\n" if ($has_release_sections);
|
||||
}
|
||||
print "</ul>\n" if ($relcnt > 3);
|
||||
print "</body>\n</html>\n";
|
||||
|
||||
|
||||
#
|
||||
# Subroutine: get_list_type
|
||||
#
|
||||
# Takes one parameter:
|
||||
#
|
||||
# - The first line of a sub-section/point
|
||||
#
|
||||
# Returns one scalar:
|
||||
#
|
||||
# - The list type: 'numbered'; or one of the bulleted types '-', or '.' or
|
||||
# 'paragraph'.
|
||||
#
|
||||
sub get_list_type {
|
||||
my $first_list_item_line = shift;
|
||||
my $type = 'paragraph'; # Default to paragraph type
|
||||
|
||||
if ($first_list_item_line =~ /^\s{0,2}\d+\.\s+\S+/) {
|
||||
$type = 'numbered';
|
||||
} elsif ($first_list_item_line =~ /^\s*([-.])\s+\S+/) {
|
||||
$type = $1;
|
||||
}
|
||||
return $type;
|
||||
}
|
||||
|
||||
1;
|
|
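The get_list_type heuristic above classifies a block by its first line: up to two leading spaces followed by digits and a dot means a numbered list, a leading '-' or '.' bullet returns that bullet character, and anything else is treated as a paragraph. A rough Java rendering of the same classification, for readers less fluent in Perl regexes (the class and method names here are ours, not part of the script):

import java.util.regex.Matcher;
import java.util.regex.Pattern;

final class ListTypeSketch {
  private static final Pattern NUMBERED = Pattern.compile("^\\s{0,2}\\d+\\.\\s+\\S+.*");
  private static final Pattern BULLETED = Pattern.compile("^\\s*([-.])\\s+\\S+.*");

  // Same decision the Perl sub makes: 'numbered', the bullet character itself,
  // or 'paragraph' when neither pattern matches the first line of the block.
  static String listType(String firstLine) {
    if (NUMBERED.matcher(firstLine).matches()) {
      return "numbered";
    }
    Matcher bullet = BULLETED.matcher(firstLine);
    return bullet.matches() ? bullet.group(1) : "paragraph";
  }
}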
@ -1,7 +0,0 @@
|
|||
This is the base documentation directory.
|
||||
|
||||
skinconf.xml # This file customizes Forrest for your project. In it, you
|
||||
# tell forrest the project name, logo, copyright info, etc
|
||||
|
||||
sitemap.xmap # Optional. This sitemap is consulted before all core sitemaps.
|
||||
# See http://forrest.apache.org/docs/project-sitemap.html
|
|
@ -1,40 +0,0 @@
|
|||
#
|
||||
# Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
# contributor license agreements. See the NOTICE file distributed with
|
||||
# this work for additional information regarding copyright ownership.
|
||||
# The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
# (the "License"); you may not use this file except in compliance with
|
||||
# the License. You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
|
||||
#=======================================================================
|
||||
# CatalogManager.properties
|
||||
#
|
||||
# This is the default properties file for Apache Forrest.
|
||||
# This facilitates local configuration of application-specific catalogs.
|
||||
#
|
||||
# See the Apache Forrest documentation:
|
||||
# http://forrest.apache.org/docs/your-project.html
|
||||
# http://forrest.apache.org/docs/validation.html
|
||||
|
||||
# verbosity ... level of messages for status/debug
|
||||
# See forrest/src/core/context/WEB-INF/cocoon.xconf
|
||||
|
||||
# catalogs ... list of additional catalogs to load
|
||||
# (Note that Apache Forrest will automatically load its own default catalog
|
||||
# from src/core/context/resources/schema/catalog.xcat)
|
||||
# use full pathnames
|
||||
# pathname separator is always semi-colon (;) regardless of operating system
|
||||
# directory separator is always slash (/) regardless of operating system
|
||||
#
|
||||
#catalogs=/home/me/forrest/my-site/src/documentation/resources/schema/catalog.xcat
|
||||
catalogs=
|
||||
|
|
@ -1,327 +0,0 @@
|
|||
<?xml version="1.0"?>
|
||||
<!--
|
||||
Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
contributor license agreements. See the NOTICE file distributed with
|
||||
this work for additional information regarding copyright ownership.
|
||||
The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
(the "License"); you may not use this file except in compliance with
|
||||
the License. You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
-->
|
||||
<!--+
|
||||
| This is the Apache Cocoon command line configuration file.
|
||||
| Here you give the command line interface details of where
|
||||
| to find various aspects of your Cocoon installation.
|
||||
|
|
||||
| If you wish, you can also use this file to specify the URIs
|
||||
| that you wish to generate.
|
||||
|
|
||||
| The current configuration information in this file is for
|
||||
| building the Cocoon documentation. Therefore, all links here
|
||||
| are relative to the build context dir, which, in the build.xml
|
||||
| file, is set to ${build.context}
|
||||
|
|
||||
| Options:
|
||||
| verbose: increase amount of information presented
|
||||
| to standard output (default: false)
|
||||
| follow-links: whether linked pages should also be
|
||||
| generated (default: true)
|
||||
| precompile-only: precompile sitemaps and XSP pages, but
|
||||
| do not generate any pages (default: false)
|
||||
| confirm-extensions: check the mime type for the generated page
|
||||
| and adjust filename and links extensions
|
||||
| to match the mime type
|
||||
| (e.g. text/html->.html)
|
||||
|
|
||||
| Note: Whilst using an xconf file to configure the Cocoon
|
||||
| Command Line gives access to more features, the use of
|
||||
| command line parameters is more stable, as there are
|
||||
| currently plans to improve the xconf format to allow
|
||||
| greater flexibility. If you require a stable and
|
||||
| consistent method for accessing the CLI, it is recommended
|
||||
| that you use the command line parameters to configure
|
||||
| the CLI. See documentation at:
|
||||
| http://cocoon.apache.org/2.1/userdocs/offline/
|
||||
| http://wiki.apache.org/cocoon/CommandLine
|
||||
|
|
||||
+-->
|
||||
|
||||
<cocoon verbose="true"
|
||||
follow-links="true"
|
||||
precompile-only="false"
|
||||
confirm-extensions="false">
|
||||
|
||||
<!--+
|
||||
| The context directory is usually the webapp directory
|
||||
| containing the sitemap.xmap file.
|
||||
|
|
||||
| The config file is the cocoon.xconf file.
|
||||
|
|
||||
| The work directory is used by Cocoon to store temporary
|
||||
| files and cache files.
|
||||
|
|
||||
| The destination directory is where generated pages will
|
||||
| be written (assuming the 'simple' mapper is used, see
|
||||
| below)
|
||||
+-->
|
||||
<context-dir>.</context-dir>
|
||||
<config-file>WEB-INF/cocoon.xconf</config-file>
|
||||
<work-dir>../tmp/cocoon-work</work-dir>
|
||||
<dest-dir>../site</dest-dir>
|
||||
|
||||
<!--+
|
||||
| A checksum file can be used to store checksums for pages
|
||||
| as they are generated. When the site is next generated,
|
||||
| files will not be written if their checksum has not changed.
|
||||
| This means that it will be easier to detect which files
|
||||
| need to be uploaded to a server, using the timestamp.
|
||||
|
|
||||
| The default path is relative to the core webapp directory.
|
||||
| An absolute path can be used.
|
||||
+-->
|
||||
<!-- <checksums-uri>build/work/checksums</checksums-uri>-->
|
||||
|
||||
<!--+
|
||||
| Broken link reporting options:
|
||||
| Report into a text file, one link per line:
|
||||
| <broken-links type="text" report="filename"/>
|
||||
| Report into an XML file:
|
||||
| <broken-links type="xml" report="filename"/>
|
||||
| Ignore broken links (default):
|
||||
| <broken-links type="none"/>
|
||||
|
|
||||
| Two attributes to this node specify whether a page should
|
||||
| be generated when an error has occurred. 'generate' specifies
|
||||
| whether a page should be generated (default: true) and
|
||||
| extension specifies an extension that should be appended
|
||||
| to the generated page's filename (default: none)
|
||||
|
|
||||
| Using this, a quick scan through the destination directory
|
||||
| will show broken links, by their filename extension.
|
||||
+-->
|
||||
<broken-links type="xml"
|
||||
file="../brokenlinks.xml"
|
||||
generate="false"
|
||||
extension=".error"
|
||||
show-referrers="true"/>
|
||||
|
||||
<!--+
|
||||
| Load classes at startup. This is necessary for generating
|
||||
| from sites that use SQL databases and JDBC.
|
||||
| The <load-class> element can be repeated if multiple classes
|
||||
| are needed.
|
||||
+-->
|
||||
<!--
|
||||
<load-class>org.firebirdsql.jdbc.Driver</load-class>
|
||||
-->
|
||||
|
||||
<!--+
|
||||
| Configures logging.
|
||||
| The 'log-kit' parameter specifies the location of the log kit
|
||||
| configuration file (usually called logkit.xconf).
|
||||
|
|
||||
| Logger specifies the logging category (for all logging prior
|
||||
| to other Cocoon logging categories taking over)
|
||||
|
|
||||
| Available log levels are:
|
||||
| DEBUG: prints all level of log messages.
|
||||
| INFO: prints all level of log messages except DEBUG
|
||||
| ones.
|
||||
| WARN: prints all level of log messages except DEBUG
|
||||
| and INFO ones.
|
||||
| ERROR: prints all level of log messages except DEBUG,
|
||||
| INFO and WARN ones.
|
||||
| FATAL_ERROR: prints only log messages of this level
|
||||
+-->
|
||||
<!-- <logging log-kit="WEB-INF/logkit.xconf" logger="cli" level="ERROR" /> -->
|
||||
|
||||
<!--+
|
||||
| Specifies the filename to be appended to URIs that
|
||||
| refer to a directory (i.e. end with a forward slash).
|
||||
+-->
|
||||
<default-filename>index.html</default-filename>
|
||||
|
||||
<!--+
|
||||
| Specifies a user agent string to the sitemap when
|
||||
| generating the site.
|
||||
|
|
||||
| A generic term for a web browser is "user agent". Any
|
||||
| user agent, when connecting to a web server, will provide
|
||||
| a string to identify itself (e.g. as Internet Explorer or
|
||||
| Mozilla). It is possible to have Cocoon serve different
|
||||
| content depending upon the user agent string provided by
|
||||
| the browser. If your site does this, then you may want to
|
||||
| use this <user-agent> entry to provide a 'fake' user agent
|
||||
| to Cocoon, so that it generates the correct version of your
|
||||
| site.
|
||||
|
|
||||
| For most sites, this can be ignored.
|
||||
+-->
|
||||
<!--
|
||||
<user-agent>Cocoon Command Line Environment 2.1</user-agent>
|
||||
-->
|
||||
|
||||
<!--+
|
||||
| Specifies an accept string to the sitemap when generating
|
||||
| the site.
|
||||
| User agents can specify to an HTTP server what types of content
|
||||
| (by mime-type) they are able to receive. E.g. a browser may be
|
||||
| able to handle jpegs, but not pngs. The HTTP accept header
|
||||
| allows the server to take the browser's capabilities into account,
|
||||
| and only send back content that it can handle.
|
||||
|
|
||||
| For most sites, this can be ignored.
|
||||
+-->
|
||||
|
||||
<accept>*/*</accept>
|
||||
|
||||
<!--+
|
||||
| Specifies which URIs should be included or excluded, according
|
||||
| to wildcard patterns.
|
||||
|
|
||||
| These includes/excludes are only relevant when you are following
|
||||
| links. A link URI must match an include pattern (if one is given)
|
||||
| and not match an exclude pattern, if it is to be followed by
|
||||
| Cocoon. It can be useful, for example, where there are links in
|
||||
| your site to pages that are not generated by Cocoon, such as
|
||||
| references to api-documentation.
|
||||
|
|
||||
| By default, all URIs are included. If both include and exclude
|
||||
| patterns are specified, a URI is first checked against the
|
||||
| include patterns, and then against the exclude patterns.
|
||||
|
|
||||
| Multiple patterns can be given, using multiple include or exclude
|
||||
| nodes.
|
||||
|
|
||||
| The order of the elements is not significant, as only the first
|
||||
| successful match of each category is used.
|
||||
|
|
||||
| Currently, only the complete source URI can be matched (including
|
||||
| any URI prefix). Future plans include destination URI matching
|
||||
| and regexp matching. If you have requirements for these, contact
|
||||
| dev@cocoon.apache.org.
|
||||
+-->
|
||||
|
||||
<exclude pattern="**/"/>
|
||||
<exclude pattern="api/**"/>
|
||||
<exclude pattern="jdiff/**"/>
|
||||
<exclude pattern="changes.html"/>
|
||||
<exclude pattern="releasenotes.html"/>
|
||||
|
||||
<!--
|
||||
This is a workaround for FOR-284 "link rewriting broken when
|
||||
linking to xml source views which contain site: links".
|
||||
See the explanation there and in declare-broken-site-links.xsl
|
||||
-->
|
||||
<exclude pattern="site:**"/>
|
||||
<exclude pattern="ext:**"/>
|
||||
<exclude pattern="lm:**"/>
|
||||
<exclude pattern="**/site:**"/>
|
||||
<exclude pattern="**/ext:**"/>
|
||||
<exclude pattern="**/lm:**"/>
|
||||
|
||||
<!-- Exclude tokens used in URLs to ASF mirrors (interpreted by a CGI) -->
|
||||
<exclude pattern="[preferred]/**"/>
|
||||
<exclude pattern="[location]"/>
|
||||
|
||||
<!-- <include-links extension=".html"/>-->
|
||||
|
||||
<!--+
|
||||
| <uri> nodes specify the URIs that should be generated, and
|
||||
| where required, what should be done with the generated pages.
|
||||
| They describe the way the URI of the generated file is created
|
||||
| from the source page's URI. There are three ways that a generated
|
||||
| file URI can be created: append, replace and insert.
|
||||
|
|
||||
| The "type" attribute specifies one of (append|replace|insert):
|
||||
|
|
||||
| append:
|
||||
| Append the generated page's URI to the end of the source URI:
|
||||
|
|
||||
| <uri type="append" src-prefix="documents/" src="index.html"
|
||||
| dest="build/dest/"/>
|
||||
|
|
||||
| This means that
|
||||
| (1) the "documents/index.html" page is generated
|
||||
| (2) the file will be written to "build/dest/documents/index.html"
|
||||
|
|
||||
| replace:
|
||||
| Completely ignore the generated page's URI - just
|
||||
| use the destination URI:
|
||||
|
|
||||
| <uri type="replace" src-prefix="documents/" src="index.html"
|
||||
| dest="build/dest/docs.html"/>
|
||||
|
|
||||
| This means that
|
||||
| (1) the "documents/index.html" page is generated
|
||||
| (2) the result is written to "build/dest/docs.html"
|
||||
| (3) this works only for "single" pages - and not when links
|
||||
| are followed
|
||||
|
|
||||
| insert:
|
||||
| Insert generated page's URI into the destination
|
||||
| URI at the point marked with a * (example uses fictional
|
||||
| zip protocol)
|
||||
|
|
||||
| <uri type="insert" src-prefix="documents/" src="index.html"
|
||||
| dest="zip://*.zip/page.html"/>
|
||||
|
|
||||
| This means that
|
||||
| (1)
|
||||
|
|
||||
| In any of these scenarios, if the dest attribute is omitted,
|
||||
| the value provided globally using the <dest-dir> node will
|
||||
| be used instead.
|
||||
+-->
|
||||
<!--
|
||||
<uri type="replace"
|
||||
src-prefix="samples/"
|
||||
src="hello-world/hello.html"
|
||||
dest="build/dest/hello-world.html"/>
|
||||
-->
|
||||
|
||||
<!--+
|
||||
| <uri> nodes can be grouped together in a <uris> node. This
|
||||
| enables a group of URIs to share properties. The following
|
||||
| properties can be set for a group of URIs:
|
||||
| * follow-links: should pages be crawled for links
|
||||
| * confirm-extensions: should file extensions be checked
|
||||
| for the correct mime type
|
||||
| * src-prefix: all source URIs should be
|
||||
| pre-pended with this prefix before
|
||||
| generation. The prefix is not
|
||||
| included when calculating the
|
||||
| destination URI
|
||||
| * dest: the base destination URI to be
|
||||
| shared by all pages in this group
|
||||
| * type: the method to be used to calculate
|
||||
| the destination URI. See above
|
||||
| section on <uri> node for details.
|
||||
|
|
||||
| Each <uris> node can have a name attribute. When a name
|
||||
| attribute has been specified, the -n switch on the command
|
||||
| line can be used to tell Cocoon to only process the URIs
|
||||
| within this URI group. When no -n switch is given, all
|
||||
| <uris> nodes are processed. Thus, one xconf file can be
|
||||
| used to manage multiple sites.
|
||||
+-->
|
||||
<!--
|
||||
<uris name="mirrors" follow-links="false">
|
||||
<uri type="append" src="mirrors.html"/>
|
||||
</uris>
|
||||
-->
|
||||
|
||||
<!--+
|
||||
| File containing URIs (plain text, one per line).
|
||||
+-->
|
||||
<!--
|
||||
<uri-file>uris.txt</uri-file>
|
||||
-->
|
||||
</cocoon>
|
|
@ -1,48 +0,0 @@
|
|||
<?xml version="1.0"?>
|
||||
<!--
|
||||
Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
contributor license agreements. See the NOTICE file distributed with
|
||||
this work for additional information regarding copyright ownership.
|
||||
The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
(the "License"); you may not use this file except in compliance with
|
||||
the License. You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
-->
|
||||
|
||||
<!DOCTYPE document PUBLIC "-//APACHE//DTD Documentation V2.0//EN" "http://forrest.apache.org/dtd/document-v20.dtd">
|
||||
|
||||
<document>
|
||||
|
||||
<header>
|
||||
<title>Overview</title>
|
||||
</header>
|
||||
|
||||
<body>
|
||||
<p>
|
||||
The Hadoop Common Documentation describes the common utilities and libraries that support the other Hadoop subprojects.
|
||||
</p>
|
||||
<p>
|
||||
The Hadoop Common Documentation also includes the information you need to get started using Hadoop.
|
||||
Begin with the Hadoop <a href="single_node_setup.html">Single Node Setup</a> which shows you how to set up a single-node Hadoop installation.
|
||||
Then move on to the Hadoop <a href="cluster_setup.html">Cluster Setup</a> to learn how to set up a multi-node Hadoop installation.
|
||||
</p>
|
||||
<p>
|
||||
Cluster environments commonly work in tandem with MapReduce applications and distributed file systems.
|
||||
For information about MapReduce see the
|
||||
<a href="http://hadoop.apache.org/mapreduce/docs/current/index.html">MapReduce Documentation</a>.
|
||||
For information about the Hadoop Distributed File System (HDFS) see the
|
||||
<a href="http://hadoop.apache.org/hdfs/docs/current/index.html">HDFS Documentation</a>.
|
||||
</p>
|
||||
<p>
|
||||
If you have more questions, you can ask on the <a href="ext:lists">Hadoop Common Mailing Lists</a> or browse the <a href="ext:archive">Mailing List Archives</a>.
|
||||
</p>
|
||||
</body>
|
||||
|
||||
</document>
|
|
@ -1,263 +0,0 @@
|
|||
<?xml version="1.0"?>
|
||||
<!--
|
||||
Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
contributor license agreements. See the NOTICE file distributed with
|
||||
this work for additional information regarding copyright ownership.
|
||||
The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
(the "License"); you may not use this file except in compliance with
|
||||
the License. You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
-->
|
||||
|
||||
<!--
|
||||
Forrest site.xml
|
||||
|
||||
This file contains an outline of the site's information content. It is used to:
|
||||
- Generate the website menus (though these can be overridden - see docs)
|
||||
- Provide semantic, location-independent aliases for internal 'site:' URIs, eg
|
||||
<link href="site:changes"> links to changes.html (or ../changes.html if in
|
||||
subdir).
|
||||
- Provide aliases for external URLs in the external-refs section. Eg, <link
|
||||
href="ext:cocoon"> links to http://xml.apache.org/cocoon/
|
||||
|
||||
See http://forrest.apache.org/docs/linking.html for more info.
|
||||
-->
|
||||
|
||||
<site label="Hadoop" href="" xmlns="http://apache.org/forrest/linkmap/1.0">
|
||||
|
||||
<docs label="Getting Started">
|
||||
<overview label="Overview" href="index.html" />
|
||||
<quickstart label="Single Node Setup" href="single_node_setup.html" />
|
||||
<setup label="Cluster Setup" href="cluster_setup.html" />
|
||||
</docs>
|
||||
|
||||
<docs label="Guides">
|
||||
<deployment label="Deployment Layout" href="deployment_layout.html" />
|
||||
<commands_manual label="Hadoop Commands" href="commands_manual.html" />
|
||||
<fsshell label="File System Shell" href="file_system_shell.html" />
|
||||
<SLA label="Service Level Authorization" href="service_level_auth.html"/>
|
||||
<native_lib label="Native Libraries" href="native_libraries.html" />
|
||||
<superusers label="Superusers Acting On Behalf Of Other Users" href="Superusers.html"/>
|
||||
<http_authentication label="Authentication for Hadoop HTTP web-consoles" href="HttpAuthentication.html"/>
|
||||
</docs>
|
||||
|
||||
<docs label="Miscellaneous">
|
||||
<api label="API Docs" href="ext:api/index" />
|
||||
<jdiff label="API Changes" href="ext:jdiff/changes" />
|
||||
<wiki label="Wiki" href="ext:wiki" />
|
||||
<faq label="FAQ" href="ext:faq" />
|
||||
<relnotes label="Release Notes" href="ext:relnotes" />
|
||||
<changes label="Change Log" href="ext:changes" />
|
||||
</docs>
|
||||
|
||||
<external-refs>
|
||||
<site href="http://hadoop.apache.org/common/"/>
|
||||
<lists href="http://hadoop.apache.org/common/mailing_lists.html"/>
|
||||
<archive href="http://mail-archives.apache.org/mod_mbox/hadoop-common-commits/"/>
|
||||
<releases href="http://hadoop.apache.org/common/releases.html">
|
||||
<download href="#Download" />
|
||||
</releases>
|
||||
<jira href="http://hadoop.apache.org/common/issue_tracking.html"/>
|
||||
<wiki href="http://wiki.apache.org/hadoop/Common" />
|
||||
<faq href="http://wiki.apache.org/hadoop/Common/FAQ" />
|
||||
|
||||
<core-default href="http://hadoop.apache.org/common/docs/current/core-default.html" />
|
||||
<hdfs-default href="http://hadoop.apache.org/hdfs/docs/current/hdfs-default.html" />
|
||||
<mapred-default href="http://hadoop.apache.org/mapreduce/docs/current/mapred-default.html" />
|
||||
|
||||
<mapred-queues href="http://hadoop.apache.org/mapreduce/docs/current/mapred_queues.xml" />
|
||||
<capacity-scheduler href="http://hadoop.apache.org/mapreduce/docs/current/capacity_scheduler.html">
|
||||
<MemoryBasedTaskScheduling href="#Scheduling+Tasks+Considering+Memory+Requirements" />
|
||||
</capacity-scheduler>
|
||||
<mapred-tutorial href="http://hadoop.apache.org/mapreduce/docs/current/mapred_tutorial.html" >
|
||||
<JobAuthorization href="#Job+Authorization" />
|
||||
<ConfiguringMemoryRequirements href="#Configuring+Memory+Requirements+For+A+Job" />
|
||||
</mapred-tutorial>
|
||||
<streaming href="http://hadoop.apache.org/mapreduce/docs/current/streaming.html" />
|
||||
<distcp href="http://hadoop.apache.org/mapreduce/docs/current/distcp.html" />
|
||||
<hadoop-archives href="http://hadoop.apache.org/mapreduce/docs/current/hadoop_archives.html" />
|
||||
|
||||
<zlib href="http://www.zlib.net/" />
|
||||
<gzip href="http://www.gzip.org/" />
|
||||
<bzip href="http://www.bzip.org/" />
|
||||
<osx href="http://www.apple.com/macosx" />
|
||||
|
||||
<relnotes href="releasenotes.html" />
|
||||
<changes href="changes.html" />
|
||||
<jdiff href="jdiff/">
|
||||
<changes href="changes.html" />
|
||||
</jdiff>
|
||||
<api href="api/">
|
||||
<index href="index.html" />
|
||||
<org href="org/">
|
||||
<apache href="apache/">
|
||||
<hadoop href="hadoop/">
|
||||
<conf href="conf/">
|
||||
<configuration href="Configuration.html">
|
||||
<final_parameters href="#FinalParams" />
|
||||
<get href="#get(java.lang.String, java.lang.String)" />
|
||||
<set href="#set(java.lang.String, java.lang.String)" />
|
||||
</configuration>
|
||||
</conf>
|
||||
<filecache href="filecache/">
|
||||
<distributedcache href="DistributedCache.html">
|
||||
<addarchivetoclasspath href="#addArchiveToClassPath(org.apache.hadoop.fs.Path,%20org.apache.hadoop.conf.Configuration)" />
|
||||
<addfiletoclasspath href="#addFileToClassPath(org.apache.hadoop.fs.Path,%20org.apache.hadoop.conf.Configuration)" />
|
||||
<addcachefile href="#addCacheFile(java.net.URI,%20org.apache.hadoop.conf.Configuration)" />
|
||||
<addcachearchive href="#addCacheArchive(java.net.URI,%20org.apache.hadoop.conf.Configuration)" />
|
||||
<setcachefiles href="#setCacheFiles(java.net.URI[],%20org.apache.hadoop.conf.Configuration)" />
|
||||
<setcachearchives href="#setCacheArchives(java.net.URI[],%20org.apache.hadoop.conf.Configuration)" />
|
||||
<createsymlink href="#createSymlink(org.apache.hadoop.conf.Configuration)" />
|
||||
</distributedcache>
|
||||
</filecache>
|
||||
<fs href="fs/">
|
||||
<filesystem href="FileSystem.html" />
|
||||
</fs>
|
||||
<io href="io/">
|
||||
<closeable href="Closeable.html">
|
||||
<close href="#close()" />
|
||||
</closeable>
|
||||
<sequencefile href="SequenceFile.html" />
|
||||
<sequencefilecompressiontype href="SequenceFile.CompressionType.html">
|
||||
<none href="#NONE" />
|
||||
<record href="#RECORD" />
|
||||
<block href="#BLOCK" />
|
||||
</sequencefilecompressiontype>
|
||||
<writable href="Writable.html" />
|
||||
<writablecomparable href="WritableComparable.html" />
|
||||
<compress href="compress/">
|
||||
<compressioncodec href="CompressionCodec.html" />
|
||||
</compress>
|
||||
</io>
|
||||
<mapred href="mapred/">
|
||||
<clusterstatus href="ClusterStatus.html" />
|
||||
<counters href="Counters.html" />
|
||||
<fileinputformat href="FileInputFormat.html">
|
||||
<setinputpaths href="#setInputPaths(org.apache.hadoop.mapred.JobConf,%20org.apache.hadoop.fs.Path[])" />
|
||||
<addinputpath href="#addInputPath(org.apache.hadoop.mapred.JobConf,%20org.apache.hadoop.fs.Path)" />
|
||||
<setinputpathstring href="#setInputPaths(org.apache.hadoop.mapred.JobConf,%20java.lang.String)" />
|
||||
<addinputpathstring href="#addInputPath(org.apache.hadoop.mapred.JobConf,%20java.lang.String)" />
|
||||
</fileinputformat>
|
||||
<fileoutputformat href="FileOutputFormat.html">
|
||||
<getoutputpath href="#getOutputPath(org.apache.hadoop.mapred.JobConf)" />
|
||||
<getworkoutputpath href="#getWorkOutputPath(org.apache.hadoop.mapred.JobConf)" />
|
||||
<setoutputpath href="#setOutputPath(org.apache.hadoop.mapred.JobConf,%20org.apache.hadoop.fs.Path)" />
|
||||
<setcompressoutput href="#setCompressOutput(org.apache.hadoop.mapred.JobConf,%20boolean)" />
|
||||
<setoutputcompressorclass href="#setOutputCompressorClass(org.apache.hadoop.mapred.JobConf,%20java.lang.Class)" />
|
||||
</fileoutputformat>
|
||||
<filesplit href="FileSplit.html" />
|
||||
<inputformat href="InputFormat.html" />
|
||||
<inputsplit href="InputSplit.html" />
|
||||
<isolationrunner href="IsolationRunner.html" />
|
||||
<jobclient href="JobClient.html">
|
||||
<runjob href="#runJob(org.apache.hadoop.mapred.JobConf)" />
|
||||
<submitjob href="#submitJob(org.apache.hadoop.mapred.JobConf)" />
|
||||
</jobclient>
|
||||
<jobconf href="JobConf.html">
|
||||
<setnummaptasks href="#setNumMapTasks(int)" />
|
||||
<setnumreducetasks href="#setNumReduceTasks(int)" />
|
||||
<setoutputkeycomparatorclass href="#setOutputKeyComparatorClass(java.lang.Class)" />
|
||||
<setoutputvaluegroupingcomparator href="#setOutputValueGroupingComparator(java.lang.Class)" />
|
||||
<setcombinerclass href="#setCombinerClass(java.lang.Class)" />
|
||||
<setmapdebugscript href="#setMapDebugScript(java.lang.String)" />
|
||||
<setreducedebugscript href="#setReduceDebugScript(java.lang.String)" />
|
||||
<setmapspeculativeexecution href="#setMapSpeculativeExecution(boolean)" />
|
||||
<setreducespeculativeexecution href="#setReduceSpeculativeExecution(boolean)" />
|
||||
<setmaxmapattempts href="#setMaxMapAttempts(int)" />
|
||||
<setmaxreduceattempts href="#setMaxReduceAttempts(int)" />
|
||||
<setmaxmaptaskfailurespercent href="#setMaxMapTaskFailuresPercent(int)" />
|
||||
<setmaxreducetaskfailurespercent href="#setMaxReduceTaskFailuresPercent(int)" />
|
||||
<setjobendnotificationuri href="#setJobEndNotificationURI(java.lang.String)" />
|
||||
<setcompressmapoutput href="#setCompressMapOutput(boolean)" />
|
||||
<setmapoutputcompressorclass href="#setMapOutputCompressorClass(java.lang.Class)" />
|
||||
<setprofileenabled href="#setProfileEnabled(boolean)" />
|
||||
<setprofiletaskrange href="#setProfileTaskRange(boolean,%20java.lang.String)" />
|
||||
<setprofileparams href="#setProfileParams(java.lang.String)" />
|
||||
<setnumtaskstoexecuteperjvm href="#setNumTasksToExecutePerJvm(int)" />
|
||||
<setqueuename href="#setQueueName(java.lang.String)" />
|
||||
<getjoblocaldir href="#getJobLocalDir()" />
|
||||
<getjar href="#getJar()" />
|
||||
</jobconf>
|
||||
<jobconfigurable href="JobConfigurable.html">
|
||||
<configure href="#configure(org.apache.hadoop.mapred.JobConf)" />
|
||||
</jobconfigurable>
|
||||
<jobcontrol href="jobcontrol/">
|
||||
<package-summary href="package-summary.html" />
|
||||
</jobcontrol>
|
||||
<mapper href="Mapper.html">
|
||||
<map href="#map(K1, V1, org.apache.hadoop.mapred.OutputCollector, org.apache.hadoop.mapred.Reporter)" />
|
||||
</mapper>
|
||||
<outputcollector href="OutputCollector.html">
|
||||
<collect href="#collect(K, V)" />
|
||||
</outputcollector>
|
||||
<outputcommitter href="OutputCommitter.html" />
|
||||
<outputformat href="OutputFormat.html" />
|
||||
<outputlogfilter href="OutputLogFilter.html" />
|
||||
<sequencefileoutputformat href="SequenceFileOutputFormat.html">
|
||||
<setoutputcompressiontype href="#setOutputCompressionType(org.apache.hadoop.mapred.JobConf,%20org.apache.hadoop.io.SequenceFile.CompressionType)" />
|
||||
</sequencefileoutputformat>
|
||||
<partitioner href="Partitioner.html" />
|
||||
<recordreader href="RecordReader.html" />
|
||||
<recordwriter href="RecordWriter.html" />
|
||||
<reducer href="Reducer.html">
|
||||
<reduce href="#reduce(K2, java.util.Iterator, org.apache.hadoop.mapred.OutputCollector, org.apache.hadoop.mapred.Reporter)" />
|
||||
</reducer>
|
||||
<reporter href="Reporter.html">
|
||||
<incrcounterEnum href="#incrCounter(java.lang.Enum, long)" />
|
||||
<incrcounterString href="#incrCounter(java.lang.String, java.lang.String, long amount)" />
|
||||
</reporter>
|
||||
<runningjob href="RunningJob.html" />
|
||||
<skipbadrecords href="SkipBadRecords.html">
|
||||
<setmappermaxskiprecords href="#setMapperMaxSkipRecords(org.apache.hadoop.conf.Configuration, long)"/>
|
||||
<setreducermaxskipgroups href="#setReducerMaxSkipGroups(org.apache.hadoop.conf.Configuration, long)"/>
|
||||
<setattemptsTostartskipping href="#setAttemptsToStartSkipping(org.apache.hadoop.conf.Configuration, int)"/>
|
||||
<setskipoutputpath href="#setSkipOutputPath(org.apache.hadoop.mapred.JobConf, org.apache.hadoop.fs.Path)"/>
|
||||
<counter_map_processed_records href="#COUNTER_MAP_PROCESSED_RECORDS"/>
|
||||
<counter_reduce_processed_groups href="#COUNTER_REDUCE_PROCESSED_GROUPS"/>
|
||||
</skipbadrecords>
|
||||
<textinputformat href="TextInputFormat.html" />
|
||||
<textoutputformat href="TextOutputFormat.html" />
|
||||
<lib href="lib/">
|
||||
<package-summary href="package-summary.html" />
|
||||
<hashpartitioner href="HashPartitioner.html" />
|
||||
<keyfieldbasedpartitioner href="KeyFieldBasedPartitioner.html" />
|
||||
<keyfieldbasedcomparator href="KeyFieldBasedComparator.html" />
|
||||
<lazyoutputformat href="LazyOutputFormat.html" />
|
||||
<aggregate href="aggregate/">
|
||||
<package-summary href="package-summary.html" />
|
||||
</aggregate>
|
||||
</lib>
|
||||
<pipes href="pipes/">
|
||||
<package-summary href="package-summary.html" />
|
||||
</pipes>
|
||||
</mapred>
|
||||
<net href="net/">
|
||||
<dnstoswitchmapping href="DNSToSwitchMapping.html">
|
||||
<resolve href="#resolve(java.util.List)" />
|
||||
</dnstoswitchmapping>
|
||||
</net>
|
||||
<streaming href="streaming/">
|
||||
<package-summary href="package-summary.html" />
|
||||
</streaming>
|
||||
<util href="util/">
|
||||
<genericoptionsparser href="GenericOptionsParser.html" />
|
||||
<progress href="Progress.html" />
|
||||
<tool href="Tool.html" />
|
||||
<toolrunner href="ToolRunner.html">
|
||||
<run href="#run(org.apache.hadoop.util.Tool, java.lang.String[])" />
|
||||
</toolrunner>
|
||||
</util>
|
||||
</hadoop>
|
||||
</apache>
|
||||
</org>
|
||||
</api>
|
||||
</external-refs>
|
||||
|
||||
</site>
|
|
@ -1,37 +0,0 @@
|
|||
<?xml version="1.0"?>
|
||||
<!--
|
||||
Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
contributor license agreements. See the NOTICE file distributed with
|
||||
this work for additional information regarding copyright ownership.
|
||||
The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
(the "License"); you may not use this file except in compliance with
|
||||
the License. You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
-->
|
||||
|
||||
<!DOCTYPE tabs PUBLIC "-//APACHE//DTD Cocoon Documentation Tab V1.0//EN"
|
||||
"http://forrest.apache.org/dtd/tab-cocoon-v10.dtd">
|
||||
|
||||
<tabs software="Hadoop"
|
||||
title="Hadoop"
|
||||
copyright="The Apache Software Foundation"
|
||||
xmlns:xlink="http://www.w3.org/1999/xlink">
|
||||
|
||||
<!-- The rules are:
|
||||
@dir will always have /index.html added.
|
||||
@href is not modified unless it is root-relative and obviously specifies a
|
||||
directory (ends in '/'), in which case /index.html will be added
|
||||
-->
|
||||
|
||||
<tab label="Project" href="http://hadoop.apache.org/common/" />
|
||||
<tab label="Wiki" href="http://wiki.apache.org/hadoop" />
|
||||
<tab label="Common 0.24 Documentation" dir="" />
|
||||
|
||||
</tabs>
|
[10 binary image files removed; sizes range from 766 B to 125 KiB]
|
@ -1,366 +0,0 @@
|
|||
<?xml version="1.0"?>
|
||||
<!--
|
||||
Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
contributor license agreements. See the NOTICE file distributed with
|
||||
this work for additional information regarding copyright ownership.
|
||||
The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
(the "License"); you may not use this file except in compliance with
|
||||
the License. You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
-->
|
||||
|
||||
<!--
|
||||
Skin configuration file. This file contains details of your project,
|
||||
which will be used to configure the chosen Forrest skin.
|
||||
-->
|
||||
|
||||
<!DOCTYPE skinconfig PUBLIC "-//APACHE//DTD Skin Configuration V0.6-3//EN" "http://forrest.apache.org/dtd/skinconfig-v06-3.dtd">
|
||||
<skinconfig>
|
||||
<!-- To enable lucene search add provider="lucene" (default is google).
|
||||
Add box-location="alt" to move the search box to an alternate location
|
||||
(if the skin supports it) and box-location="all" to show it in all
|
||||
available locations on the page. Remove the <search> element to show
|
||||
no search box. @domain will enable sitesearch for the specific domain with google.
|
||||
In other words google will search the @domain for the query string.
|
||||
|
||||
-->
|
||||
<search name="Lucene" domain="hadoop.apache.org" provider="google"/>
|
||||
|
||||
<!-- Disable the print link? If enabled, invalid HTML 4.0.1 -->
|
||||
<disable-print-link>true</disable-print-link>
|
||||
<!-- Disable the PDF link? -->
|
||||
<disable-pdf-link>false</disable-pdf-link>
|
||||
<!-- Disable the POD link? -->
|
||||
<disable-pod-link>true</disable-pod-link>
|
||||
<!-- Disable the Text link? FIXME: NOT YET IMPLEMENTED. -->
|
||||
<disable-txt-link>true</disable-txt-link>
|
||||
<!-- Disable the xml source link? -->
|
||||
<!-- The xml source link makes it possible to access the xml rendition
|
||||
of the source from the html page, and to have it generated statically.
|
||||
This can be used to enable other sites and services to reuse the
|
||||
xml format for their uses. Keep this disabled if you don't want other
|
||||
sites to easily reuse your pages.-->
|
||||
<disable-xml-link>true</disable-xml-link>
|
||||
|
||||
<!-- Disable navigation icons on all external links? -->
|
||||
<disable-external-link-image>true</disable-external-link-image>
|
||||
|
||||
<!-- Disable w3c compliance links?
|
||||
Use e.g. align="center" to move the compliance links logos to
|
||||
an alternate location; the default is left.
|
||||
(if the skin supports it) -->
|
||||
<disable-compliance-links>true</disable-compliance-links>
|
||||
|
||||
<!-- Render mailto: links unrecognisable by spam harvesters? -->
|
||||
<obfuscate-mail-links>false</obfuscate-mail-links>
|
||||
|
||||
<!-- Disable the javascript facility to change the font size -->
|
||||
<disable-font-script>true</disable-font-script>
|
||||
|
||||
<!-- project logo -->
|
||||
<project-name>Hadoop</project-name>
|
||||
<project-description>Scalable Computing Platform</project-description>
|
||||
<project-url>http://hadoop.apache.org/core/</project-url>
|
||||
<project-logo>images/common-logo.jpg</project-logo>
|
||||
|
||||
<!-- group logo -->
|
||||
<group-name>Hadoop</group-name>
|
||||
<group-description>Apache Hadoop</group-description>
|
||||
<group-url>http://hadoop.apache.org/</group-url>
|
||||
<group-logo>images/hadoop-logo.jpg</group-logo>
|
||||
|
||||
<!-- optional host logo (e.g. sourceforge logo)
|
||||
default skin: renders it at the bottom-left corner -->
|
||||
<host-url></host-url>
|
||||
<host-logo></host-logo>
|
||||
|
||||
<!-- relative url of a favicon file, normally favicon.ico -->
|
||||
<favicon-url>images/favicon.ico</favicon-url>
|
||||
|
||||
<!-- The following are used to construct a copyright statement -->
|
||||
<year>2009</year>
|
||||
<vendor>The Apache Software Foundation.</vendor>
|
||||
<copyright-link>http://www.apache.org/licenses/</copyright-link>
|
||||
|
||||
<!-- Some skins use this to form a 'breadcrumb trail' of links.
|
||||
Use location="alt" to move the trail to an alternate location
|
||||
(if the skin supports it).
|
||||
Omit the location attribute to display the trail in the default location.
|
||||
Use location="none" to not display the trail (if the skin supports it).
|
||||
For some skins just set the attributes to blank.
|
||||
-->
|
||||
<trail>
|
||||
<link1 name="Apache" href="http://www.apache.org/"/>
|
||||
<link2 name="Hadoop" href="http://hadoop.apache.org/"/>
|
||||
<link3 name="Common" href="http://hadoop.apache.org/common/"/>
|
||||
</trail>
|
||||
|
||||
<!-- Configure the TOC, i.e. the Table of Contents.
|
||||
@max-depth
|
||||
how many "section" levels need to be included in the
|
||||
generated Table of Contents (TOC).
|
||||
@min-sections
|
||||
Minimum required to create a TOC.
|
||||
@location ("page","menu","page,menu", "none")
|
||||
Where to show the TOC.
|
||||
-->
|
||||
<toc max-depth="2" min-sections="1" location="page"/>
|
||||
|
||||
<!-- Heading types can be clean|underlined|boxed -->
|
||||
<headings type="clean"/>
|
||||
|
||||
<!-- The optional feedback element will be used to construct a
|
||||
feedback link in the footer with the page pathname appended:
|
||||
<a href="@href">{@to}</a>
|
||||
<feedback to="webmaster@foo.com"
|
||||
href="mailto:webmaster@foo.com?subject=Feedback " >
|
||||
Send feedback about the website to:
|
||||
</feedback>
|
||||
-->
|
||||
<!--
|
||||
extra-css - here you can define custom css-elements that are
|
||||
a. overriding the fallback elements or
|
||||
b. adding the css definition from new elements that you may have
|
||||
used in your documentation.
|
||||
-->
|
||||
<extra-css>
|
||||
<!--Example of b.
|
||||
To define the css definition of a new element that you may have used
|
||||
in the class attribute of a <p> node.
|
||||
e.g. <p class="quote"/>
|
||||
-->
|
||||
p.quote {
|
||||
margin-left: 2em;
|
||||
padding: .5em;
|
||||
background-color: #f0f0f0;
|
||||
font-family: monospace;
|
||||
}
|
||||
|
||||
<!--Headers -->
|
||||
#content h1 {
|
||||
margin-bottom: .5em;
|
||||
font-size: 185%; color: black;
|
||||
font-family: arial;
|
||||
}
|
||||
h2, .h3 { font-size: 175%; color: black; font-family: arial; }
|
||||
h3, .h4 { font-size: 135%; color: black; font-family: arial; margin-bottom: 0.5em; }
|
||||
h4, .h5 { font-size: 125%; color: black; font-style: italic; font-weight: bold; font-family: arial; }
|
||||
h5, h6 { font-size: 110%; color: #363636; font-weight: bold; }
|
||||
|
||||
<!--Code Background -->
|
||||
pre.code {
|
||||
margin-left: 0em;
|
||||
padding: 0.5em;
|
||||
background-color: rgb(241,239,231);
|
||||
font-family: monospace;
|
||||
}
|
||||
|
||||
</extra-css>
|
||||
|
||||
<colors>
|
||||
<!-- These values are used for the generated CSS files. -->
|
||||
|
||||
<!-- Krysalis -->
|
||||
<!--
|
||||
<color name="header" value="#FFFFFF"/>
|
||||
|
||||
<color name="tab-selected" value="#a5b6c6" link="#000000" vlink="#000000" hlink="#000000"/>
|
||||
<color name="tab-unselected" value="#F7F7F7" link="#000000" vlink="#000000" hlink="#000000"/>
|
||||
<color name="subtab-selected" value="#a5b6c6" link="#000000" vlink="#000000" hlink="#000000"/>
|
||||
<color name="subtab-unselected" value="#a5b6c6" link="#000000" vlink="#000000" hlink="#000000"/>
|
||||
|
||||
<color name="heading" value="#a5b6c6"/>
|
||||
<color name="subheading" value="#CFDCED"/>
|
||||
|
||||
<color name="navstrip" value="#CFDCED" font="#000000" link="#000000" vlink="#000000" hlink="#000000"/>
|
||||
<color name="toolbox" value="#a5b6c6"/>
|
||||
<color name="border" value="#a5b6c6"/>
|
||||
|
||||
<color name="menu" value="#F7F7F7" link="#000000" vlink="#000000" hlink="#000000"/>
|
||||
<color name="dialog" value="#F7F7F7"/>
|
||||
|
||||
<color name="body" value="#ffffff" link="#0F3660" vlink="#009999" hlink="#000066"/>
|
||||
|
||||
<color name="table" value="#a5b6c6"/>
|
||||
<color name="table-cell" value="#ffffff"/>
|
||||
<color name="highlight" value="#ffff00"/>
|
||||
<color name="fixme" value="#cc6600"/>
|
||||
<color name="note" value="#006699"/>
|
||||
<color name="warning" value="#990000"/>
|
||||
<color name="code" value="#a5b6c6"/>
|
||||
|
||||
<color name="footer" value="#a5b6c6"/>
|
||||
-->
|
||||
|
||||
<!-- Forrest -->
|
||||
<!--
|
||||
<color name="header" value="#294563"/>
|
||||
|
||||
<color name="tab-selected" value="#4a6d8c" link="#0F3660" vlink="#0F3660" hlink="#000066"/>
|
||||
<color name="tab-unselected" value="#b5c7e7" link="#0F3660" vlink="#0F3660" hlink="#000066"/>
|
||||
<color name="subtab-selected" value="#4a6d8c" link="#0F3660" vlink="#0F3660" hlink="#000066"/>
|
||||
<color name="subtab-unselected" value="#4a6d8c" link="#0F3660" vlink="#0F3660" hlink="#000066"/>
|
||||
|
||||
<color name="heading" value="#294563"/>
|
||||
<color name="subheading" value="#4a6d8c"/>
|
||||
|
||||
<color name="navstrip" value="#cedfef" font="#0F3660" link="#0F3660" vlink="#0F3660" hlink="#000066"/>
|
||||
<color name="toolbox" value="#4a6d8c"/>
|
||||
<color name="border" value="#294563"/>
|
||||
|
||||
<color name="menu" value="#4a6d8c" font="#cedfef" link="#ffffff" vlink="#ffffff" hlink="#ffcf00"/>
|
||||
<color name="dialog" value="#4a6d8c"/>
|
||||
|
||||
<color name="body" value="#ffffff" link="#0F3660" vlink="#009999" hlink="#000066"/>
|
||||
|
||||
<color name="table" value="#7099C5"/>
|
||||
<color name="table-cell" value="#f0f0ff"/>
|
||||
<color name="highlight" value="#ffff00"/>
|
||||
<color name="fixme" value="#cc6600"/>
|
||||
<color name="note" value="#006699"/>
|
||||
<color name="warning" value="#990000"/>
|
||||
<color name="code" value="#CFDCED"/>
|
||||
|
||||
<color name="footer" value="#cedfef"/>
|
||||
-->
|
||||
|
||||
<!-- Collabnet -->
|
||||
<!--
|
||||
<color name="header" value="#003366"/>
|
||||
|
||||
<color name="tab-selected" value="#dddddd" link="#555555" vlink="#555555" hlink="#555555"/>
|
||||
<color name="tab-unselected" value="#999999" link="#ffffff" vlink="#ffffff" hlink="#ffffff"/>
|
||||
<color name="subtab-selected" value="#cccccc" link="#000000" vlink="#000000" hlink="#000000"/>
|
||||
<color name="subtab-unselected" value="#cccccc" link="#555555" vlink="#555555" hlink="#555555"/>
|
||||
|
||||
<color name="heading" value="#003366"/>
|
||||
<color name="subheading" value="#888888"/>
|
||||
|
||||
<color name="navstrip" value="#dddddd" font="#555555"/>
|
||||
<color name="toolbox" value="#dddddd" font="#555555"/>
|
||||
<color name="border" value="#999999"/>
|
||||
|
||||
<color name="menu" value="#ffffff"/>
|
||||
<color name="dialog" value="#eeeeee"/>
|
||||
|
||||
<color name="body" value="#ffffff"/>
|
||||
|
||||
<color name="table" value="#ccc"/>
|
||||
<color name="table-cell" value="#ffffff"/>
|
||||
<color name="highlight" value="#ffff00"/>
|
||||
<color name="fixme" value="#cc6600"/>
|
||||
<color name="note" value="#006699"/>
|
||||
<color name="warning" value="#990000"/>
|
||||
<color name="code" value="#003366"/>
|
||||
|
||||
<color name="footer" value="#ffffff"/>
|
||||
-->
|
||||
<!-- Lenya using pelt-->
|
||||
<!--
|
||||
<color name="header" value="#ffffff"/>
|
||||
|
||||
<color name="tab-selected" value="#4C6C8F" link="#ffffff" vlink="#ffffff" hlink="#ffffff"/>
|
||||
<color name="tab-unselected" value="#E5E4D9" link="#000000" vlink="#000000" hlink="#000000"/>
|
||||
<color name="subtab-selected" value="#000000" link="#000000" vlink="#000000" hlink="#000000"/>
|
||||
<color name="subtab-unselected" value="#E5E4D9" link="#000000" vlink="#000000" hlink="#000000"/>
|
||||
|
||||
<color name="heading" value="#E5E4D9"/>
|
||||
<color name="subheading" value="#000000"/>
|
||||
<color name="published" value="#4C6C8F" font="#FFFFFF"/>
|
||||
<color name="feedback" value="#4C6C8F" font="#FFFFFF" align="center"/>
|
||||
<color name="navstrip" value="#E5E4D9" font="#000000"/>
|
||||
|
||||
<color name="toolbox" value="#CFDCED" font="#000000"/>
|
||||
|
||||
<color name="border" value="#999999"/>
|
||||
<color name="menu" value="#4C6C8F" font="#ffffff" link="#ffffff" vlink="#ffffff" hlink="#ffffff" current="#FFCC33" />
|
||||
<color name="menuheading" value="#cfdced" font="#000000" />
|
||||
<color name="searchbox" value="#E5E4D9" font="#000000"/>
|
||||
|
||||
<color name="dialog" value="#CFDCED"/>
|
||||
<color name="body" value="#ffffff" />
|
||||
|
||||
<color name="table" value="#ccc"/>
|
||||
<color name="table-cell" value="#ffffff"/>
|
||||
<color name="highlight" value="#ffff00"/>
|
||||
<color name="fixme" value="#cc6600"/>
|
||||
<color name="note" value="#006699"/>
|
||||
<color name="warning" value="#990000"/>
|
||||
<color name="code" value="#003366"/>
|
||||
|
||||
<color name="footer" value="#E5E4D9"/>
|
||||
-->
|
||||
</colors>
|
||||
|
||||
<!-- Settings specific to PDF output. -->
|
||||
<pdf>
|
||||
<!--
|
||||
Supported page sizes are a0, a1, a2, a3, a4, a5, executive,
|
||||
folio, legal, ledger, letter, quarto, tabloid (default letter).
|
||||
Supported page orientations are portrait, landscape (default
|
||||
portrait).
|
||||
Supported text alignments are left, right, justify (default left).
|
||||
-->
|
||||
<page size="letter" orientation="portrait" text-align="left"/>
|
||||
|
||||
<!--
|
||||
Margins can be specified for top, bottom, inner, and outer
|
||||
edges. If double-sided="false", the inner edge is always left
|
||||
and the outer is always right. If double-sided="true", the
|
||||
inner edge will be left on odd pages, right on even pages,
|
||||
the outer edge vice versa.
|
||||
Specified below are the default settings.
|
||||
-->
|
||||
<margins double-sided="false">
|
||||
<top>1in</top>
|
||||
<bottom>1in</bottom>
|
||||
<inner>1.25in</inner>
|
||||
<outer>1in</outer>
|
||||
</margins>
|
||||
|
||||
<!--
|
||||
Print the URL text next to all links going outside the file
|
||||
-->
|
||||
<show-external-urls>false</show-external-urls>
|
||||
|
||||
<!--
|
||||
Disable the copyright footer on each page of the PDF.
|
||||
A footer is composed for each page. By default, a "credit" with role=pdf
|
||||
will be used, as explained below. Otherwise a copyright statement
|
||||
will be generated. This latter can be disabled.
|
||||
-->
|
||||
<disable-copyright-footer>false</disable-copyright-footer>
|
||||
</pdf>
|
||||
|
||||
<!-- Credits are typically rendered as a set of small clickable
|
||||
images in the page footer.
|
||||
Use box-location="alt" to move the credit to an alternate location
|
||||
(if the skin supports it).
|
||||
-->
|
||||
<credits>
|
||||
<credit box-location="alt">
|
||||
<name>Built with Apache Forrest</name>
|
||||
<url>http://forrest.apache.org/</url>
|
||||
<image>images/built-with-forrest-button.png</image>
|
||||
<width>88</width>
|
||||
<height>31</height>
|
||||
</credit>
|
||||
<!-- A credit with @role="pdf" will be used to compose a footer
|
||||
for each page in the PDF, using either "name" or "url" or both.
|
||||
-->
|
||||
<!--
|
||||
<credit role="pdf">
|
||||
<name>Built with Apache Forrest</name>
|
||||
<url>http://forrest.apache.org/</url>
|
||||
</credit>
|
||||
-->
|
||||
</credits>
|
||||
|
||||
</skinconfig>
|
|
@ -1,75 +0,0 @@
|
|||
<?xml version="1.0"?>
|
||||
<!--
|
||||
Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
contributor license agreements. See the NOTICE file distributed with
|
||||
this work for additional information regarding copyright ownership.
|
||||
The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
(the "License"); you may not use this file except in compliance with
|
||||
the License. You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
-->
|
||||
<status>
|
||||
|
||||
<developers>
|
||||
<person name="Joe Bloggs" email="joe@joescompany.org" id="JB" />
|
||||
<!-- Add more people here -->
|
||||
</developers>
|
||||
|
||||
<changes>
|
||||
<!-- Add new releases here -->
|
||||
<release version="0.1" date="unreleased">
|
||||
<!-- Some action types have associated images. By default, images are
|
||||
defined for 'add', 'fix', 'remove', 'update' and 'hack'. If you add
|
||||
src/documentation/resources/images/<foo>.jpg images, these will
|
||||
automatically be used for entries of type <foo>. -->
|
||||
|
||||
<action dev="JB" type="add" context="admin">
|
||||
Initial Import
|
||||
</action>
|
||||
<!-- Sample action:
|
||||
<action dev="JB" type="fix" due-to="Joe Contributor"
|
||||
due-to-email="joec@apache.org" fixes-bug="123">
|
||||
Fixed a bug in the Foo class.
|
||||
</action>
|
||||
-->
|
||||
</release>
|
||||
</changes>
|
||||
|
||||
<todo>
|
||||
<actions priority="high">
|
||||
<action context="docs" dev="JB">
|
||||
Customize this template project with your project's details. This
|
||||
TODO list is generated from 'status.xml'.
|
||||
</action>
|
||||
<action context="docs" dev="JB">
|
||||
Add lots of content. XML content goes in
|
||||
<code>src/documentation/content/xdocs</code>, or wherever the
|
||||
<code>${project.xdocs-dir}</code> property (set in
|
||||
<code>forrest.properties</code>) points.
|
||||
</action>
|
||||
<action context="feedback" dev="JB">
|
||||
Mail <link
|
||||
href="mailto:forrest-dev@xml.apache.org">forrest-dev@xml.apache.org</link>
|
||||
with feedback.
|
||||
</action>
|
||||
</actions>
|
||||
<!-- Add todo items. @context is an arbitrary string. Eg:
|
||||
<actions priority="high">
|
||||
<action context="code" dev="SN">
|
||||
</action>
|
||||
</actions>
|
||||
<actions priority="medium">
|
||||
<action context="docs" dev="open">
|
||||
</action>
|
||||
</actions>
|
||||
-->
|
||||
</todo>
|
||||
|
||||
</status>
|
|
@ -2739,10 +2739,10 @@ public class Configuration implements Iterable<Map.Entry<String,String>>,
|
|||
private void loadProperty(Properties properties, String name, String attr,
|
||||
String value, boolean finalParameter, String[] source) {
|
||||
if (value != null || allowNullValueProperties) {
|
||||
if (value == null) {
|
||||
value = DEFAULT_STRING_CHECK;
|
||||
}
|
||||
if (!finalParameters.contains(attr)) {
|
||||
if (value==null && allowNullValueProperties) {
|
||||
value = DEFAULT_STRING_CHECK;
|
||||
}
|
||||
properties.setProperty(attr, value);
|
||||
if(source != null) {
|
||||
updatingResource.put(attr, source);
|
||||
|
|
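The hunk above reorders Configuration.loadProperty's null handling: the sentinel substitution for a null value is performed up front, before the final-parameter guard, rather than inside it. A minimal standalone sketch of that ordering, using illustrative names rather than the exact Hadoop internals (DEFAULT_STRING_CHECK stands in for the real sentinel constant):

import java.util.HashSet;
import java.util.Properties;
import java.util.Set;

final class NullValueLoadSketch {
  private static final String DEFAULT_STRING_CHECK = "TRUE_CHECK"; // stand-in sentinel
  private final Properties properties = new Properties();
  private final Set<String> finalParameters = new HashSet<>();
  private final boolean allowNullValueProperties;

  NullValueLoadSketch(boolean allowNullValueProperties) {
    this.allowNullValueProperties = allowNullValueProperties;
  }

  void loadProperty(String attr, String value) {
    // Properties with no value are only kept when null values are explicitly allowed.
    if (value == null && !allowNullValueProperties) {
      return;
    }
    if (value == null) {
      value = DEFAULT_STRING_CHECK; // substitute the sentinel once, up front
    }
    // A parameter marked final by an earlier resource is never overwritten.
    if (!finalParameters.contains(attr)) {
      properties.setProperty(attr, value);
    }
  }
}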
|
@ -88,6 +88,11 @@ public abstract class ReconfigurableBase
|
|||
reconfigurationUtil = Preconditions.checkNotNull(ru);
|
||||
}
|
||||
|
||||
/**
|
||||
* Create a new configuration.
|
||||
*/
|
||||
protected abstract Configuration getNewConf();
|
||||
|
||||
@VisibleForTesting
|
||||
public Collection<PropertyChange> getChangedProperties(
|
||||
Configuration newConf, Configuration oldConf) {
|
||||
|
@ -108,17 +113,16 @@ public abstract class ReconfigurableBase
|
|||
public void run() {
|
||||
LOG.info("Starting reconfiguration task.");
|
||||
Configuration oldConf = this.parent.getConf();
|
||||
Configuration newConf = new Configuration();
|
||||
Configuration newConf = this.parent.getNewConf();
|
||||
Collection<PropertyChange> changes =
|
||||
this.parent.getChangedProperties(newConf, oldConf);
|
||||
Map<PropertyChange, Optional<String>> results = Maps.newHashMap();
|
||||
for (PropertyChange change : changes) {
|
||||
String errorMessage = null;
|
||||
if (!this.parent.isPropertyReconfigurable(change.prop)) {
|
||||
errorMessage = "Property " + change.prop +
|
||||
" is not reconfigurable";
|
||||
LOG.info(errorMessage);
|
||||
results.put(change, Optional.of(errorMessage));
|
||||
LOG.info(String.format(
|
||||
"Property %s is not configurable: old value: %s, new value: %s",
|
||||
change.prop, change.oldVal, change.newVal));
|
||||
continue;
|
||||
}
|
||||
LOG.info("Change property: " + change.prop + " from \""
|
||||
|
|
|
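The ReconfigurableBase hunks above route the reconfiguration task through a new
abstract getNewConf() hook instead of constructing a Configuration inline, so
subclasses (and tests) control where the refreshed configuration comes from. A
minimal sketch of a subclass supplying the hook; the class name is hypothetical,
it is left abstract so the pre-existing abstract members of ReconfigurableBase
can stay elided, and the constructor assumes the usual Configuration-taking
super constructor:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.conf.ReconfigurableBase;

    // Hypothetical subclass; only the hook added by this patch is shown.
    public abstract class ReloadableService extends ReconfigurableBase {

      public ReloadableService(Configuration conf) {
        super(conf);
      }

      @Override
      protected Configuration getNewConf() {
        // A test double could return a canned Configuration here; production
        // code would typically reload from the default resources.
        return new Configuration();
      }
    }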
@ -544,7 +544,9 @@ public class KMSClientProvider extends KeyProvider implements CryptoExtension,
|
|||
// AuthenticatedURL properly to set authToken post initialization)
|
||||
}
|
||||
HttpExceptionUtils.validateResponse(conn, expectedResponse);
|
||||
if (APPLICATION_JSON_MIME.equalsIgnoreCase(conn.getContentType())
|
||||
if (conn.getContentType() != null
|
||||
&& conn.getContentType().trim().toLowerCase()
|
||||
.startsWith(APPLICATION_JSON_MIME)
|
||||
&& klass != null) {
|
||||
ObjectMapper mapper = new ObjectMapper();
|
||||
InputStream is = null;
|
||||
|
|
|
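The KMSClientProvider hunk swaps an exact, case-insensitive Content-Type match
for a prefix check, so a header such as application/json;charset=utf-8 is still
treated as JSON; the explicit null guard is needed because the new code calls
methods on the header value. A standalone sketch of the difference (plain Java,
no KMS involved; the sample header value is illustrative):

    public class ContentTypeCheckDemo {
      private static final String APPLICATION_JSON_MIME = "application/json";

      // Mirrors the patched check: tolerate null and trailing parameters.
      static boolean isJson(String contentType) {
        return contentType != null
            && contentType.trim().toLowerCase().startsWith(APPLICATION_JSON_MIME);
      }

      public static void main(String[] args) {
        String header = "application/json;charset=utf-8";                   // sample value
        System.out.println(APPLICATION_JSON_MIME.equalsIgnoreCase(header)); // false (old check)
        System.out.println(isJson(header));                                 // true (new check)
      }
    }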
@ -1236,6 +1236,19 @@ public abstract class AbstractFileSystem {
|
|||
+ " doesn't support setStoragePolicy");
|
||||
}
|
||||
|
||||
/**
|
||||
* Retrieve the storage policy for a given file or directory.
|
||||
*
|
||||
* @param src file or directory path.
|
||||
* @return storage policy for the given file.
|
||||
* @throws IOException
|
||||
*/
|
||||
public BlockStoragePolicySpi getStoragePolicy(final Path src)
|
||||
throws IOException {
|
||||
throw new UnsupportedOperationException(getClass().getSimpleName()
|
||||
+ " doesn't support getStoragePolicy");
|
||||
}
|
||||
|
||||
/**
|
||||
* Retrieve all the storage policies supported by this file system.
|
||||
*
|
||||
|
|
|
@ -34,11 +34,15 @@ public class BlockLocation {
|
|||
private String[] cachedHosts; // Datanode hostnames with a cached replica
|
||||
private String[] names; // Datanode IP:xferPort for accessing the block
|
||||
private String[] topologyPaths; // Full path name in network topology
|
||||
private String[] storageIds; // Storage ID of each replica
|
||||
private StorageType[] storageTypes; // Storage type of each replica
|
||||
private long offset; // Offset of the block in the file
|
||||
private long length;
|
||||
private boolean corrupt;
|
||||
|
||||
private static final String[] EMPTY_STR_ARRAY = new String[0];
|
||||
private static final StorageType[] EMPTY_STORAGE_TYPE_ARRAY =
|
||||
new StorageType[0];
|
||||
|
||||
/**
|
||||
* Default Constructor
|
||||
|
@ -58,6 +62,8 @@ public class BlockLocation {
|
|||
this.offset = that.offset;
|
||||
this.length = that.length;
|
||||
this.corrupt = that.corrupt;
|
||||
this.storageIds = that.storageIds;
|
||||
this.storageTypes = that.storageTypes;
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -95,6 +101,13 @@ public class BlockLocation {
|
|||
|
||||
public BlockLocation(String[] names, String[] hosts, String[] cachedHosts,
|
||||
String[] topologyPaths, long offset, long length, boolean corrupt) {
|
||||
this(names, hosts, cachedHosts, topologyPaths, null, null, offset, length,
|
||||
corrupt);
|
||||
}
|
||||
|
||||
public BlockLocation(String[] names, String[] hosts, String[] cachedHosts,
|
||||
String[] topologyPaths, String[] storageIds, StorageType[] storageTypes,
|
||||
long offset, long length, boolean corrupt) {
|
||||
if (names == null) {
|
||||
this.names = EMPTY_STR_ARRAY;
|
||||
} else {
|
||||
|
@ -115,6 +128,16 @@ public class BlockLocation {
|
|||
} else {
|
||||
this.topologyPaths = topologyPaths;
|
||||
}
|
||||
if (storageIds == null) {
|
||||
this.storageIds = EMPTY_STR_ARRAY;
|
||||
} else {
|
||||
this.storageIds = storageIds;
|
||||
}
|
||||
if (storageTypes == null) {
|
||||
this.storageTypes = EMPTY_STORAGE_TYPE_ARRAY;
|
||||
} else {
|
||||
this.storageTypes = storageTypes;
|
||||
}
|
||||
this.offset = offset;
|
||||
this.length = length;
|
||||
this.corrupt = corrupt;
|
||||
|
@ -149,6 +172,20 @@ public class BlockLocation {
|
|||
return topologyPaths;
|
||||
}
|
||||
|
||||
/**
|
||||
* Get the storageID of each replica of the block.
|
||||
*/
|
||||
public String[] getStorageIds() {
|
||||
return storageIds;
|
||||
}
|
||||
|
||||
/**
|
||||
* Get the storage type of each replica of the block.
|
||||
*/
|
||||
public StorageType[] getStorageTypes() {
|
||||
return storageTypes;
|
||||
}
|
||||
|
||||
/**
|
||||
* Get the start offset of file associated with this block
|
||||
*/
|
||||
|
@ -235,6 +272,22 @@ public class BlockLocation {
|
|||
}
|
||||
}
|
||||
|
||||
public void setStorageIds(String[] storageIds) {
|
||||
if (storageIds == null) {
|
||||
this.storageIds = EMPTY_STR_ARRAY;
|
||||
} else {
|
||||
this.storageIds = storageIds;
|
||||
}
|
||||
}
|
||||
|
||||
public void setStorageTypes(StorageType[] storageTypes) {
|
||||
if (storageTypes == null) {
|
||||
this.storageTypes = EMPTY_STORAGE_TYPE_ARRAY;
|
||||
} else {
|
||||
this.storageTypes = storageTypes;
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public String toString() {
|
||||
StringBuilder result = new StringBuilder();
|
||||
|
|
|
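The BlockLocation hunks add per-replica storage IDs and storage types, a wider
constructor, and getters/setters that normalise null to empty arrays. A short
sketch of the new surface; the host, storage ID and sizes are made up, and
StorageType is assumed to be the org.apache.hadoop.fs enum touched later in
this patch:

    import org.apache.hadoop.fs.BlockLocation;
    import org.apache.hadoop.fs.StorageType;

    public class BlockLocationDemo {
      public static void main(String[] args) {
        BlockLocation loc = new BlockLocation(
            new String[] {"10.0.0.1:50010"},           // names (illustrative)
            new String[] {"dn1.example.com"},          // hosts
            new String[0],                             // cachedHosts
            new String[] {"/rack1/dn1.example.com"},   // topologyPaths
            new String[] {"DS-1234"},                  // storageIds (illustrative)
            new StorageType[] {StorageType.SSD},       // storageTypes
            0L, 134217728L, false);                    // offset, length, corrupt

        // The new getters return empty arrays rather than null when unset.
        System.out.println(loc.getStorageIds()[0]);    // DS-1234
        System.out.println(loc.getStorageTypes()[0]);  // SSD
      }
    }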
@ -19,11 +19,15 @@ package org.apache.hadoop.fs;
|
|||
|
||||
import java.io.IOException;
|
||||
import java.nio.ByteBuffer;
|
||||
import org.apache.hadoop.classification.InterfaceAudience;
|
||||
import org.apache.hadoop.classification.InterfaceStability;
|
||||
|
||||
/**
|
||||
* Implementers of this interface provide a read API that writes to a
|
||||
* ByteBuffer, not a byte[].
|
||||
*/
|
||||
@InterfaceAudience.Public
|
||||
@InterfaceStability.Evolving
|
||||
public interface ByteBufferReadable {
|
||||
/**
|
||||
* Reads up to buf.remaining() bytes into buf. Callers should use
|
||||
|
|
|
@ -363,5 +363,11 @@ public class CommonConfigurationKeysPublic {
|
|||
"hadoop.security.random.device.file.path";
|
||||
public static final String HADOOP_SECURITY_SECURE_RANDOM_DEVICE_FILE_PATH_DEFAULT =
|
||||
"/dev/urandom";
|
||||
|
||||
/** See <a href="{@docRoot}/../core-default.html">core-default.xml</a> */
|
||||
public static final String HADOOP_SHELL_MISSING_DEFAULT_FS_WARNING_KEY =
|
||||
"hadoop.shell.missing.defaultFs.warning";
|
||||
public static final boolean HADOOP_SHELL_MISSING_DEFAULT_FS_WARNING_DEFAULT =
|
||||
false;
|
||||
}
|
||||
|
||||
|
|
|
@ -46,12 +46,27 @@ public abstract class DelegateToFileSystem extends AbstractFileSystem {
|
|||
Configuration conf, String supportedScheme, boolean authorityRequired)
|
||||
throws IOException, URISyntaxException {
|
||||
super(theUri, supportedScheme, authorityRequired,
|
||||
theFsImpl.getDefaultPort());
|
||||
getDefaultPortIfDefined(theFsImpl));
|
||||
fsImpl = theFsImpl;
|
||||
fsImpl.initialize(theUri, conf);
|
||||
fsImpl.statistics = getStatistics();
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns the default port if the file system defines one.
|
||||
* {@link FileSystem#getDefaultPort()} returns 0 to indicate the default port
|
||||
* is undefined. However, the logic that consumes this value expects to
|
||||
* receive -1 to indicate the port is undefined, which agrees with the
|
||||
* contract of {@link URI#getPort()}.
|
||||
*
|
||||
* @param theFsImpl file system to check for default port
|
||||
* @return default port, or -1 if default port is undefined
|
||||
*/
|
||||
private static int getDefaultPortIfDefined(FileSystem theFsImpl) {
|
||||
int defaultPort = theFsImpl.getDefaultPort();
|
||||
return defaultPort != 0 ? defaultPort : -1;
|
||||
}
|
||||
|
||||
@Override
|
||||
public Path getInitialWorkingDirectory() {
|
||||
return fsImpl.getInitialWorkingDirectory();
|
||||
|
|
|
@ -49,6 +49,7 @@ import org.apache.hadoop.fs.permission.FsAction;
|
|||
import org.apache.hadoop.fs.permission.FsPermission;
|
||||
import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY;
|
||||
import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_DEFAULT_NAME_DEFAULT;
|
||||
|
||||
import org.apache.hadoop.io.IOUtils;
|
||||
import org.apache.hadoop.ipc.RpcClientException;
|
||||
import org.apache.hadoop.ipc.RpcServerException;
|
||||
|
@ -59,6 +60,8 @@ import org.apache.hadoop.security.UserGroupInformation;
|
|||
import org.apache.hadoop.security.token.Token;
|
||||
import org.apache.hadoop.util.ShutdownHookManager;
|
||||
|
||||
import com.google.common.base.Preconditions;
|
||||
|
||||
/**
|
||||
* The FileContext class provides an interface for users of the Hadoop
|
||||
* file system. It exposes a number of file system operations, e.g. create,
|
||||
|
@ -262,6 +265,7 @@ public class FileContext {
|
|||
* has been deliberately declared private.
|
||||
*/
|
||||
Path fixRelativePart(Path p) {
|
||||
Preconditions.checkNotNull(p, "path cannot be null");
|
||||
if (p.isUriPathAbsolute()) {
|
||||
return p;
|
||||
} else {
|
||||
|
@ -2688,6 +2692,25 @@ public class FileContext {
|
|||
}.resolve(this, absF);
|
||||
}
|
||||
|
||||
/**
|
||||
* Query the effective storage policy ID for the given file or directory.
|
||||
*
|
||||
* @param src file or directory path.
|
||||
* @return storage policy for the given file.
|
||||
* @throws IOException
|
||||
*/
|
||||
public BlockStoragePolicySpi getStoragePolicy(Path path) throws IOException {
|
||||
final Path absF = fixRelativePart(path);
|
||||
return new FSLinkResolver<BlockStoragePolicySpi>() {
|
||||
@Override
|
||||
public BlockStoragePolicySpi next(final AbstractFileSystem fs,
|
||||
final Path p)
|
||||
throws IOException {
|
||||
return fs.getStoragePolicy(p);
|
||||
}
|
||||
}.resolve(this, absF);
|
||||
}
|
||||
|
||||
/**
|
||||
* Retrieve all the storage policies supported by this file system.
|
||||
*
|
||||
|
|
|
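FileContext now exposes getStoragePolicy(Path), resolving symlinks via
FSLinkResolver and delegating to the matching AbstractFileSystem method added
above. A usage sketch; the path is illustrative and the call only returns
something meaningful on filesystems (such as HDFS) that override the new
default, which otherwise throws UnsupportedOperationException:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.BlockStoragePolicySpi;
    import org.apache.hadoop.fs.FileContext;
    import org.apache.hadoop.fs.Path;

    public class StoragePolicyDemo {
      public static void main(String[] args) throws Exception {
        FileContext fc = FileContext.getFileContext(new Configuration());
        // Illustrative path; assumes the underlying filesystem implements
        // the new getStoragePolicy() call.
        BlockStoragePolicySpi policy = fc.getStoragePolicy(new Path("/data/file"));
        System.out.println("policy: " + policy.getName());
      }
    }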
@ -31,7 +31,7 @@ import org.apache.hadoop.io.Writable;
|
|||
*/
|
||||
@InterfaceAudience.Public
|
||||
@InterfaceStability.Stable
|
||||
public class FileStatus implements Writable, Comparable {
|
||||
public class FileStatus implements Writable, Comparable<FileStatus> {
|
||||
|
||||
private Path path;
|
||||
private long length;
|
||||
|
@ -323,19 +323,14 @@ public class FileStatus implements Writable, Comparable {
|
|||
}
|
||||
|
||||
/**
|
||||
* Compare this object to another object
|
||||
*
|
||||
* @param o the object to be compared.
|
||||
* Compare this FileStatus to another FileStatus
|
||||
* @param o the FileStatus to be compared.
|
||||
* @return a negative integer, zero, or a positive integer as this object
|
||||
* is less than, equal to, or greater than the specified object.
|
||||
*
|
||||
* @throws ClassCastException if the specified object's is not of
|
||||
* type FileStatus
|
||||
*/
|
||||
@Override
|
||||
public int compareTo(Object o) {
|
||||
FileStatus other = (FileStatus)o;
|
||||
return this.getPath().compareTo(other.getPath());
|
||||
public int compareTo(FileStatus o) {
|
||||
return this.getPath().compareTo(o.getPath());
|
||||
}
|
||||
|
||||
/** Compare if this object is equal to another object
|
||||
|
|
|
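Making FileStatus implement Comparable&lt;FileStatus&gt; removes the unchecked cast
in compareTo and lets callers sort listings without raw types, which the
Globber change later in this patch relies on. A quick sketch (the directory is
illustrative):

    import java.util.Arrays;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileStatus;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class SortedListingDemo {
      public static void main(String[] args) throws Exception {
        FileSystem fs = FileSystem.getLocal(new Configuration());
        FileStatus[] statuses = fs.listStatus(new Path("/tmp"));  // illustrative dir
        // Ordering is by path, as defined by compareTo above; no casts needed
        // now that FileStatus is Comparable<FileStatus>.
        Arrays.sort(statuses);
        for (FileStatus st : statuses) {
          System.out.println(st.getPath());
        }
      }
    }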
@ -20,7 +20,8 @@ package org.apache.hadoop.fs;
|
|||
import java.io.Closeable;
|
||||
import java.io.FileNotFoundException;
|
||||
import java.io.IOException;
|
||||
import java.lang.ref.WeakReference;
|
||||
import java.lang.ref.PhantomReference;
|
||||
import java.lang.ref.ReferenceQueue;
|
||||
import java.net.URI;
|
||||
import java.net.URISyntaxException;
|
||||
import java.security.PrivilegedExceptionAction;
|
||||
|
@ -32,7 +33,6 @@ import java.util.HashMap;
|
|||
import java.util.HashSet;
|
||||
import java.util.IdentityHashMap;
|
||||
import java.util.Iterator;
|
||||
import java.util.LinkedList;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
import java.util.NoSuchElementException;
|
||||
|
@ -67,6 +67,9 @@ import org.apache.hadoop.util.Progressable;
|
|||
import org.apache.hadoop.util.ReflectionUtils;
|
||||
import org.apache.hadoop.util.ShutdownHookManager;
|
||||
import org.apache.hadoop.util.StringUtils;
|
||||
import org.apache.htrace.Span;
|
||||
import org.apache.htrace.Trace;
|
||||
import org.apache.htrace.TraceScope;
|
||||
|
||||
import com.google.common.annotations.VisibleForTesting;
|
||||
|
||||
|
@ -1498,7 +1501,9 @@ public abstract class FileSystem extends Configured implements Closeable {
|
|||
/**
|
||||
* List the statuses of the files/directories in the given path if the path is
|
||||
* a directory.
|
||||
*
|
||||
* <p>
|
||||
* Does not guarantee to return the List of files/directories status in a
|
||||
* sorted order.
|
||||
* @param f given path
|
||||
* @return the statuses of the files/directories in the given patch
|
||||
* @throws FileNotFoundException when the path does not exist;
|
||||
|
@ -1540,6 +1545,9 @@ public abstract class FileSystem extends Configured implements Closeable {
|
|||
/**
|
||||
* Filter files/directories in the given path using the user-supplied path
|
||||
* filter.
|
||||
* <p>
|
||||
* Does not guarantee to return the List of files/directories status in a
|
||||
* sorted order.
|
||||
*
|
||||
* @param f
|
||||
* a path name
|
||||
|
@ -1560,6 +1568,9 @@ public abstract class FileSystem extends Configured implements Closeable {
|
|||
/**
|
||||
* Filter files/directories in the given list of paths using default
|
||||
* path filter.
|
||||
* <p>
|
||||
* Does not guarantee to return the List of files/directories status in a
|
||||
* sorted order.
|
||||
*
|
||||
* @param files
|
||||
* a list of paths
|
||||
|
@ -1576,6 +1587,9 @@ public abstract class FileSystem extends Configured implements Closeable {
|
|||
/**
|
||||
* Filter files/directories in the given list of paths using user-supplied
|
||||
* path filter.
|
||||
* <p>
|
||||
* Does not guarantee to return the List of files/directories status in a
|
||||
* sorted order.
|
||||
*
|
||||
* @param files
|
||||
* a list of paths
|
||||
|
@ -1736,6 +1750,8 @@ public abstract class FileSystem extends Configured implements Closeable {
|
|||
* while consuming the entries. Each file system implementation should
|
||||
* override this method and provide a more efficient implementation, if
|
||||
* possible.
|
||||
* Does not guarantee to return the iterator that traverses statuses
|
||||
* of the files in a sorted order.
|
||||
*
|
||||
* @param p target path
|
||||
* @return remote iterator
|
||||
|
@ -1763,6 +1779,8 @@ public abstract class FileSystem extends Configured implements Closeable {
|
|||
|
||||
/**
|
||||
* List the statuses and block locations of the files in the given path.
|
||||
* Does not guarantee to return the iterator that traverses statuses
|
||||
* of the files in a sorted order.
|
||||
*
|
||||
* If the path is a directory,
|
||||
* if recursive is false, returns files in the directory;
|
||||
|
@ -2070,9 +2088,9 @@ public abstract class FileSystem extends Configured implements Closeable {
|
|||
/** Return the total size of all files in the filesystem.*/
|
||||
public long getUsed() throws IOException{
|
||||
long used = 0;
|
||||
FileStatus[] files = listStatus(new Path("/"));
|
||||
for(FileStatus file:files){
|
||||
used += file.getLen();
|
||||
RemoteIterator<LocatedFileStatus> files = listFiles(new Path("/"), true);
|
||||
while (files.hasNext()) {
|
||||
used += files.next().getLen();
|
||||
}
|
||||
return used;
|
||||
}
|
||||
|
@ -2625,6 +2643,19 @@ public abstract class FileSystem extends Configured implements Closeable {
|
|||
+ " doesn't support setStoragePolicy");
|
||||
}
|
||||
|
||||
/**
|
||||
* Query the effective storage policy ID for the given file or directory.
|
||||
*
|
||||
* @param src file or directory path.
|
||||
* @return storage policy for the given file.
|
||||
* @throws IOException
|
||||
*/
|
||||
public BlockStoragePolicySpi getStoragePolicy(final Path src)
|
||||
throws IOException {
|
||||
throw new UnsupportedOperationException(getClass().getSimpleName()
|
||||
+ " doesn't support getStoragePolicy");
|
||||
}
|
||||
|
||||
/**
|
||||
* Retrieve all the storage policies supported by this file system.
|
||||
*
|
||||
|
@ -2675,10 +2706,19 @@ public abstract class FileSystem extends Configured implements Closeable {
|
|||
|
||||
private static FileSystem createFileSystem(URI uri, Configuration conf
|
||||
) throws IOException {
|
||||
Class<?> clazz = getFileSystemClass(uri.getScheme(), conf);
|
||||
FileSystem fs = (FileSystem)ReflectionUtils.newInstance(clazz, conf);
|
||||
fs.initialize(uri, conf);
|
||||
return fs;
|
||||
TraceScope scope = Trace.startSpan("FileSystem#createFileSystem");
|
||||
Span span = scope.getSpan();
|
||||
if (span != null) {
|
||||
span.addKVAnnotation("scheme", uri.getScheme());
|
||||
}
|
||||
try {
|
||||
Class<?> clazz = getFileSystemClass(uri.getScheme(), conf);
|
||||
FileSystem fs = (FileSystem)ReflectionUtils.newInstance(clazz, conf);
|
||||
fs.initialize(uri, conf);
|
||||
return fs;
|
||||
} finally {
|
||||
scope.close();
|
||||
}
|
||||
}
|
||||
|
||||
/** Caching FileSystem objects */
|
||||
|
@ -2905,16 +2945,6 @@ public abstract class FileSystem extends Configured implements Closeable {
|
|||
volatile int readOps;
|
||||
volatile int largeReadOps;
|
||||
volatile int writeOps;
|
||||
/**
|
||||
* Stores a weak reference to the thread owning this StatisticsData.
|
||||
* This allows us to remove StatisticsData objects that pertain to
|
||||
* threads that no longer exist.
|
||||
*/
|
||||
final WeakReference<Thread> owner;
|
||||
|
||||
StatisticsData(WeakReference<Thread> owner) {
|
||||
this.owner = owner;
|
||||
}
|
||||
|
||||
/**
|
||||
* Add another StatisticsData object to this one.
|
||||
|
@ -2987,15 +3017,35 @@ public abstract class FileSystem extends Configured implements Closeable {
|
|||
private final ThreadLocal<StatisticsData> threadData;
|
||||
|
||||
/**
|
||||
* List of all thread-local data areas. Protected by the Statistics lock.
|
||||
* Set of all thread-local data areas. Protected by the Statistics lock.
|
||||
* The references to the statistics data are kept using phantom references
|
||||
* to the associated threads. Proper clean-up is performed by the cleaner
|
||||
* thread when the threads are garbage collected.
|
||||
*/
|
||||
private LinkedList<StatisticsData> allData;
|
||||
private final Set<StatisticsDataReference> allData;
|
||||
|
||||
/**
|
||||
* Global reference queue and a cleaner thread that manage statistics data
|
||||
* references from all filesystem instances.
|
||||
*/
|
||||
private static final ReferenceQueue<Thread> STATS_DATA_REF_QUEUE;
|
||||
private static final Thread STATS_DATA_CLEANER;
|
||||
|
||||
static {
|
||||
STATS_DATA_REF_QUEUE = new ReferenceQueue<Thread>();
|
||||
// start a single daemon cleaner thread
|
||||
STATS_DATA_CLEANER = new Thread(new StatisticsDataReferenceCleaner());
|
||||
STATS_DATA_CLEANER.
|
||||
setName(StatisticsDataReferenceCleaner.class.getName());
|
||||
STATS_DATA_CLEANER.setDaemon(true);
|
||||
STATS_DATA_CLEANER.start();
|
||||
}
|
||||
|
||||
public Statistics(String scheme) {
|
||||
this.scheme = scheme;
|
||||
this.rootData = new StatisticsData(null);
|
||||
this.rootData = new StatisticsData();
|
||||
this.threadData = new ThreadLocal<StatisticsData>();
|
||||
this.allData = null;
|
||||
this.allData = new HashSet<StatisticsDataReference>();
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -3005,7 +3055,7 @@ public abstract class FileSystem extends Configured implements Closeable {
|
|||
*/
|
||||
public Statistics(Statistics other) {
|
||||
this.scheme = other.scheme;
|
||||
this.rootData = new StatisticsData(null);
|
||||
this.rootData = new StatisticsData();
|
||||
other.visitAll(new StatisticsAggregator<Void>() {
|
||||
@Override
|
||||
public void accept(StatisticsData data) {
|
||||
|
@ -3017,6 +3067,63 @@ public abstract class FileSystem extends Configured implements Closeable {
|
|||
}
|
||||
});
|
||||
this.threadData = new ThreadLocal<StatisticsData>();
|
||||
this.allData = new HashSet<StatisticsDataReference>();
|
||||
}
|
||||
|
||||
/**
|
||||
* A phantom reference to a thread that also includes the data associated
|
||||
* with that thread. On the thread being garbage collected, it is enqueued
|
||||
* to the reference queue for clean-up.
|
||||
*/
|
||||
private class StatisticsDataReference extends PhantomReference<Thread> {
|
||||
private final StatisticsData data;
|
||||
|
||||
public StatisticsDataReference(StatisticsData data, Thread thread) {
|
||||
super(thread, STATS_DATA_REF_QUEUE);
|
||||
this.data = data;
|
||||
}
|
||||
|
||||
public StatisticsData getData() {
|
||||
return data;
|
||||
}
|
||||
|
||||
/**
|
||||
* Performs clean-up action when the associated thread is garbage
|
||||
* collected.
|
||||
*/
|
||||
public void cleanUp() {
|
||||
// use the statistics lock for safety
|
||||
synchronized (Statistics.this) {
|
||||
/*
|
||||
* If the thread that created this thread-local data no longer exists,
|
||||
* remove the StatisticsData from our list and fold the values into
|
||||
* rootData.
|
||||
*/
|
||||
rootData.add(data);
|
||||
allData.remove(this);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Background action to act on references being removed.
|
||||
*/
|
||||
private static class StatisticsDataReferenceCleaner implements Runnable {
|
||||
@Override
|
||||
public void run() {
|
||||
while (true) {
|
||||
try {
|
||||
StatisticsDataReference ref =
|
||||
(StatisticsDataReference)STATS_DATA_REF_QUEUE.remove();
|
||||
ref.cleanUp();
|
||||
} catch (Throwable th) {
|
||||
// the cleaner thread should continue to run even if there are
|
||||
// exceptions, including InterruptedException
|
||||
LOG.warn("exception in the cleaner thread but it will continue to "
|
||||
+ "run", th);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -3025,14 +3132,12 @@ public abstract class FileSystem extends Configured implements Closeable {
|
|||
public StatisticsData getThreadStatistics() {
|
||||
StatisticsData data = threadData.get();
|
||||
if (data == null) {
|
||||
data = new StatisticsData(
|
||||
new WeakReference<Thread>(Thread.currentThread()));
|
||||
data = new StatisticsData();
|
||||
threadData.set(data);
|
||||
StatisticsDataReference ref =
|
||||
new StatisticsDataReference(data, Thread.currentThread());
|
||||
synchronized(this) {
|
||||
if (allData == null) {
|
||||
allData = new LinkedList<StatisticsData>();
|
||||
}
|
||||
allData.add(data);
|
||||
allData.add(ref);
|
||||
}
|
||||
}
|
||||
return data;
|
||||
|
@ -3090,21 +3195,9 @@ public abstract class FileSystem extends Configured implements Closeable {
|
|||
*/
|
||||
private synchronized <T> T visitAll(StatisticsAggregator<T> visitor) {
|
||||
visitor.accept(rootData);
|
||||
if (allData != null) {
|
||||
for (Iterator<StatisticsData> iter = allData.iterator();
|
||||
iter.hasNext(); ) {
|
||||
StatisticsData data = iter.next();
|
||||
visitor.accept(data);
|
||||
if (data.owner.get() == null) {
|
||||
/*
|
||||
* If the thread that created this thread-local data no
|
||||
* longer exists, remove the StatisticsData from our list
|
||||
* and fold the values into rootData.
|
||||
*/
|
||||
rootData.add(data);
|
||||
iter.remove();
|
||||
}
|
||||
}
|
||||
for (StatisticsDataReference ref: allData) {
|
||||
StatisticsData data = ref.getData();
|
||||
visitor.accept(data);
|
||||
}
|
||||
return visitor.aggregate();
|
||||
}
|
||||
|
@ -3211,7 +3304,7 @@ public abstract class FileSystem extends Configured implements Closeable {
|
|||
@Override
|
||||
public String toString() {
|
||||
return visitAll(new StatisticsAggregator<String>() {
|
||||
private StatisticsData total = new StatisticsData(null);
|
||||
private StatisticsData total = new StatisticsData();
|
||||
|
||||
@Override
|
||||
public void accept(StatisticsData data) {
|
||||
|
@ -3244,7 +3337,7 @@ public abstract class FileSystem extends Configured implements Closeable {
|
|||
*/
|
||||
public void reset() {
|
||||
visitAll(new StatisticsAggregator<Void>() {
|
||||
private StatisticsData total = new StatisticsData(null);
|
||||
private StatisticsData total = new StatisticsData();
|
||||
|
||||
@Override
|
||||
public void accept(StatisticsData data) {
|
||||
|
@ -3266,6 +3359,11 @@ public abstract class FileSystem extends Configured implements Closeable {
|
|||
public String getScheme() {
|
||||
return scheme;
|
||||
}
|
||||
|
||||
@VisibleForTesting
|
||||
synchronized int getAllThreadLocalDataSize() {
|
||||
return allData.size();
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
|
|
|
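The FileSystem.Statistics rework replaces per-thread WeakReferences (pruned
opportunistically inside visitAll) with PhantomReferences on a shared
ReferenceQueue and a single daemon cleaner thread, so thread-local data is
folded into rootData as soon as the owning thread is collected. The underlying
JDK pattern in isolation, independent of Hadoop (class and queue names are
illustrative):

    import java.lang.ref.PhantomReference;
    import java.lang.ref.ReferenceQueue;

    public class PhantomCleanupDemo {
      static final ReferenceQueue<Object> QUEUE = new ReferenceQueue<Object>();

      // Pairs a phantom reference with the state that must be cleaned up,
      // mirroring StatisticsDataReference in the patch.
      static class TrackedRef extends PhantomReference<Object> {
        final String state;
        TrackedRef(Object referent, String state) {
          super(referent, QUEUE);
          this.state = state;
        }
      }

      public static void main(String[] args) throws InterruptedException {
        Object owner = new Object();
        TrackedRef ref = new TrackedRef(owner, "per-owner data");
        owner = null;                // drop the only strong reference
        System.gc();                 // hint only; collection timing is not guaranteed
        TrackedRef collected = (TrackedRef) QUEUE.remove(5000);  // wait up to 5s
        if (collected != null) {
          System.out.println("fold into root: " + collected.state);
        }
        System.out.println("same reference object: " + (collected == ref));
      }
    }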
@ -19,6 +19,9 @@
|
|||
package org.apache.hadoop.fs;
|
||||
|
||||
import java.io.*;
|
||||
import java.net.InetAddress;
|
||||
import java.net.URI;
|
||||
import java.net.UnknownHostException;
|
||||
import java.util.ArrayList;
|
||||
import java.util.Arrays;
|
||||
import java.util.Enumeration;
|
||||
|
@ -446,6 +449,10 @@ public class FileUtil {
|
|||
IOUtils.closeStream( in );
|
||||
throw e;
|
||||
}
|
||||
} else if (!src.canRead()) {
|
||||
throw new IOException(src.toString() +
|
||||
": Permission denied");
|
||||
|
||||
} else {
|
||||
throw new IOException(src.toString() +
|
||||
": No such file or directory");
|
||||
|
@ -727,6 +734,12 @@ public class FileUtil {
|
|||
}
|
||||
}
|
||||
|
||||
if (entry.isLink()) {
|
||||
File src = new File(outputDir, entry.getLinkName());
|
||||
HardLink.createHardLink(src, outputFile);
|
||||
return;
|
||||
}
|
||||
|
||||
int count;
|
||||
byte data[] = new byte[2048];
|
||||
BufferedOutputStream outputStream = new BufferedOutputStream(
|
||||
|
@ -1319,4 +1332,43 @@ public class FileUtil {
|
|||
unexpandedWildcardClasspath.toString()};
|
||||
return jarCp;
|
||||
}
|
||||
|
||||
public static boolean compareFs(FileSystem srcFs, FileSystem destFs) {
|
||||
if (srcFs==null || destFs==null) {
|
||||
return false;
|
||||
}
|
||||
URI srcUri = srcFs.getUri();
|
||||
URI dstUri = destFs.getUri();
|
||||
if (srcUri.getScheme()==null) {
|
||||
return false;
|
||||
}
|
||||
if (!srcUri.getScheme().equals(dstUri.getScheme())) {
|
||||
return false;
|
||||
}
|
||||
String srcHost = srcUri.getHost();
|
||||
String dstHost = dstUri.getHost();
|
||||
if ((srcHost!=null) && (dstHost!=null)) {
|
||||
if (srcHost.equals(dstHost)) {
|
||||
return srcUri.getPort()==dstUri.getPort();
|
||||
}
|
||||
try {
|
||||
srcHost = InetAddress.getByName(srcHost).getCanonicalHostName();
|
||||
dstHost = InetAddress.getByName(dstHost).getCanonicalHostName();
|
||||
} catch (UnknownHostException ue) {
|
||||
if (LOG.isDebugEnabled()) {
|
||||
LOG.debug("Could not compare file-systems. Unknown host: ", ue);
|
||||
}
|
||||
return false;
|
||||
}
|
||||
if (!srcHost.equals(dstHost)) {
|
||||
return false;
|
||||
}
|
||||
} else if (srcHost==null && dstHost!=null) {
|
||||
return false;
|
||||
} else if (srcHost!=null) {
|
||||
return false;
|
||||
}
|
||||
// check for ports
|
||||
return srcUri.getPort()==dstUri.getPort();
|
||||
}
|
||||
}
|
||||
|
|
|
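FileUtil gains a public compareFs() helper that treats two FileSystem instances
as the same endpoint only when scheme, host (canonicalised through DNS when the
literal names differ) and port all agree. A small sketch using two local
filesystems, which trivially compare equal; comparing against a remote URI
follows the same call:

    import java.net.URI;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.FileUtil;

    public class CompareFsDemo {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        FileSystem a = FileSystem.get(URI.create("file:///"), conf);
        FileSystem b = FileSystem.getLocal(conf);
        // true: same scheme, no host, same (undefined) port
        System.out.println(FileUtil.compareFs(a, b));
      }
    }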
@ -627,6 +627,12 @@ public class FilterFileSystem extends FileSystem {
|
|||
fs.setStoragePolicy(src, policyName);
|
||||
}
|
||||
|
||||
@Override
|
||||
public BlockStoragePolicySpi getStoragePolicy(final Path src)
|
||||
throws IOException {
|
||||
return fs.getStoragePolicy(src);
|
||||
}
|
||||
|
||||
@Override
|
||||
public Collection<? extends BlockStoragePolicySpi> getAllStoragePolicies()
|
||||
throws IOException {
|
||||
|
|
|
@ -405,6 +405,12 @@ public abstract class FilterFs extends AbstractFileSystem {
|
|||
myFs.setStoragePolicy(path, policyName);
|
||||
}
|
||||
|
||||
@Override
|
||||
public BlockStoragePolicySpi getStoragePolicy(final Path src)
|
||||
throws IOException {
|
||||
return myFs.getStoragePolicy(src);
|
||||
}
|
||||
|
||||
@Override
|
||||
public Collection<? extends BlockStoragePolicySpi> getAllStoragePolicies()
|
||||
throws IOException {
|
||||
|
|
|
@ -32,9 +32,16 @@ import org.apache.hadoop.conf.Configured;
|
|||
import org.apache.hadoop.fs.shell.Command;
|
||||
import org.apache.hadoop.fs.shell.CommandFactory;
|
||||
import org.apache.hadoop.fs.shell.FsCommand;
|
||||
import org.apache.hadoop.tracing.SpanReceiverHost;
|
||||
import org.apache.hadoop.tools.TableListing;
|
||||
import org.apache.hadoop.tracing.TraceUtils;
|
||||
import org.apache.hadoop.util.StringUtils;
|
||||
import org.apache.hadoop.util.Tool;
|
||||
import org.apache.hadoop.util.ToolRunner;
|
||||
import org.apache.htrace.Sampler;
|
||||
import org.apache.htrace.SamplerBuilder;
|
||||
import org.apache.htrace.Trace;
|
||||
import org.apache.htrace.TraceScope;
|
||||
|
||||
/** Provide command line access to a FileSystem. */
|
||||
@InterfaceAudience.Private
|
||||
|
@ -47,10 +54,14 @@ public class FsShell extends Configured implements Tool {
|
|||
private FileSystem fs;
|
||||
private Trash trash;
|
||||
protected CommandFactory commandFactory;
|
||||
private Sampler traceSampler;
|
||||
|
||||
private final String usagePrefix =
|
||||
"Usage: hadoop fs [generic options]";
|
||||
|
||||
private SpanReceiverHost spanReceiverHost;
|
||||
static final String SEHLL_HTRACE_PREFIX = "dfs.shell.htrace.";
|
||||
|
||||
/**
|
||||
* Default ctor with no configuration. Be sure to invoke
|
||||
* {@link #setConf(Configuration)} with a valid configuration prior
|
||||
|
@ -91,6 +102,8 @@ public class FsShell extends Configured implements Tool {
|
|||
commandFactory.addObject(new Usage(), "-usage");
|
||||
registerCommands(commandFactory);
|
||||
}
|
||||
this.spanReceiverHost =
|
||||
SpanReceiverHost.get(getConf(), SEHLL_HTRACE_PREFIX);
|
||||
}
|
||||
|
||||
protected void registerCommands(CommandFactory factory) {
|
||||
|
@ -276,7 +289,8 @@ public class FsShell extends Configured implements Tool {
|
|||
public int run(String argv[]) throws Exception {
|
||||
// initialize FsShell
|
||||
init();
|
||||
|
||||
traceSampler = new SamplerBuilder(TraceUtils.
|
||||
wrapHadoopConf(SEHLL_HTRACE_PREFIX, getConf())).build();
|
||||
int exitCode = -1;
|
||||
if (argv.length < 1) {
|
||||
printUsage(System.err);
|
||||
|
@ -288,7 +302,19 @@ public class FsShell extends Configured implements Tool {
|
|||
if (instance == null) {
|
||||
throw new UnknownCommandException();
|
||||
}
|
||||
exitCode = instance.run(Arrays.copyOfRange(argv, 1, argv.length));
|
||||
TraceScope scope = Trace.startSpan(instance.getCommandName(), traceSampler);
|
||||
if (scope.getSpan() != null) {
|
||||
String args = StringUtils.join(" ", argv);
|
||||
if (args.length() > 2048) {
|
||||
args = args.substring(0, 2048);
|
||||
}
|
||||
scope.getSpan().addKVAnnotation("args", args);
|
||||
}
|
||||
try {
|
||||
exitCode = instance.run(Arrays.copyOfRange(argv, 1, argv.length));
|
||||
} finally {
|
||||
scope.close();
|
||||
}
|
||||
} catch (IllegalArgumentException e) {
|
||||
displayError(cmd, e.getLocalizedMessage());
|
||||
if (instance != null) {
|
||||
|
@ -327,6 +353,9 @@ public class FsShell extends Configured implements Tool {
|
|||
fs.close();
|
||||
fs = null;
|
||||
}
|
||||
if (this.spanReceiverHost != null) {
|
||||
this.spanReceiverHost.closeReceivers();
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
|
|
|
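FsShell now builds an HTrace sampler from configuration under the
dfs.shell.htrace. prefix and wraps each command run in a trace span annotated
with the (truncated) argument string. The wrapping pattern on its own, using
only the htrace calls already visible in this hunk; Sampler.ALWAYS stands in
for the configured sampler, and the span name and traced work are placeholders:

    import org.apache.hadoop.util.StringUtils;
    import org.apache.htrace.Sampler;
    import org.apache.htrace.Trace;
    import org.apache.htrace.TraceScope;

    public class TraceWrapDemo {
      static int runTraced(String commandName, String[] argv) {
        TraceScope scope = Trace.startSpan(commandName, Sampler.ALWAYS);
        if (scope.getSpan() != null) {
          // Same annotation style as the FsShell change above.
          scope.getSpan().addKVAnnotation("args", StringUtils.join(" ", argv));
        }
        try {
          return doRun(argv);        // placeholder for the real command
        } finally {
          scope.close();
        }
      }

      private static int doRun(String[] argv) {
        return 0;
      }

      public static void main(String[] args) {
        System.out.println(runTraced("ls", new String[] {"-R", "/tmp"}));
      }
    }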
@ -20,6 +20,7 @@ package org.apache.hadoop.fs;
|
|||
import java.io.FileNotFoundException;
|
||||
import java.io.IOException;
|
||||
import java.util.ArrayList;
|
||||
import java.util.Arrays;
|
||||
import java.util.List;
|
||||
|
||||
import org.apache.commons.logging.LogFactory;
|
||||
|
@ -27,6 +28,10 @@ import org.apache.commons.logging.Log;
|
|||
import org.apache.hadoop.classification.InterfaceAudience;
|
||||
import org.apache.hadoop.classification.InterfaceStability;
|
||||
|
||||
import org.apache.htrace.Span;
|
||||
import org.apache.htrace.Trace;
|
||||
import org.apache.htrace.TraceScope;
|
||||
|
||||
@InterfaceAudience.Private
|
||||
@InterfaceStability.Unstable
|
||||
class Globber {
|
||||
|
@ -135,6 +140,19 @@ class Globber {
|
|||
}
|
||||
|
||||
public FileStatus[] glob() throws IOException {
|
||||
TraceScope scope = Trace.startSpan("Globber#glob");
|
||||
Span span = scope.getSpan();
|
||||
if (span != null) {
|
||||
span.addKVAnnotation("pattern", pathPattern.toUri().getPath());
|
||||
}
|
||||
try {
|
||||
return doGlob();
|
||||
} finally {
|
||||
scope.close();
|
||||
}
|
||||
}
|
||||
|
||||
private FileStatus[] doGlob() throws IOException {
|
||||
// First we get the scheme and authority of the pattern that was passed
|
||||
// in.
|
||||
String scheme = schemeFromPath(pathPattern);
|
||||
|
@ -285,6 +303,14 @@ class Globber {
|
|||
(flattenedPatterns.size() <= 1)) {
|
||||
return null;
|
||||
}
|
||||
return results.toArray(new FileStatus[0]);
|
||||
/*
|
||||
* In general, the results list will already be sorted, since listStatus
|
||||
* returns results in sorted order for many Hadoop filesystems. However,
|
||||
* not all Hadoop filesystems have this property. So we sort here in order
|
||||
* to get consistent results. See HADOOP-10798 for details.
|
||||
*/
|
||||
FileStatus ret[] = results.toArray(new FileStatus[0]);
|
||||
Arrays.sort(ret);
|
||||
return ret;
|
||||
}
|
||||
}
|
||||
|
|
|
@ -90,17 +90,13 @@ public class LocatedFileStatus extends FileStatus {
|
|||
}
|
||||
|
||||
/**
|
||||
* Compare this object to another object
|
||||
*
|
||||
* @param o the object to be compared.
|
||||
* Compare this FileStatus to another FileStatus
|
||||
* @param o the FileStatus to be compared.
|
||||
* @return a negative integer, zero, or a positive integer as this object
|
||||
* is less than, equal to, or greater than the specified object.
|
||||
*
|
||||
* @throws ClassCastException if the specified object's is not of
|
||||
* type FileStatus
|
||||
*/
|
||||
@Override
|
||||
public int compareTo(Object o) {
|
||||
public int compareTo(FileStatus o) {
|
||||
return super.compareTo(o);
|
||||
}
|
||||
|
||||
|
|
|
@ -33,6 +33,11 @@ import java.io.OutputStream;
|
|||
import java.io.FileDescriptor;
|
||||
import java.net.URI;
|
||||
import java.nio.ByteBuffer;
|
||||
import java.nio.file.Files;
|
||||
import java.nio.file.NoSuchFileException;
|
||||
import java.nio.file.attribute.BasicFileAttributes;
|
||||
import java.nio.file.attribute.BasicFileAttributeView;
|
||||
import java.nio.file.attribute.FileTime;
|
||||
import java.util.Arrays;
|
||||
import java.util.EnumSet;
|
||||
import java.util.StringTokenizer;
|
||||
|
@ -59,6 +64,8 @@ public class RawLocalFileSystem extends FileSystem {
|
|||
// Temporary workaround for HADOOP-9652.
|
||||
private static boolean useDeprecatedFileStatus = true;
|
||||
|
||||
private FsPermission umask;
|
||||
|
||||
@VisibleForTesting
|
||||
public static void useStatIfAvailable() {
|
||||
useDeprecatedFileStatus = !Stat.isAvailable();
|
||||
|
@ -92,6 +99,7 @@ public class RawLocalFileSystem extends FileSystem {
|
|||
public void initialize(URI uri, Configuration conf) throws IOException {
|
||||
super.initialize(uri, conf);
|
||||
setConf(conf);
|
||||
umask = FsPermission.getUMask(conf);
|
||||
}
|
||||
|
||||
/*******************************************************
|
||||
|
@ -211,9 +219,13 @@ public class RawLocalFileSystem extends FileSystem {
|
|||
private LocalFSFileOutputStream(Path f, boolean append,
|
||||
FsPermission permission) throws IOException {
|
||||
File file = pathToFile(f);
|
||||
if (!append && permission == null) {
|
||||
permission = FsPermission.getFileDefault();
|
||||
}
|
||||
if (permission == null) {
|
||||
this.fos = new FileOutputStream(file, append);
|
||||
} else {
|
||||
permission = permission.applyUMask(umask);
|
||||
if (Shell.WINDOWS && NativeIO.isAvailable()) {
|
||||
this.fos = NativeIO.Windows.createFileOutputStreamWithMode(file,
|
||||
append, permission.toShort());
|
||||
|
@ -264,11 +276,13 @@ public class RawLocalFileSystem extends FileSystem {
|
|||
if (!exists(f)) {
|
||||
throw new FileNotFoundException("File " + f + " not found");
|
||||
}
|
||||
if (getFileStatus(f).isDirectory()) {
|
||||
FileStatus status = getFileStatus(f);
|
||||
if (status.isDirectory()) {
|
||||
throw new IOException("Cannot append to a diretory (=" + f + " )");
|
||||
}
|
||||
return new FSDataOutputStream(new BufferedOutputStream(
|
||||
createOutputStreamWithMode(f, true, null), bufferSize), statistics);
|
||||
createOutputStreamWithMode(f, true, null), bufferSize), statistics,
|
||||
status.getLen());
|
||||
}
|
||||
|
||||
@Override
|
||||
|
@ -446,35 +460,38 @@ public class RawLocalFileSystem extends FileSystem {
|
|||
if (!localf.exists()) {
|
||||
throw new FileNotFoundException("File " + f + " does not exist");
|
||||
}
|
||||
if (localf.isFile()) {
|
||||
if (!useDeprecatedFileStatus) {
|
||||
return new FileStatus[] { getFileStatus(f) };
|
||||
|
||||
if (localf.isDirectory()) {
|
||||
String[] names = localf.list();
|
||||
if (names == null) {
|
||||
return null;
|
||||
}
|
||||
return new FileStatus[] {
|
||||
new DeprecatedRawLocalFileStatus(localf, getDefaultBlockSize(f), this)};
|
||||
results = new FileStatus[names.length];
|
||||
int j = 0;
|
||||
for (int i = 0; i < names.length; i++) {
|
||||
try {
|
||||
// Assemble the path using the Path 3 arg constructor to make sure
|
||||
// paths with colon are properly resolved on Linux
|
||||
results[j] = getFileStatus(new Path(f, new Path(null, null,
|
||||
names[i])));
|
||||
j++;
|
||||
} catch (FileNotFoundException e) {
|
||||
// ignore the files not found since the dir list may have
|
||||
// changed since the names[] list was generated.
|
||||
}
|
||||
}
|
||||
if (j == names.length) {
|
||||
return results;
|
||||
}
|
||||
return Arrays.copyOf(results, j);
|
||||
}
|
||||
|
||||
String[] names = localf.list();
|
||||
if (names == null) {
|
||||
return null;
|
||||
if (!useDeprecatedFileStatus) {
|
||||
return new FileStatus[] { getFileStatus(f) };
|
||||
}
|
||||
results = new FileStatus[names.length];
|
||||
int j = 0;
|
||||
for (int i = 0; i < names.length; i++) {
|
||||
try {
|
||||
// Assemble the path using the Path 3 arg constructor to make sure
|
||||
// paths with colon are properly resolved on Linux
|
||||
results[j] = getFileStatus(new Path(f, new Path(null, null, names[i])));
|
||||
j++;
|
||||
} catch (FileNotFoundException e) {
|
||||
// ignore the files not found since the dir list may have changed
|
||||
// since the names[] list was generated.
|
||||
}
|
||||
}
|
||||
if (j == names.length) {
|
||||
return results;
|
||||
}
|
||||
return Arrays.copyOf(results, j);
|
||||
return new FileStatus[] {
|
||||
new DeprecatedRawLocalFileStatus(localf,
|
||||
getDefaultBlockSize(f), this) };
|
||||
}
|
||||
|
||||
protected boolean mkOneDir(File p2f) throws IOException {
|
||||
|
@ -484,27 +501,27 @@ public class RawLocalFileSystem extends FileSystem {
|
|||
protected boolean mkOneDirWithMode(Path p, File p2f, FsPermission permission)
|
||||
throws IOException {
|
||||
if (permission == null) {
|
||||
return p2f.mkdir();
|
||||
} else {
|
||||
if (Shell.WINDOWS && NativeIO.isAvailable()) {
|
||||
try {
|
||||
NativeIO.Windows.createDirectoryWithMode(p2f, permission.toShort());
|
||||
return true;
|
||||
} catch (IOException e) {
|
||||
if (LOG.isDebugEnabled()) {
|
||||
LOG.debug(String.format(
|
||||
"NativeIO.createDirectoryWithMode error, path = %s, mode = %o",
|
||||
p2f, permission.toShort()), e);
|
||||
}
|
||||
return false;
|
||||
permission = FsPermission.getDirDefault();
|
||||
}
|
||||
permission = permission.applyUMask(umask);
|
||||
if (Shell.WINDOWS && NativeIO.isAvailable()) {
|
||||
try {
|
||||
NativeIO.Windows.createDirectoryWithMode(p2f, permission.toShort());
|
||||
return true;
|
||||
} catch (IOException e) {
|
||||
if (LOG.isDebugEnabled()) {
|
||||
LOG.debug(String.format(
|
||||
"NativeIO.createDirectoryWithMode error, path = %s, mode = %o",
|
||||
p2f, permission.toShort()), e);
|
||||
}
|
||||
} else {
|
||||
boolean b = p2f.mkdir();
|
||||
if (b) {
|
||||
setPermission(p, permission);
|
||||
}
|
||||
return b;
|
||||
return false;
|
||||
}
|
||||
} else {
|
||||
boolean b = p2f.mkdir();
|
||||
if (b) {
|
||||
setPermission(p, permission);
|
||||
}
|
||||
return b;
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -635,9 +652,23 @@ public class RawLocalFileSystem extends FileSystem {
|
|||
return !super.getOwner().isEmpty();
|
||||
}
|
||||
|
||||
DeprecatedRawLocalFileStatus(File f, long defaultBlockSize, FileSystem fs) {
|
||||
private static long getLastAccessTime(File f) throws IOException {
|
||||
long accessTime;
|
||||
try {
|
||||
accessTime = Files.readAttributes(f.toPath(),
|
||||
BasicFileAttributes.class).lastAccessTime().toMillis();
|
||||
} catch (NoSuchFileException e) {
|
||||
throw new FileNotFoundException("File " + f + " does not exist");
|
||||
}
|
||||
return accessTime;
|
||||
}
|
||||
|
||||
DeprecatedRawLocalFileStatus(File f, long defaultBlockSize, FileSystem fs)
|
||||
throws IOException {
|
||||
super(f.length(), f.isDirectory(), 1, defaultBlockSize,
|
||||
f.lastModified(), new Path(f.getPath()).makeQualified(fs.getUri(),
|
||||
f.lastModified(), getLastAccessTime(f),
|
||||
null, null, null,
|
||||
new Path(f.getPath()).makeQualified(fs.getUri(),
|
||||
fs.getWorkingDirectory()));
|
||||
}
|
||||
|
||||
|
@ -749,24 +780,23 @@ public class RawLocalFileSystem extends FileSystem {
|
|||
}
|
||||
|
||||
/**
|
||||
* Sets the {@link Path}'s last modified time <em>only</em> to the given
|
||||
* valid time.
|
||||
* Sets the {@link Path}'s last modified time and last access time to
|
||||
* the given valid times.
|
||||
*
|
||||
* @param mtime the modification time to set (only if greater than zero).
|
||||
* @param atime currently ignored.
|
||||
* @throws IOException if setting the last modified time fails.
|
||||
* @param mtime the modification time to set (only if no less than zero).
|
||||
* @param atime the access time to set (only if no less than zero).
|
||||
* @throws IOException if setting the times fails.
|
||||
*/
|
||||
@Override
|
||||
public void setTimes(Path p, long mtime, long atime) throws IOException {
|
||||
File f = pathToFile(p);
|
||||
if(mtime >= 0) {
|
||||
if(!f.setLastModified(mtime)) {
|
||||
throw new IOException(
|
||||
"couldn't set last-modified time to " +
|
||||
mtime +
|
||||
" for " +
|
||||
f.getAbsolutePath());
|
||||
}
|
||||
try {
|
||||
BasicFileAttributeView view = Files.getFileAttributeView(
|
||||
pathToFile(p).toPath(), BasicFileAttributeView.class);
|
||||
FileTime fmtime = (mtime >= 0) ? FileTime.fromMillis(mtime) : null;
|
||||
FileTime fatime = (atime >= 0) ? FileTime.fromMillis(atime) : null;
|
||||
view.setTimes(fmtime, fatime, null);
|
||||
} catch (NoSuchFileException e) {
|
||||
throw new FileNotFoundException("File " + p + " does not exist");
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
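RawLocalFileSystem.setTimes() now goes through java.nio.file's
BasicFileAttributeView, which is what lets it set atime as well as mtime, skip
either value when it is negative, and surface a missing file as
FileNotFoundException. The underlying JDK pattern in isolation (the temp file
and timestamps are illustrative):

    import java.io.IOException;
    import java.nio.file.Files;
    import java.nio.file.Path;
    import java.nio.file.attribute.BasicFileAttributeView;
    import java.nio.file.attribute.BasicFileAttributes;
    import java.nio.file.attribute.FileTime;

    public class SetTimesDemo {
      public static void main(String[] args) throws IOException {
        Path p = Files.createTempFile("settimes", ".tmp");
        BasicFileAttributeView view =
            Files.getFileAttributeView(p, BasicFileAttributeView.class);
        long mtime = 1000000000000L;   // illustrative epoch millis
        long atime = 1000000500000L;
        // Passing null leaves a timestamp unchanged, which is how the patch
        // skips negative mtime/atime values.
        view.setTimes(FileTime.fromMillis(mtime), FileTime.fromMillis(atime), null);
        System.out.println(
            Files.readAttributes(p, BasicFileAttributes.class).lastAccessTime());
        Files.delete(p);
      }
    }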
@ -33,10 +33,11 @@ import org.apache.hadoop.util.StringUtils;
|
|||
@InterfaceAudience.Public
|
||||
@InterfaceStability.Unstable
|
||||
public enum StorageType {
|
||||
DISK(false),
|
||||
// sorted by the speed of the storage types, from fast to slow
|
||||
RAM_DISK(true),
|
||||
SSD(false),
|
||||
ARCHIVE(false),
|
||||
RAM_DISK(true);
|
||||
DISK(false),
|
||||
ARCHIVE(false);
|
||||
|
||||
private final boolean isTransient;
|
||||
|
||||
|
|
|
@ -89,9 +89,6 @@ public class TrashPolicyDefault extends TrashPolicy {
|
|||
this.emptierInterval = (long)(conf.getFloat(
|
||||
FS_TRASH_CHECKPOINT_INTERVAL_KEY, FS_TRASH_CHECKPOINT_INTERVAL_DEFAULT)
|
||||
* MSECS_PER_MINUTE);
|
||||
LOG.info("Namenode trash configuration: Deletion interval = " +
|
||||
(this.deletionInterval / MSECS_PER_MINUTE) + " minutes, Emptier interval = " +
|
||||
(this.emptierInterval / MSECS_PER_MINUTE) + " minutes.");
|
||||
}
|
||||
|
||||
private Path makeTrashRelativePath(Path basePath, Path rmFilePath) {
|
||||
|
@ -251,6 +248,10 @@ public class TrashPolicyDefault extends TrashPolicy {
|
|||
" minutes that is used for deletion instead");
|
||||
this.emptierInterval = deletionInterval;
|
||||
}
|
||||
LOG.info("Namenode trash configuration: Deletion interval = "
|
||||
+ (deletionInterval / MSECS_PER_MINUTE)
|
||||
+ " minutes, Emptier interval = "
|
||||
+ (emptierInterval / MSECS_PER_MINUTE) + " minutes.");
|
||||
}
|
||||
|
||||
@Override
|
||||
|
|
|
@ -375,10 +375,7 @@ public class FsPermission implements Writable {
|
|||
public ImmutableFsPermission(short permission) {
|
||||
super(permission);
|
||||
}
|
||||
@Override
|
||||
public FsPermission applyUMask(FsPermission umask) {
|
||||
throw new UnsupportedOperationException();
|
||||
}
|
||||
|
||||
@Override
|
||||
public void readFields(DataInput in) throws IOException {
|
||||
throw new UnsupportedOperationException();
|
||||
|
|
|
@ -33,7 +33,7 @@ import org.apache.hadoop.classification.InterfaceStability;
|
|||
@InterfaceStability.Unstable
|
||||
class UmaskParser extends PermissionParser {
|
||||
private static Pattern chmodOctalPattern =
|
||||
Pattern.compile("^\\s*[+]?()([0-7]{3})\\s*$"); // no leading 1 for sticky bit
|
||||
Pattern.compile("^\\s*[+]?(0*)([0-7]{3})\\s*$"); // no leading 1 for sticky bit
|
||||
private static Pattern umaskSymbolicPattern = /* not allow X or t */
|
||||
Pattern.compile("\\G\\s*([ugoa]*)([+=-]+)([rwx]*)([,\\s]*)\\s*");
|
||||
final short umaskMode;
|
||||
|
|
|
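The UmaskParser change widens the octal pattern from ^\s*[+]?()([0-7]{3})\s*$
to ^\s*[+]?(0*)([0-7]{3})\s*$, so umask values written with leading zeros (for
example "0022", as commonly printed by shells) are accepted while the three
significant digits still land in the second capture group. A quick regex check
of the two patterns (plain Java; the sample values are illustrative):

    import java.util.regex.Pattern;

    public class UmaskPatternDemo {
      public static void main(String[] args) {
        Pattern oldPattern = Pattern.compile("^\\s*[+]?()([0-7]{3})\\s*$");
        Pattern newPattern = Pattern.compile("^\\s*[+]?(0*)([0-7]{3})\\s*$");
        for (String umask : new String[] {"022", "0022", "00022"}) {
          System.out.println(umask
              + " old=" + oldPattern.matcher(umask).matches()
              + " new=" + newPattern.matcher(umask).matches());
        }
        // The old pattern only matches "022"; the new one matches all three.
      }
    }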
@ -0,0 +1,303 @@
|
|||
/**
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
* regarding copyright ownership. The ASF licenses this file
|
||||
* to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
package org.apache.hadoop.fs.sftp;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.util.HashMap;
|
||||
import java.util.HashSet;
|
||||
import java.util.Iterator;
|
||||
import java.util.Set;
|
||||
|
||||
import org.apache.commons.logging.Log;
|
||||
import org.apache.commons.logging.LogFactory;
|
||||
import org.apache.hadoop.util.StringUtils;
|
||||
|
||||
import com.jcraft.jsch.ChannelSftp;
|
||||
import com.jcraft.jsch.JSch;
|
||||
import com.jcraft.jsch.JSchException;
|
||||
import com.jcraft.jsch.Session;
|
||||
|
||||
/** Concurrent/Multiple Connections. */
|
||||
class SFTPConnectionPool {
|
||||
|
||||
public static final Log LOG = LogFactory.getLog(SFTPFileSystem.class);
|
||||
// Maximum number of allowed live connections. This doesn't mean we cannot
|
||||
// have more live connections. It means that when we have more
|
||||
// live connections than this threshold, any unused connection will be
|
||||
// closed.
|
||||
private int maxConnection;
|
||||
private int liveConnectionCount = 0;
|
||||
private HashMap<ConnectionInfo, HashSet<ChannelSftp>> idleConnections =
|
||||
new HashMap<ConnectionInfo, HashSet<ChannelSftp>>();
|
||||
private HashMap<ChannelSftp, ConnectionInfo> con2infoMap =
|
||||
new HashMap<ChannelSftp, ConnectionInfo>();
|
||||
|
||||
SFTPConnectionPool(int maxConnection) {
|
||||
this.maxConnection = maxConnection;
|
||||
}
|
||||
|
||||
synchronized ChannelSftp getFromPool(ConnectionInfo info) throws IOException {
|
||||
Set<ChannelSftp> cons = idleConnections.get(info);
|
||||
ChannelSftp channel;
|
||||
|
||||
if (cons != null && cons.size() > 0) {
|
||||
Iterator<ChannelSftp> it = cons.iterator();
|
||||
if (it.hasNext()) {
|
||||
channel = it.next();
|
||||
idleConnections.remove(info);
|
||||
return channel;
|
||||
} else {
|
||||
throw new IOException("Connection pool error.");
|
||||
}
|
||||
}
|
||||
return null;
|
||||
}
|
||||
|
||||
/** Add the channel into pool.
|
||||
* @param channel
|
||||
*/
|
||||
synchronized void returnToPool(ChannelSftp channel) {
|
||||
ConnectionInfo info = con2infoMap.get(channel);
|
||||
HashSet<ChannelSftp> cons = idleConnections.get(info);
|
||||
if (cons == null) {
|
||||
cons = new HashSet<ChannelSftp>();
|
||||
idleConnections.put(info, cons);
|
||||
}
|
||||
cons.add(channel);
|
||||
|
||||
}
|
||||
|
||||
/** Shutdown the connection pool and close all open connections. */
|
||||
synchronized void shutdown() {
|
||||
if (this.con2infoMap == null){
|
||||
return; // already shutdown in case it is called
|
||||
}
|
||||
LOG.info("Inside shutdown, con2infoMap size=" + con2infoMap.size());
|
||||
|
||||
this.maxConnection = 0;
|
||||
Set<ChannelSftp> cons = con2infoMap.keySet();
|
||||
if (cons != null && cons.size() > 0) {
|
||||
// make a copy since we need to modify the underlying Map
|
||||
Set<ChannelSftp> copy = new HashSet<ChannelSftp>(cons);
|
||||
// Initiate disconnect from all outstanding connections
|
||||
for (ChannelSftp con : copy) {
|
||||
try {
|
||||
disconnect(con);
|
||||
} catch (IOException ioe) {
|
||||
ConnectionInfo info = con2infoMap.get(con);
|
||||
LOG.error(
|
||||
"Error encountered while closing connection to " + info.getHost(),
|
||||
ioe);
|
||||
}
|
||||
}
|
||||
}
|
||||
// make sure no further connections can be returned.
|
||||
this.idleConnections = null;
|
||||
this.con2infoMap = null;
|
||||
}
|
||||
|
||||
public synchronized int getMaxConnection() {
|
||||
return maxConnection;
|
||||
}
|
||||
|
||||
public synchronized void setMaxConnection(int maxConn) {
|
||||
this.maxConnection = maxConn;
|
||||
}
|
||||
|
||||
public ChannelSftp connect(String host, int port, String user,
|
||||
String password, String keyFile) throws IOException {
|
||||
// get connection from pool
|
||||
ConnectionInfo info = new ConnectionInfo(host, port, user);
|
||||
ChannelSftp channel = getFromPool(info);
|
||||
|
||||
if (channel != null) {
|
||||
if (channel.isConnected()) {
|
||||
return channel;
|
||||
} else {
|
||||
channel = null;
|
||||
synchronized (this) {
|
||||
--liveConnectionCount;
|
||||
con2infoMap.remove(channel);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// create a new connection and add to pool
|
||||
JSch jsch = new JSch();
|
||||
Session session = null;
|
||||
try {
|
||||
if (user == null || user.length() == 0) {
|
||||
user = System.getProperty("user.name");
|
||||
}
|
||||
|
||||
if (password == null) {
|
||||
password = "";
|
||||
}
|
||||
|
||||
if (keyFile != null && keyFile.length() > 0) {
|
||||
jsch.addIdentity(keyFile);
|
||||
}
|
||||
|
||||
if (port <= 0) {
|
||||
session = jsch.getSession(user, host);
|
||||
} else {
|
||||
session = jsch.getSession(user, host, port);
|
||||
}
|
||||
|
||||
session.setPassword(password);
|
||||
|
||||
java.util.Properties config = new java.util.Properties();
|
||||
config.put("StrictHostKeyChecking", "no");
|
||||
session.setConfig(config);
|
||||
|
||||
session.connect();
|
||||
channel = (ChannelSftp) session.openChannel("sftp");
|
||||
channel.connect();
|
||||
|
||||
synchronized (this) {
|
||||
con2infoMap.put(channel, info);
|
||||
liveConnectionCount++;
|
||||
}
|
||||
|
||||
return channel;
|
||||
|
||||
} catch (JSchException e) {
|
||||
throw new IOException(StringUtils.stringifyException(e));
|
||||
}
|
||||
}
|
||||
|
||||
void disconnect(ChannelSftp channel) throws IOException {
|
||||
if (channel != null) {
|
||||
// close connection if too many active connections
|
||||
boolean closeConnection = false;
|
||||
synchronized (this) {
|
||||
if (liveConnectionCount > maxConnection) {
|
||||
--liveConnectionCount;
|
||||
con2infoMap.remove(channel);
|
||||
closeConnection = true;
|
||||
}
|
||||
}
|
||||
if (closeConnection) {
|
||||
if (channel.isConnected()) {
|
||||
try {
|
||||
Session session = channel.getSession();
|
||||
channel.disconnect();
|
||||
session.disconnect();
|
||||
} catch (JSchException e) {
|
||||
throw new IOException(StringUtils.stringifyException(e));
|
||||
}
|
||||
}
|
||||
|
||||
} else {
|
||||
returnToPool(channel);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
public int getIdleCount() {
|
||||
return this.idleConnections.size();
|
||||
}
|
||||
|
||||
public int getLiveConnCount() {
|
||||
return this.liveConnectionCount;
|
||||
}
|
||||
|
||||
public int getConnPoolSize() {
|
||||
return this.con2infoMap.size();
|
||||
}
|
||||
|
||||
/**
|
||||
* Class to capture the minimal set of information that distinguish
|
||||
* between different connections.
|
||||
*/
|
||||
static class ConnectionInfo {
|
||||
private String host = "";
|
||||
private int port;
|
||||
private String user = "";
|
||||
|
||||
ConnectionInfo(String hst, int prt, String usr) {
|
||||
this.host = hst;
|
||||
this.port = prt;
|
||||
this.user = usr;
|
||||
}
|
||||
|
||||
public String getHost() {
|
||||
return host;
|
||||
}
|
||||
|
||||
public void setHost(String hst) {
|
||||
this.host = hst;
|
||||
}
|
||||
|
||||
public int getPort() {
|
||||
return port;
|
||||
}
|
||||
|
||||
public void setPort(int prt) {
|
||||
this.port = prt;
|
||||
}
|
||||
|
||||
public String getUser() {
|
||||
return user;
|
||||
}
|
||||
|
||||
public void setUser(String usr) {
|
||||
this.user = usr;
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean equals(Object obj) {
|
||||
if (this == obj) {
|
||||
return true;
|
||||
}
|
||||
|
||||
if (obj instanceof ConnectionInfo) {
|
||||
ConnectionInfo con = (ConnectionInfo) obj;
|
||||
|
||||
boolean ret = true;
|
||||
if (this.host == null || !this.host.equalsIgnoreCase(con.host)) {
|
||||
ret = false;
|
||||
}
|
||||
if (this.port >= 0 && this.port != con.port) {
|
||||
ret = false;
|
||||
}
|
||||
if (this.user == null || !this.user.equalsIgnoreCase(con.user)) {
|
||||
ret = false;
|
||||
}
|
||||
return ret;
|
||||
} else {
|
||||
return false;
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
@Override
|
||||
public int hashCode() {
|
||||
int hashCode = 0;
|
||||
if (host != null) {
|
||||
hashCode += host.hashCode();
|
||||
}
|
||||
hashCode += port;
|
||||
if (user != null) {
|
||||
hashCode += user.hashCode();
|
||||
}
|
||||
return hashCode;
|
||||
}
|
||||
|
||||
}
|
||||
}
|
|
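The new SFTPConnectionPool keeps idle ChannelSftp connections keyed by
host/port/user and closes surplus channels once liveConnectionCount exceeds
maxConnection. A sketch of how a caller in the same package (such as the
SFTPFileSystem that follows) would drive it; the host, credentials and remote
path are placeholders:

    package org.apache.hadoop.fs.sftp;

    import java.io.IOException;
    import java.util.Vector;

    import com.jcraft.jsch.ChannelSftp;
    import com.jcraft.jsch.SftpException;

    public class SFTPPoolUsageSketch {
      public static void main(String[] args) throws IOException, SftpException {
        // Package-private constructor, so this sketch has to live alongside
        // the pool in org.apache.hadoop.fs.sftp.
        SFTPConnectionPool pool = new SFTPConnectionPool(5);
        ChannelSftp channel =
            pool.connect("sftp.example.com", 22, "hadoop", "secret", null);  // placeholders
        try {
          Vector<?> entries = channel.ls("/incoming");   // plain jsch call
          System.out.println(entries.size() + " entries");
        } finally {
          // Returns the channel to the idle set, or closes it if the pool is
          // already over its maxConnection threshold.
          pool.disconnect(channel);
        }
      }
    }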
@ -0,0 +1,671 @@
|
|||
/**
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
* regarding copyright ownership. The ASF licenses this file
|
||||
* to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
package org.apache.hadoop.fs.sftp;
|
||||
|
||||
import java.io.FileNotFoundException;
|
||||
import java.io.IOException;
|
||||
import java.io.InputStream;
|
||||
import java.io.OutputStream;
|
||||
import java.net.URI;
|
||||
import java.net.URLDecoder;
|
||||
import java.util.ArrayList;
|
||||
import java.util.Vector;
|
||||
|
||||
import org.apache.commons.logging.Log;
|
||||
import org.apache.commons.logging.LogFactory;
|
||||
import org.apache.hadoop.conf.Configuration;
|
||||
import org.apache.hadoop.fs.FSDataInputStream;
|
||||
import org.apache.hadoop.fs.FSDataOutputStream;
|
||||
import org.apache.hadoop.fs.FileStatus;
|
||||
import org.apache.hadoop.fs.FileSystem;
|
||||
import org.apache.hadoop.fs.Path;
|
||||
import org.apache.hadoop.fs.permission.FsPermission;
|
||||
import org.apache.hadoop.util.Progressable;
|
||||
|
||||
import com.jcraft.jsch.ChannelSftp;
|
||||
import com.jcraft.jsch.ChannelSftp.LsEntry;
|
||||
import com.jcraft.jsch.SftpATTRS;
|
||||
import com.jcraft.jsch.SftpException;
|
||||
|
||||
/** SFTP FileSystem. */
|
||||
public class SFTPFileSystem extends FileSystem {
|
||||
|
||||
public static final Log LOG = LogFactory.getLog(SFTPFileSystem.class);
|
||||
|
||||
private SFTPConnectionPool connectionPool;
|
||||
private URI uri;
|
||||
|
||||
private static final int DEFAULT_SFTP_PORT = 22;
|
||||
private static final int DEFAULT_MAX_CONNECTION = 5;
|
||||
public static final int DEFAULT_BUFFER_SIZE = 1024 * 1024;
|
||||
public static final int DEFAULT_BLOCK_SIZE = 4 * 1024;
|
||||
public static final String FS_SFTP_USER_PREFIX = "fs.sftp.user.";
|
||||
public static final String FS_SFTP_PASSWORD_PREFIX = "fs.sftp.password.";
|
||||
public static final String FS_SFTP_HOST = "fs.sftp.host";
|
||||
public static final String FS_SFTP_HOST_PORT = "fs.sftp.host.port";
|
||||
public static final String FS_SFTP_KEYFILE = "fs.sftp.keyfile";
|
||||
public static final String FS_SFTP_CONNECTION_MAX = "fs.sftp.connection.max";
|
||||
public static final String E_SAME_DIRECTORY_ONLY =
|
||||
"only same directory renames are supported";
|
||||
public static final String E_HOST_NULL = "Invalid host specified";
|
||||
public static final String E_USER_NULL =
|
||||
"No user specified for sftp connection. Expand URI or credential file.";
|
||||
public static final String E_PATH_DIR = "Path %s is a directory.";
|
||||
public static final String E_FILE_STATUS = "Failed to get file status";
|
||||
public static final String E_FILE_NOTFOUND = "File %s does not exist.";
|
||||
public static final String E_FILE_EXIST = "File already exists: %s";
|
||||
public static final String E_CREATE_DIR =
|
||||
"create(): Mkdirs failed to create: %s";
|
||||
public static final String E_DIR_CREATE_FROMFILE =
|
||||
"Can't make directory for path %s since it is a file.";
|
||||
public static final String E_MAKE_DIR_FORPATH =
|
||||
"Can't make directory for path \"%s\" under \"%s\".";
|
||||
public static final String E_DIR_NOTEMPTY = "Directory: %s is not empty.";
|
||||
public static final String E_FILE_CHECK_FAILED = "File check failed";
|
||||
public static final String E_NOT_SUPPORTED = "Not supported";
|
||||
public static final String E_SPATH_NOTEXIST = "Source path %s does not exist";
|
||||
public static final String E_DPATH_EXIST =
|
||||
"Destination path %s already exist, cannot rename!";
|
||||
public static final String E_FAILED_GETHOME = "Failed to get home directory";
|
||||
public static final String E_FAILED_DISCONNECT = "Failed to disconnect";
|
||||
|
||||
/**
|
||||
* Set configuration from URI.
|
||||
*
|
||||
* @param uri
|
||||
* @param conf
|
||||
* @throws IOException
|
||||
*/
|
||||
private void setConfigurationFromURI(URI uriInfo, Configuration conf)
|
||||
throws IOException {
|
||||
|
||||
// get host information from URI
|
||||
String host = uriInfo.getHost();
|
||||
host = (host == null) ? conf.get(FS_SFTP_HOST, null) : host;
|
||||
if (host == null) {
|
||||
throw new IOException(E_HOST_NULL);
|
||||
}
|
||||
conf.set(FS_SFTP_HOST, host);
|
||||
|
||||
int port = uriInfo.getPort();
|
||||
port = (port == -1)
|
||||
? conf.getInt(FS_SFTP_HOST_PORT, DEFAULT_SFTP_PORT)
|
||||
: port;
|
||||
conf.setInt(FS_SFTP_HOST_PORT, port);
|
||||
|
||||
// get user/password information from URI
|
||||
String userAndPwdFromUri = uriInfo.getUserInfo();
|
||||
if (userAndPwdFromUri != null) {
|
||||
String[] userPasswdInfo = userAndPwdFromUri.split(":");
|
||||
String user = userPasswdInfo[0];
|
||||
user = URLDecoder.decode(user, "UTF-8");
|
||||
conf.set(FS_SFTP_USER_PREFIX + host, user);
|
||||
if (userPasswdInfo.length > 1) {
|
||||
conf.set(FS_SFTP_PASSWORD_PREFIX + host + "." +
|
||||
user, userPasswdInfo[1]);
|
||||
}
|
||||
}
|
||||
|
||||
String user = conf.get(FS_SFTP_USER_PREFIX + host);
|
||||
if (user == null || user.equals("")) {
|
||||
throw new IllegalStateException(E_USER_NULL);
|
||||
}
|
||||
|
||||
int connectionMax =
|
||||
conf.getInt(FS_SFTP_CONNECTION_MAX, DEFAULT_MAX_CONNECTION);
|
||||
connectionPool = new SFTPConnectionPool(connectionMax);
|
||||
}
|
||||
|
||||
/**
|
||||
* Connect using configuration parameters.
|
||||
*
|
||||
* @return a ChannelSftp instance
|
||||
* @throws IOException
|
||||
*/
|
||||
private ChannelSftp connect() throws IOException {
|
||||
Configuration conf = getConf();
|
||||
|
||||
String host = conf.get(FS_SFTP_HOST, null);
|
||||
int port = conf.getInt(FS_SFTP_HOST_PORT, DEFAULT_SFTP_PORT);
|
||||
String user = conf.get(FS_SFTP_USER_PREFIX + host, null);
|
||||
String pwd = conf.get(FS_SFTP_PASSWORD_PREFIX + host + "." + user, null);
|
||||
String keyFile = conf.get(FS_SFTP_KEYFILE, null);
|
||||
|
||||
ChannelSftp channel =
|
||||
connectionPool.connect(host, port, user, pwd, keyFile);
|
||||
|
||||
return channel;
|
||||
}
|
||||
|
||||
/**
|
||||
* Logout and disconnect the given channel.
|
||||
*
|
||||
* @param client
|
||||
* @throws IOException
|
||||
*/
|
||||
private void disconnect(ChannelSftp channel) throws IOException {
|
||||
connectionPool.disconnect(channel);
|
||||
}
|
||||
|
||||
/**
|
||||
* Resolve against given working directory.
|
||||
*
|
||||
* @param workDir
|
||||
* @param path
|
||||
* @return absolute path
|
||||
*/
|
||||
private Path makeAbsolute(Path workDir, Path path) {
|
||||
if (path.isAbsolute()) {
|
||||
return path;
|
||||
}
|
||||
return new Path(workDir, path);
|
||||
}
|
||||
|
||||
/**
|
||||
* Convenience method, so that we don't open a new connection when using this
|
||||
* method from within another method. Otherwise every API invocation incurs
|
||||
* the overhead of opening/closing a TCP connection.
|
||||
* @throws IOException
|
||||
*/
|
||||
private boolean exists(ChannelSftp channel, Path file) throws IOException {
|
||||
try {
|
||||
getFileStatus(channel, file);
|
||||
return true;
|
||||
} catch (FileNotFoundException fnfe) {
|
||||
return false;
|
||||
} catch (IOException ioe) {
|
||||
throw new IOException(E_FILE_STATUS, ioe);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Convenience method, so that we don't open a new connection when using this
|
||||
* method from within another method. Otherwise every API invocation incurs
|
||||
* the overhead of opening/closing a TCP connection.
|
||||
*/
|
||||
@SuppressWarnings("unchecked")
|
||||
private FileStatus getFileStatus(ChannelSftp client, Path file)
|
||||
throws IOException {
|
||||
FileStatus fileStat = null;
|
||||
Path workDir;
|
||||
try {
|
||||
workDir = new Path(client.pwd());
|
||||
} catch (SftpException e) {
|
||||
throw new IOException(e);
|
||||
}
|
||||
Path absolute = makeAbsolute(workDir, file);
|
||||
Path parentPath = absolute.getParent();
|
||||
if (parentPath == null) { // root directory
|
||||
long length = -1; // Length of root directory on server not known
|
||||
boolean isDir = true;
|
||||
int blockReplication = 1;
|
||||
long blockSize = DEFAULT_BLOCK_SIZE; // Block Size not known.
|
||||
long modTime = -1; // Modification time of root directory not known.
|
||||
Path root = new Path("/");
|
||||
return new FileStatus(length, isDir, blockReplication, blockSize,
|
||||
modTime,
|
||||
root.makeQualified(this.getUri(), this.getWorkingDirectory()));
|
||||
}
|
||||
String pathName = parentPath.toUri().getPath();
|
||||
Vector<LsEntry> sftpFiles;
|
||||
try {
|
||||
sftpFiles = (Vector<LsEntry>) client.ls(pathName);
|
||||
} catch (SftpException e) {
|
||||
throw new FileNotFoundException(String.format(E_FILE_NOTFOUND, file));
|
||||
}
|
||||
if (sftpFiles != null) {
|
||||
for (LsEntry sftpFile : sftpFiles) {
|
||||
if (sftpFile.getFilename().equals(file.getName())) {
|
||||
// file found in directory
|
||||
fileStat = getFileStatus(client, sftpFile, parentPath);
|
||||
break;
|
||||
}
|
||||
}
|
||||
if (fileStat == null) {
|
||||
throw new FileNotFoundException(String.format(E_FILE_NOTFOUND, file));
|
||||
}
|
||||
} else {
|
||||
throw new FileNotFoundException(String.format(E_FILE_NOTFOUND, file));
|
||||
}
|
||||
return fileStat;
|
||||
}
|
||||
|
||||
/**
|
||||
* Convert the file information in LsEntry to a {@link FileStatus} object.
|
||||
*
|
||||
* @param sftpFile
|
||||
* @param parentPath
|
||||
* @return file status
|
||||
* @throws IOException
|
||||
*/
|
||||
private FileStatus getFileStatus(ChannelSftp channel, LsEntry sftpFile,
|
||||
Path parentPath) throws IOException {
|
||||
|
||||
SftpATTRS attr = sftpFile.getAttrs();
|
||||
long length = attr.getSize();
|
||||
boolean isDir = attr.isDir();
|
||||
boolean isLink = attr.isLink();
|
||||
if (isLink) {
|
||||
String link = parentPath.toUri().getPath() + "/" + sftpFile.getFilename();
|
||||
try {
|
||||
link = channel.realpath(link);
|
||||
|
||||
Path linkParent = new Path("/", link);
|
||||
|
||||
FileStatus fstat = getFileStatus(channel, linkParent);
|
||||
isDir = fstat.isDirectory();
|
||||
length = fstat.getLen();
|
||||
} catch (Exception e) {
|
||||
throw new IOException(e);
|
||||
}
|
||||
}
|
||||
int blockReplication = 1;
|
||||
// Using default block size since there is no way in SFTP channel to know of
|
||||
// block sizes on server. The assumption could be less than ideal.
|
||||
long blockSize = DEFAULT_BLOCK_SIZE;
|
||||
long modTime = attr.getMTime() * 1000L; // convert to milliseconds
|
||||
long accessTime = 0;
|
||||
FsPermission permission = getPermissions(sftpFile);
|
||||
// Not able to get the real user/group names; just use the numeric user and group
|
||||
// id
|
||||
String user = Integer.toString(attr.getUId());
|
||||
String group = Integer.toString(attr.getGId());
|
||||
Path filePath = new Path(parentPath, sftpFile.getFilename());
|
||||
|
||||
return new FileStatus(length, isDir, blockReplication, blockSize, modTime,
|
||||
accessTime, permission, user, group, filePath.makeQualified(
|
||||
this.getUri(), this.getWorkingDirectory()));
|
||||
}
|
||||
|
||||
/**
|
||||
* Return file permission.
|
||||
*
|
||||
* @param sftpFile
|
||||
* @return file permission
|
||||
*/
|
||||
private FsPermission getPermissions(LsEntry sftpFile) {
|
||||
return new FsPermission((short) sftpFile.getAttrs().getPermissions());
|
||||
}
|
||||
|
||||
/**
|
||||
* Convenience method, so that we don't open a new connection when using this
|
||||
* method from within another method. Otherwise every API invocation incurs
|
||||
* the overhead of opening/closing a TCP connection.
|
||||
*/
|
||||
private boolean mkdirs(ChannelSftp client, Path file, FsPermission permission)
|
||||
throws IOException {
|
||||
boolean created = true;
|
||||
Path workDir;
|
||||
try {
|
||||
workDir = new Path(client.pwd());
|
||||
} catch (SftpException e) {
|
||||
throw new IOException(e);
|
||||
}
|
||||
Path absolute = makeAbsolute(workDir, file);
|
||||
String pathName = absolute.getName();
|
||||
if (!exists(client, absolute)) {
|
||||
Path parent = absolute.getParent();
|
||||
created =
|
||||
(parent == null || mkdirs(client, parent, FsPermission.getDefault()));
|
||||
if (created) {
|
||||
String parentDir = parent.toUri().getPath();
|
||||
boolean succeeded = true;
|
||||
try {
|
||||
client.cd(parentDir);
|
||||
client.mkdir(pathName);
|
||||
} catch (SftpException e) {
|
||||
throw new IOException(String.format(E_MAKE_DIR_FORPATH, pathName,
|
||||
parentDir));
|
||||
}
|
||||
created = created && succeeded;
|
||||
}
|
||||
} else if (isFile(client, absolute)) {
|
||||
throw new IOException(String.format(E_DIR_CREATE_FROMFILE, absolute));
|
||||
}
|
||||
return created;
|
||||
}
|
||||
|
||||
/**
|
||||
* Convenience method, so that we don't open a new connection when using this
|
||||
* method from within another method. Otherwise every API invocation incurs
|
||||
* the overhead of opening/closing a TCP connection.
|
||||
* @throws IOException
|
||||
*/
|
||||
private boolean isFile(ChannelSftp channel, Path file) throws IOException {
|
||||
try {
|
||||
return !getFileStatus(channel, file).isDirectory();
|
||||
} catch (FileNotFoundException e) {
|
||||
return false; // file does not exist
|
||||
} catch (IOException ioe) {
|
||||
throw new IOException(E_FILE_CHECK_FAILED, ioe);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Convenience method, so that we don't open a new connection when using this
|
||||
* method from within another method. Otherwise every API invocation incurs
|
||||
* the overhead of opening/closing a TCP connection.
|
||||
*/
|
||||
private boolean delete(ChannelSftp channel, Path file, boolean recursive)
|
||||
throws IOException {
|
||||
Path workDir;
|
||||
try {
|
||||
workDir = new Path(channel.pwd());
|
||||
} catch (SftpException e) {
|
||||
throw new IOException(e);
|
||||
}
|
||||
Path absolute = makeAbsolute(workDir, file);
|
||||
String pathName = absolute.toUri().getPath();
|
||||
FileStatus fileStat = null;
|
||||
try {
|
||||
fileStat = getFileStatus(channel, absolute);
|
||||
} catch (FileNotFoundException e) {
|
||||
// file not found, nothing to delete; return false
|
||||
return false;
|
||||
}
|
||||
if (!fileStat.isDirectory()) {
|
||||
boolean status = true;
|
||||
try {
|
||||
channel.rm(pathName);
|
||||
} catch (SftpException e) {
|
||||
status = false;
|
||||
}
|
||||
return status;
|
||||
} else {
|
||||
boolean status = true;
|
||||
FileStatus[] dirEntries = listStatus(channel, absolute);
|
||||
if (dirEntries != null && dirEntries.length > 0) {
|
||||
if (!recursive) {
|
||||
throw new IOException(String.format(E_DIR_NOTEMPTY, file));
|
||||
}
|
||||
for (int i = 0; i < dirEntries.length; ++i) {
|
||||
delete(channel, new Path(absolute, dirEntries[i].getPath()),
|
||||
recursive);
|
||||
}
|
||||
}
|
||||
try {
|
||||
channel.rmdir(pathName);
|
||||
} catch (SftpException e) {
|
||||
status = false;
|
||||
}
|
||||
return status;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Convenience method, so that we don't open a new connection when using this
|
||||
* method from within another method. Otherwise every API invocation incurs
|
||||
* the overhead of opening/closing a TCP connection.
|
||||
*/
|
||||
@SuppressWarnings("unchecked")
|
||||
private FileStatus[] listStatus(ChannelSftp client, Path file)
|
||||
throws IOException {
|
||||
Path workDir;
|
||||
try {
|
||||
workDir = new Path(client.pwd());
|
||||
} catch (SftpException e) {
|
||||
throw new IOException(e);
|
||||
}
|
||||
Path absolute = makeAbsolute(workDir, file);
|
||||
FileStatus fileStat = getFileStatus(client, absolute);
|
||||
if (!fileStat.isDirectory()) {
|
||||
return new FileStatus[] {fileStat};
|
||||
}
|
||||
Vector<LsEntry> sftpFiles;
|
||||
try {
|
||||
sftpFiles = (Vector<LsEntry>) client.ls(absolute.toUri().getPath());
|
||||
} catch (SftpException e) {
|
||||
throw new IOException(e);
|
||||
}
|
||||
ArrayList<FileStatus> fileStats = new ArrayList<FileStatus>();
|
||||
for (int i = 0; i < sftpFiles.size(); i++) {
|
||||
LsEntry entry = sftpFiles.get(i);
|
||||
String fname = entry.getFilename();
|
||||
// skip current and parent directory, i.e. "." and ".."
|
||||
if (!".".equalsIgnoreCase(fname) && !"..".equalsIgnoreCase(fname)) {
|
||||
fileStats.add(getFileStatus(client, entry, absolute));
|
||||
}
|
||||
}
|
||||
return fileStats.toArray(new FileStatus[fileStats.size()]);
|
||||
}
|
||||
|
||||
/**
|
||||
* Convenience method, so that we don't open a new connection when using this
|
||||
* method from within another method. Otherwise every API invocation incurs
|
||||
* the overhead of opening/closing a TCP connection.
|
||||
*
|
||||
* @param channel
|
||||
* @param src
|
||||
* @param dst
|
||||
* @return rename successful?
|
||||
* @throws IOException
|
||||
*/
|
||||
private boolean rename(ChannelSftp channel, Path src, Path dst)
|
||||
throws IOException {
|
||||
Path workDir;
|
||||
try {
|
||||
workDir = new Path(channel.pwd());
|
||||
} catch (SftpException e) {
|
||||
throw new IOException(e);
|
||||
}
|
||||
Path absoluteSrc = makeAbsolute(workDir, src);
|
||||
Path absoluteDst = makeAbsolute(workDir, dst);
|
||||
|
||||
if (!exists(channel, absoluteSrc)) {
|
||||
throw new IOException(String.format(E_SPATH_NOTEXIST, src));
|
||||
}
|
||||
if (exists(channel, absoluteDst)) {
|
||||
throw new IOException(String.format(E_DPATH_EXIST, dst));
|
||||
}
|
||||
boolean renamed = true;
|
||||
try {
|
||||
channel.cd("/");
|
||||
channel.rename(src.toUri().getPath(), dst.toUri().getPath());
|
||||
} catch (SftpException e) {
|
||||
renamed = false;
|
||||
}
|
||||
return renamed;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void initialize(URI uriInfo, Configuration conf) throws IOException {
|
||||
super.initialize(uriInfo, conf);
|
||||
|
||||
setConfigurationFromURI(uriInfo, conf);
|
||||
setConf(conf);
|
||||
this.uri = uriInfo;
|
||||
}
|
||||
|
||||
@Override
|
||||
public URI getUri() {
|
||||
return uri;
|
||||
}
|
||||
|
||||
@Override
|
||||
public FSDataInputStream open(Path f, int bufferSize) throws IOException {
|
||||
ChannelSftp channel = connect();
|
||||
Path workDir;
|
||||
try {
|
||||
workDir = new Path(channel.pwd());
|
||||
} catch (SftpException e) {
|
||||
throw new IOException(e);
|
||||
}
|
||||
Path absolute = makeAbsolute(workDir, f);
|
||||
FileStatus fileStat = getFileStatus(channel, absolute);
|
||||
if (fileStat.isDirectory()) {
|
||||
disconnect(channel);
|
||||
throw new IOException(String.format(E_PATH_DIR, f));
|
||||
}
|
||||
InputStream is;
|
||||
try {
|
||||
// the path could be a symbolic link, so get the real path
|
||||
absolute = new Path("/", channel.realpath(absolute.toUri().getPath()));
|
||||
|
||||
is = channel.get(absolute.toUri().getPath());
|
||||
} catch (SftpException e) {
|
||||
throw new IOException(e);
|
||||
}
|
||||
|
||||
FSDataInputStream fis =
|
||||
new FSDataInputStream(new SFTPInputStream(is, channel, statistics));
|
||||
return fis;
|
||||
}
|
||||
|
||||
/**
|
||||
* A stream obtained via this call must be closed before using other APIs of
|
||||
* this class or else the invocation will block.
|
||||
*/
|
||||
@Override
|
||||
public FSDataOutputStream create(Path f, FsPermission permission,
|
||||
boolean overwrite, int bufferSize, short replication, long blockSize,
|
||||
Progressable progress) throws IOException {
|
||||
final ChannelSftp client = connect();
|
||||
Path workDir;
|
||||
try {
|
||||
workDir = new Path(client.pwd());
|
||||
} catch (SftpException e) {
|
||||
throw new IOException(e);
|
||||
}
|
||||
Path absolute = makeAbsolute(workDir, f);
|
||||
if (exists(client, f)) {
|
||||
if (overwrite) {
|
||||
delete(client, f, false);
|
||||
} else {
|
||||
disconnect(client);
|
||||
throw new IOException(String.format(E_FILE_EXIST, f));
|
||||
}
|
||||
}
|
||||
Path parent = absolute.getParent();
|
||||
if (parent == null || !mkdirs(client, parent, FsPermission.getDefault())) {
|
||||
parent = (parent == null) ? new Path("/") : parent;
|
||||
disconnect(client);
|
||||
throw new IOException(String.format(E_CREATE_DIR, parent));
|
||||
}
|
||||
OutputStream os;
|
||||
try {
|
||||
client.cd(parent.toUri().getPath());
|
||||
os = client.put(f.getName());
|
||||
} catch (SftpException e) {
|
||||
throw new IOException(e);
|
||||
}
|
||||
FSDataOutputStream fos = new FSDataOutputStream(os, statistics) {
|
||||
@Override
|
||||
public void close() throws IOException {
|
||||
super.close();
|
||||
disconnect(client);
|
||||
}
|
||||
};
|
||||
|
||||
return fos;
|
||||
}
|
||||
|
||||
@Override
|
||||
public FSDataOutputStream append(Path f, int bufferSize,
|
||||
Progressable progress)
|
||||
throws IOException {
|
||||
throw new IOException(E_NOT_SUPPORTED);
|
||||
}
|
||||
|
||||
/*
|
||||
* The parent of source and destination can be different. It is supposed to
|
||||
* work like 'move'
|
||||
*/
|
||||
@Override
|
||||
public boolean rename(Path src, Path dst) throws IOException {
|
||||
ChannelSftp channel = connect();
|
||||
try {
|
||||
boolean success = rename(channel, src, dst);
|
||||
return success;
|
||||
} finally {
|
||||
disconnect(channel);
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean delete(Path f, boolean recursive) throws IOException {
|
||||
ChannelSftp channel = connect();
|
||||
try {
|
||||
boolean success = delete(channel, f, recursive);
|
||||
return success;
|
||||
} finally {
|
||||
disconnect(channel);
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public FileStatus[] listStatus(Path f) throws IOException {
|
||||
ChannelSftp client = connect();
|
||||
try {
|
||||
FileStatus[] stats = listStatus(client, f);
|
||||
return stats;
|
||||
} finally {
|
||||
disconnect(client);
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public void setWorkingDirectory(Path newDir) {
|
||||
// we do not maintain the working directory state
|
||||
}
|
||||
|
||||
@Override
|
||||
public Path getWorkingDirectory() {
|
||||
// Return home directory always since we do not maintain state.
|
||||
return getHomeDirectory();
|
||||
}
|
||||
|
||||
@Override
|
||||
public Path getHomeDirectory() {
|
||||
ChannelSftp channel = null;
|
||||
try {
|
||||
channel = connect();
|
||||
Path homeDir = new Path(channel.pwd());
|
||||
return homeDir;
|
||||
} catch (Exception ioe) {
|
||||
return null;
|
||||
} finally {
|
||||
try {
|
||||
disconnect(channel);
|
||||
} catch (IOException ioe) {
|
||||
return null;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean mkdirs(Path f, FsPermission permission) throws IOException {
|
||||
ChannelSftp client = connect();
|
||||
try {
|
||||
boolean success = mkdirs(client, f, permission);
|
||||
return success;
|
||||
} finally {
|
||||
disconnect(client);
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public FileStatus getFileStatus(Path f) throws IOException {
|
||||
ChannelSftp channel = connect();
|
||||
try {
|
||||
FileStatus status = getFileStatus(channel, f);
|
||||
return status;
|
||||
} finally {
|
||||
disconnect(channel);
|
||||
}
|
||||
}
|
||||
}
|
|
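
A minimal usage sketch for the new SFTP filesystem, assuming the sftp:// scheme is wired to SFTPFileSystem through the standard fs.<scheme>.impl lookup; the host, user and password below are placeholders, and the configuration keys are the FS_SFTP_* constants defined above.

    import java.io.InputStream;
    import java.net.URI;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileStatus;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.fs.sftp.SFTPFileSystem;

    public class SFTPUsageSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Assumed property name: FileSystem resolves "fs." + scheme + ".impl".
        conf.setClass("fs.sftp.impl", SFTPFileSystem.class, FileSystem.class);
        // Credentials keyed by host and host.user, as read in setConfigurationFromURI().
        conf.set(SFTPFileSystem.FS_SFTP_USER_PREFIX + "sftp.example.com", "alice");
        conf.set(SFTPFileSystem.FS_SFTP_PASSWORD_PREFIX + "sftp.example.com.alice", "secret");

        FileSystem fs = FileSystem.get(URI.create("sftp://sftp.example.com:22/"), conf);
        for (FileStatus stat : fs.listStatus(new Path("/uploads"))) {
          System.out.println(stat.getPath() + "\t" + stat.getLen());
        }
        InputStream in = fs.open(new Path("/uploads/sample.txt"));
        try {
          // consume the stream here
        } finally {
          in.close();
        }
        fs.close();
      }
    }
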
@ -0,0 +1,130 @@
|
|||
/**
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
* regarding copyright ownership. The ASF licenses this file
|
||||
* to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
package org.apache.hadoop.fs.sftp;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.io.InputStream;
|
||||
|
||||
import org.apache.hadoop.fs.FSInputStream;
|
||||
import org.apache.hadoop.fs.FileSystem;
|
||||
import org.apache.hadoop.util.StringUtils;
|
||||
|
||||
import com.jcraft.jsch.ChannelSftp;
|
||||
import com.jcraft.jsch.JSchException;
|
||||
import com.jcraft.jsch.Session;
|
||||
|
||||
/** SFTP FileSystem input stream. */
|
||||
class SFTPInputStream extends FSInputStream {
|
||||
|
||||
public static final String E_SEEK_NOTSUPPORTED = "Seek not supported";
|
||||
public static final String E_CLIENT_NULL =
|
||||
"SFTP client null or not connected";
|
||||
public static final String E_NULL_INPUTSTREAM = "Null InputStream";
|
||||
public static final String E_STREAM_CLOSED = "Stream closed";
|
||||
public static final String E_CLIENT_NOTCONNECTED = "Client not connected";
|
||||
|
||||
private InputStream wrappedStream;
|
||||
private ChannelSftp channel;
|
||||
private FileSystem.Statistics stats;
|
||||
private boolean closed;
|
||||
private long pos;
|
||||
|
||||
SFTPInputStream(InputStream stream, ChannelSftp channel,
|
||||
FileSystem.Statistics stats) {
|
||||
|
||||
if (stream == null) {
|
||||
throw new IllegalArgumentException(E_NULL_INPUTSTREAM);
|
||||
}
|
||||
if (channel == null || !channel.isConnected()) {
|
||||
throw new IllegalArgumentException(E_CLIENT_NULL);
|
||||
}
|
||||
this.wrappedStream = stream;
|
||||
this.channel = channel;
|
||||
this.stats = stats;
|
||||
|
||||
this.pos = 0;
|
||||
this.closed = false;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void seek(long position) throws IOException {
|
||||
throw new IOException(E_SEEK_NOTSUPPORTED);
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean seekToNewSource(long targetPos) throws IOException {
|
||||
throw new IOException(E_SEEK_NOTSUPPORTED);
|
||||
}
|
||||
|
||||
@Override
|
||||
public long getPos() throws IOException {
|
||||
return pos;
|
||||
}
|
||||
|
||||
@Override
|
||||
public synchronized int read() throws IOException {
|
||||
if (closed) {
|
||||
throw new IOException(E_STREAM_CLOSED);
|
||||
}
|
||||
|
||||
int byteRead = wrappedStream.read();
|
||||
if (byteRead >= 0) {
|
||||
pos++;
|
||||
}
|
||||
if (stats != null && byteRead >= 0) {
|
||||
stats.incrementBytesRead(1);
|
||||
}
|
||||
return byteRead;
|
||||
}
|
||||
|
||||
public synchronized int read(byte[] buf, int off, int len)
|
||||
throws IOException {
|
||||
if (closed) {
|
||||
throw new IOException(E_STREAM_CLOSED);
|
||||
}
|
||||
|
||||
int result = wrappedStream.read(buf, off, len);
|
||||
if (result > 0) {
|
||||
pos += result;
|
||||
}
|
||||
if (stats != null && result > 0) {
|
||||
stats.incrementBytesRead(result);
|
||||
}
|
||||
|
||||
return result;
|
||||
}
|
||||
|
||||
public synchronized void close() throws IOException {
|
||||
if (closed) {
|
||||
return;
|
||||
}
|
||||
super.close();
|
||||
closed = true;
|
||||
if (!channel.isConnected()) {
|
||||
throw new IOException(E_CLIENT_NOTCONNECTED);
|
||||
}
|
||||
|
||||
try {
|
||||
Session session = channel.getSession();
|
||||
channel.disconnect();
|
||||
session.disconnect();
|
||||
} catch (JSchException e) {
|
||||
throw new IOException(StringUtils.stringifyException(e));
|
||||
}
|
||||
}
|
||||
}
|
|
@ -1,4 +1,4 @@
|
|||
/**
|
||||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
|
@ -15,12 +15,5 @@
|
|||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
package org.apache.hadoop.yarn.server.resourcemanager.scheduler;
|
||||
|
||||
public enum ContainerPreemptEventType {
|
||||
|
||||
DROP_RESERVATION,
|
||||
PREEMPT_CONTAINER,
|
||||
KILL_CONTAINER
|
||||
|
||||
}
|
||||
/** SFTP FileSystem package. */
|
||||
package org.apache.hadoop.fs.sftp;
|
|
@ -25,6 +25,7 @@ import java.util.List;
|
|||
|
||||
import org.apache.hadoop.classification.InterfaceAudience;
|
||||
import org.apache.hadoop.classification.InterfaceStability;
|
||||
import org.apache.hadoop.fs.FileSystem;
|
||||
import org.apache.hadoop.fs.PathIOException;
|
||||
import org.apache.hadoop.fs.PathIsDirectoryException;
|
||||
import org.apache.hadoop.fs.PathIsNotDirectoryException;
|
||||
|
@ -195,9 +196,19 @@ class Delete {
|
|||
@Override
|
||||
protected void processArguments(LinkedList<PathData> args)
|
||||
throws IOException {
|
||||
Trash trash = new Trash(getConf());
|
||||
trash.expunge();
|
||||
trash.checkpoint();
|
||||
FileSystem[] childFileSystems =
|
||||
FileSystem.get(getConf()).getChildFileSystems();
|
||||
if (null != childFileSystems) {
|
||||
for (FileSystem fs : childFileSystems) {
|
||||
Trash trash = new Trash(fs, getConf());
|
||||
trash.expunge();
|
||||
trash.checkpoint();
|
||||
}
|
||||
} else {
|
||||
Trash trash = new Trash(getConf());
|
||||
trash.expunge();
|
||||
trash.checkpoint();
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
@ -19,6 +19,7 @@
|
|||
package org.apache.hadoop.fs.shell;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.util.LinkedList;
|
||||
|
||||
import org.apache.hadoop.classification.InterfaceAudience;
|
||||
import org.apache.hadoop.classification.InterfaceStability;
|
||||
|
@ -27,8 +28,13 @@ import org.apache.hadoop.fs.FsShellPermissions;
|
|||
import org.apache.hadoop.fs.Path;
|
||||
import org.apache.hadoop.fs.shell.find.Find;
|
||||
|
||||
import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_DEFAULT_NAME_DEFAULT;
|
||||
import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY;
|
||||
import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_SHELL_MISSING_DEFAULT_FS_WARNING_DEFAULT;
|
||||
import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_SHELL_MISSING_DEFAULT_FS_WARNING_KEY;
|
||||
|
||||
/**
|
||||
* Base class for all "hadoop fs" commands
|
||||
* Base class for all "hadoop fs" commands.
|
||||
*/
|
||||
|
||||
@InterfaceAudience.Private
|
||||
|
@ -90,4 +96,26 @@ abstract public class FsCommand extends Command {
|
|||
public int runAll() {
|
||||
return run(args);
|
||||
}
|
||||
|
||||
@Override
|
||||
protected void processRawArguments(LinkedList<String> args)
|
||||
throws IOException {
|
||||
LinkedList<PathData> expandedArgs = expandArguments(args);
|
||||
// If "fs.defaultFs" is not set appropriately, it warns the user that the
|
||||
// command is not running against HDFS.
|
||||
final boolean displayWarnings = getConf().getBoolean(
|
||||
HADOOP_SHELL_MISSING_DEFAULT_FS_WARNING_KEY,
|
||||
HADOOP_SHELL_MISSING_DEFAULT_FS_WARNING_DEFAULT);
|
||||
if (displayWarnings) {
|
||||
final String defaultFs = getConf().get(FS_DEFAULT_NAME_KEY);
|
||||
final boolean missingDefaultFs =
|
||||
defaultFs == null || defaultFs.equals(FS_DEFAULT_NAME_DEFAULT);
|
||||
if (missingDefaultFs) {
|
||||
err.printf(
|
||||
"Warning: fs.defaultFs is not set when running \"%s\" command.%n",
|
||||
getCommandName());
|
||||
}
|
||||
}
|
||||
processArguments(expandedArgs);
|
||||
}
|
||||
}
|
||||
|
|
|
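
As a rough sketch (not part of the patch) of how the new warning behaves, the fragment below runs a shell command with the toggle enabled and then with an explicit default filesystem; the namenode URI is a placeholder, and the property names are referenced only through the imported constants.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FsShell;
    import org.apache.hadoop.util.ToolRunner;

    import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY;
    import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_SHELL_MISSING_DEFAULT_FS_WARNING_KEY;

    public class DefaultFsWarningSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Enable the warning explicitly so the demo does not depend on the default value.
        conf.setBoolean(HADOOP_SHELL_MISSING_DEFAULT_FS_WARNING_KEY, true);
        // With fs.defaultFS left at the local default, "-ls" prints the warning to stderr.
        ToolRunner.run(conf, new FsShell(), new String[] {"-ls", "/"});

        // Pointing fs.defaultFS at a real cluster suppresses it (placeholder address).
        conf.set(FS_DEFAULT_NAME_KEY, "hdfs://namenode.example.com:8020");
        ToolRunner.run(conf, new FsShell(), new String[] {"-ls", "/"});
      }
    }
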
@ -24,6 +24,8 @@ import java.util.Arrays;
|
|||
import java.util.Comparator;
|
||||
import java.util.Date;
|
||||
import java.util.LinkedList;
|
||||
|
||||
import org.apache.hadoop.conf.Configuration;
|
||||
import org.apache.hadoop.util.StringUtils;
|
||||
|
||||
import org.apache.hadoop.classification.InterfaceAudience;
|
||||
|
@ -102,6 +104,12 @@ class Ls extends FsCommand {
|
|||
|
||||
protected boolean humanReadable = false;
|
||||
|
||||
protected Ls() {}
|
||||
|
||||
protected Ls(Configuration conf) {
|
||||
super(conf);
|
||||
}
|
||||
|
||||
protected String formatSize(long size) {
|
||||
return humanReadable
|
||||
? StringUtils.TraditionalBinaryPrefix.long2String(size, "", 1)
|
||||
|
|
|
@ -70,7 +70,8 @@ class Mkdir extends FsCommand {
|
|||
protected void processNonexistentPath(PathData item) throws IOException {
|
||||
// check if parent exists. this is complicated because getParent(a/b/c/) returns a/b/c, but
|
||||
// we want a/b
|
||||
if (!item.fs.exists(new Path(item.path.toString()).getParent()) && !createParents) {
|
||||
if (!createParents &&
|
||||
!item.fs.exists(new Path(item.path.toString()).getParent())) {
|
||||
throw new PathNotFoundException(item.toString());
|
||||
}
|
||||
if (!item.fs.mkdirs(item.path)) {
|
||||
|
|
|
@ -385,6 +385,12 @@ class ChRootedFs extends AbstractFileSystem {
|
|||
myFs.setStoragePolicy(fullPath(path), policyName);
|
||||
}
|
||||
|
||||
@Override
|
||||
public BlockStoragePolicySpi getStoragePolicy(final Path src)
|
||||
throws IOException {
|
||||
return myFs.getStoragePolicy(src);
|
||||
}
|
||||
|
||||
@Override
|
||||
public Collection<? extends BlockStoragePolicySpi> getAllStoragePolicies()
|
||||
throws IOException {
|
||||
|
|
|
@ -34,6 +34,7 @@ import org.apache.hadoop.classification.InterfaceStability;
|
|||
import org.apache.hadoop.conf.Configuration;
|
||||
import org.apache.hadoop.fs.AbstractFileSystem;
|
||||
import org.apache.hadoop.fs.BlockLocation;
|
||||
import org.apache.hadoop.fs.BlockStoragePolicySpi;
|
||||
import org.apache.hadoop.fs.CreateFlag;
|
||||
import org.apache.hadoop.fs.FSDataInputStream;
|
||||
import org.apache.hadoop.fs.FSDataOutputStream;
|
||||
|
@ -748,6 +749,20 @@ public class ViewFs extends AbstractFileSystem {
|
|||
res.targetFileSystem.setStoragePolicy(res.remainingPath, policyName);
|
||||
}
|
||||
|
||||
/**
|
||||
* Retrieve the storage policy for a given file or directory.
|
||||
*
|
||||
* @param src file or directory path.
|
||||
* @return the storage policy for the given file.
|
||||
* @throws IOException
|
||||
*/
|
||||
public BlockStoragePolicySpi getStoragePolicy(final Path src)
|
||||
throws IOException {
|
||||
InodeTree.ResolveResult<AbstractFileSystem> res =
|
||||
fsState.resolve(getUriPath(src), true);
|
||||
return res.targetFileSystem.getStoragePolicy(res.remainingPath);
|
||||
}
|
||||
|
||||
/*
|
||||
* An instance of this class represents an internal dir of the viewFs
|
||||
* i.e. an internal dir of the mount table.
|
||||
|
|
|
@ -18,6 +18,7 @@
|
|||
package org.apache.hadoop.fs.viewfs;
|
||||
|
||||
import org.apache.hadoop.fs.BlockLocation;
|
||||
import org.apache.hadoop.fs.FileStatus;
|
||||
import org.apache.hadoop.fs.LocatedFileStatus;
|
||||
import org.apache.hadoop.fs.Path;
|
||||
import org.apache.hadoop.fs.permission.FsPermission;
|
||||
|
@ -120,7 +121,7 @@ class ViewFsLocatedFileStatus extends LocatedFileStatus {
|
|||
}
|
||||
|
||||
@Override
|
||||
public int compareTo(Object o) {
|
||||
public int compareTo(FileStatus o) {
|
||||
return super.compareTo(o);
|
||||
}
|
||||
|
||||
|
|
|
@ -173,6 +173,8 @@ public class ActiveStandbyElector implements StatCallback, StringCallback {
|
|||
|
||||
private Lock sessionReestablishLockForTests = new ReentrantLock();
|
||||
private boolean wantToBeInElection;
|
||||
private boolean monitorLockNodePending = false;
|
||||
private ZooKeeper monitorLockNodeClient;
|
||||
|
||||
/**
|
||||
* Create a new ActiveStandbyElector object <br/>
|
||||
|
@ -468,6 +470,7 @@ public class ActiveStandbyElector implements StatCallback, StringCallback {
|
|||
public synchronized void processResult(int rc, String path, Object ctx,
|
||||
Stat stat) {
|
||||
if (isStaleClient(ctx)) return;
|
||||
monitorLockNodePending = false;
|
||||
|
||||
assert wantToBeInElection :
|
||||
"Got a StatNode result after quitting election";
|
||||
|
@ -744,6 +747,11 @@ public class ActiveStandbyElector implements StatCallback, StringCallback {
|
|||
return state;
|
||||
}
|
||||
|
||||
@VisibleForTesting
|
||||
synchronized boolean isMonitorLockNodePending() {
|
||||
return monitorLockNodePending;
|
||||
}
|
||||
|
||||
private boolean reEstablishSession() {
|
||||
int connectionRetryCount = 0;
|
||||
boolean success = false;
|
||||
|
@ -949,6 +957,12 @@ public class ActiveStandbyElector implements StatCallback, StringCallback {
|
|||
}
|
||||
|
||||
private void monitorLockNodeAsync() {
|
||||
if (monitorLockNodePending && monitorLockNodeClient == zkClient) {
|
||||
LOG.info("Ignore duplicate monitor lock-node request.");
|
||||
return;
|
||||
}
|
||||
monitorLockNodePending = true;
|
||||
monitorLockNodeClient = zkClient;
|
||||
zkClient.exists(zkLockFilePath,
|
||||
watcher, this,
|
||||
zkClient);
|
||||
|
|
|
@ -21,6 +21,7 @@ import java.io.IOException;
|
|||
import java.net.InetSocketAddress;
|
||||
import java.security.PrivilegedAction;
|
||||
import java.security.PrivilegedExceptionAction;
|
||||
import java.util.ArrayList;
|
||||
import java.util.Collections;
|
||||
import java.util.List;
|
||||
import java.util.concurrent.Executors;
|
||||
|
@ -84,7 +85,7 @@ public abstract class ZKFailoverController {
|
|||
};
|
||||
|
||||
protected static final String USAGE =
|
||||
"Usage: java zkfc [ -formatZK [-force] [-nonInteractive] ]";
|
||||
"Usage: hdfs zkfc [ -formatZK [-force] [-nonInteractive] ]";
|
||||
|
||||
/** Unable to format the parent znode in ZK */
|
||||
static final int ERR_CODE_FORMAT_DENIED = 2;
|
||||
|
@ -141,6 +142,7 @@ public abstract class ZKFailoverController {
|
|||
throws AccessControlException, IOException;
|
||||
protected abstract InetSocketAddress getRpcAddressToBindTo();
|
||||
protected abstract PolicyProvider getPolicyProvider();
|
||||
protected abstract List<HAServiceTarget> getAllOtherNodes();
|
||||
|
||||
/**
|
||||
* Return the name of a znode inside the configured parent znode in which
|
||||
|
@ -616,9 +618,11 @@ public abstract class ZKFailoverController {
|
|||
* Coordinate a graceful failover. This proceeds in several phases:
|
||||
* 1) Pre-flight checks: ensure that the local node is healthy, and
|
||||
* thus a candidate for failover.
|
||||
* 2) Determine the current active node. If it is the local node, no
|
||||
* 2a) Determine the current active node. If it is the local node, no
|
||||
* need to failover - return success.
|
||||
* 3) Ask that node to yield from the election for a number of seconds.
|
||||
* 2b) Get the other nodes
|
||||
* 3a) Ask the other nodes to yield from election for a number of seconds
|
||||
* 3b) Ask the active node to yield from the election for a number of seconds.
|
||||
* 4) Allow the normal election path to run in other threads. Wait until
|
||||
* we either become unhealthy or we see an election attempt recorded by
|
||||
* the normal code path.
|
||||
|
@ -649,11 +653,26 @@ public abstract class ZKFailoverController {
|
|||
return;
|
||||
}
|
||||
|
||||
// Phase 3: ask the old active to yield from the election.
|
||||
LOG.info("Asking " + oldActive + " to cede its active state for " +
|
||||
timeout + "ms");
|
||||
ZKFCProtocol oldZkfc = oldActive.getZKFCProxy(conf, timeout);
|
||||
oldZkfc.cedeActive(timeout);
|
||||
// Phase 2b: get the other nodes
|
||||
List<HAServiceTarget> otherNodes = getAllOtherNodes();
|
||||
List<ZKFCProtocol> otherZkfcs = new ArrayList<ZKFCProtocol>(otherNodes.size());
|
||||
|
||||
// Phase 3: ask the other nodes to yield from the election.
|
||||
HAServiceTarget activeNode = null;
|
||||
for (HAServiceTarget remote : otherNodes) {
|
||||
// same node as the old active: compare by address, since reference equality may not hold
|
||||
if (remote.getAddress().equals(oldActive.getAddress())) {
|
||||
activeNode = remote;
|
||||
continue;
|
||||
}
|
||||
otherZkfcs.add(cedeRemoteActive(remote, timeout));
|
||||
}
|
||||
|
||||
assert
|
||||
activeNode != null : "Active node does not match any known remote node";
|
||||
|
||||
// Phase 3b: ask the old active to yield
|
||||
otherZkfcs.add(cedeRemoteActive(activeNode, timeout));
|
||||
|
||||
// Phase 4: wait for the normal election to make the local node
|
||||
// active.
|
||||
|
@ -676,7 +695,9 @@ public abstract class ZKFailoverController {
|
|||
// Phase 5. At this point, we made some attempt to become active. So we
|
||||
// can tell the old active to rejoin if it wants. This allows a quick
|
||||
// fail-back if we immediately crash.
|
||||
oldZkfc.cedeActive(-1);
|
||||
for (ZKFCProtocol zkfc : otherZkfcs) {
|
||||
zkfc.cedeActive(-1);
|
||||
}
|
||||
|
||||
if (attempt.succeeded) {
|
||||
LOG.info("Successfully became active. " + attempt.status);
|
||||
|
@ -687,6 +708,23 @@ public abstract class ZKFailoverController {
|
|||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Ask the remote zkfc to cede its active status and wait for the specified
|
||||
* timeout before attempting to claim leader status.
|
||||
* @param remote node to ask
|
||||
* @param timeout amount of time to cede
|
||||
* @return the {@link ZKFCProtocol} used to talk to the node
|
||||
* @throws IOException
|
||||
*/
|
||||
private ZKFCProtocol cedeRemoteActive(HAServiceTarget remote, int timeout)
|
||||
throws IOException {
|
||||
LOG.info("Asking " + remote + " to cede its active state for "
|
||||
+ timeout + "ms");
|
||||
ZKFCProtocol oldZkfc = remote.getZKFCProxy(conf, timeout);
|
||||
oldZkfc.cedeActive(timeout);
|
||||
return oldZkfc;
|
||||
}
|
||||
|
||||
/**
|
||||
* Ensure that the local node is in a healthy state, and thus
|
||||
* eligible for graceful failover.
|
||||
|
@ -777,7 +815,8 @@ public abstract class ZKFailoverController {
|
|||
break;
|
||||
|
||||
default:
|
||||
throw new IllegalArgumentException("Unhandled state:" + lastHealthState);
|
||||
throw new IllegalArgumentException("Unhandled state:"
|
||||
+ lastHealthState);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
@ -35,7 +35,8 @@ import org.apache.hadoop.classification.InterfaceStability;
|
|||
public class MD5Hash implements WritableComparable<MD5Hash> {
|
||||
public static final int MD5_LEN = 16;
|
||||
|
||||
private static ThreadLocal<MessageDigest> DIGESTER_FACTORY = new ThreadLocal<MessageDigest>() {
|
||||
private static final ThreadLocal<MessageDigest> DIGESTER_FACTORY =
|
||||
new ThreadLocal<MessageDigest>() {
|
||||
@Override
|
||||
protected MessageDigest initialValue() {
|
||||
try {
|
||||
|
|
|
@ -29,6 +29,8 @@ import org.apache.hadoop.classification.InterfaceAudience;
|
|||
import org.apache.hadoop.classification.InterfaceStability;
|
||||
import org.apache.hadoop.io.nativeio.NativeIO;
|
||||
|
||||
import static org.apache.hadoop.io.nativeio.NativeIO.POSIX.POSIX_FADV_WILLNEED;
|
||||
|
||||
import com.google.common.base.Preconditions;
|
||||
import com.google.common.util.concurrent.ThreadFactoryBuilder;
|
||||
|
||||
|
@ -204,7 +206,7 @@ public class ReadaheadPool {
|
|||
// other FD, which may be wasted work, but won't cause a problem.
|
||||
try {
|
||||
NativeIO.POSIX.getCacheManipulator().posixFadviseIfPossible(identifier,
|
||||
fd, off, len, NativeIO.POSIX.POSIX_FADV_WILLNEED);
|
||||
fd, off, len, POSIX_FADV_WILLNEED);
|
||||
} catch (IOException ioe) {
|
||||
if (canceled) {
|
||||
// no big deal - the reader canceled the request and closed
|
||||
|
|
|
@ -838,6 +838,8 @@ public class SequenceFile {
|
|||
Metadata metadata = null;
|
||||
Compressor compressor = null;
|
||||
|
||||
private boolean appendMode = false;
|
||||
|
||||
protected Serializer keySerializer;
|
||||
protected Serializer uncompressedValSerializer;
|
||||
protected Serializer compressedValSerializer;
|
||||
|
@ -909,6 +911,13 @@ public class SequenceFile {
|
|||
}
|
||||
}
|
||||
|
||||
static class AppendIfExistsOption extends Options.BooleanOption implements
|
||||
Option {
|
||||
AppendIfExistsOption(boolean value) {
|
||||
super(value);
|
||||
}
|
||||
}
|
||||
|
||||
static class KeyClassOption extends Options.ClassOption implements Option {
|
||||
KeyClassOption(Class<?> value) {
|
||||
super(value);
|
||||
|
@ -984,6 +993,10 @@ public class SequenceFile {
|
|||
return new ReplicationOption(value);
|
||||
}
|
||||
|
||||
public static Option appendIfExists(boolean value) {
|
||||
return new AppendIfExistsOption(value);
|
||||
}
|
||||
|
||||
public static Option blockSize(long value) {
|
||||
return new BlockSizeOption(value);
|
||||
}
|
||||
|
@ -1030,6 +1043,8 @@ public class SequenceFile {
|
|||
ProgressableOption progressOption =
|
||||
Options.getOption(ProgressableOption.class, opts);
|
||||
FileOption fileOption = Options.getOption(FileOption.class, opts);
|
||||
AppendIfExistsOption appendIfExistsOption = Options.getOption(
|
||||
AppendIfExistsOption.class, opts);
|
||||
FileSystemOption fsOption = Options.getOption(FileSystemOption.class, opts);
|
||||
StreamOption streamOption = Options.getOption(StreamOption.class, opts);
|
||||
KeyClassOption keyClassOption =
|
||||
|
@ -1071,7 +1086,54 @@ public class SequenceFile {
|
|||
blockSizeOption.getValue();
|
||||
Progressable progress = progressOption == null ? null :
|
||||
progressOption.getValue();
|
||||
out = fs.create(p, true, bufferSize, replication, blockSize, progress);
|
||||
|
||||
if (appendIfExistsOption != null && appendIfExistsOption.getValue()
|
||||
&& fs.exists(p)) {
|
||||
|
||||
// Read the file and verify header details
|
||||
SequenceFile.Reader reader = new SequenceFile.Reader(conf,
|
||||
SequenceFile.Reader.file(p), new Reader.OnlyHeaderOption());
|
||||
try {
|
||||
|
||||
if (keyClassOption.getValue() != reader.getKeyClass()
|
||||
|| valueClassOption.getValue() != reader.getValueClass()) {
|
||||
throw new IllegalArgumentException(
|
||||
"Key/value class provided does not match the file");
|
||||
}
|
||||
|
||||
if (reader.getVersion() != VERSION[3]) {
|
||||
throw new VersionMismatchException(VERSION[3],
|
||||
reader.getVersion());
|
||||
}
|
||||
|
||||
if (metadataOption != null) {
|
||||
LOG.info("MetaData Option is ignored during append");
|
||||
}
|
||||
metadataOption = (MetadataOption) SequenceFile.Writer
|
||||
.metadata(reader.getMetadata());
|
||||
|
||||
CompressionOption readerCompressionOption = new CompressionOption(
|
||||
reader.getCompressionType(), reader.getCompressionCodec());
|
||||
|
||||
if (readerCompressionOption.value != compressionTypeOption.value
|
||||
|| !readerCompressionOption.codec.getClass().getName()
|
||||
.equals(compressionTypeOption.codec.getClass().getName())) {
|
||||
throw new IllegalArgumentException(
|
||||
"Compression option provided does not match the file");
|
||||
}
|
||||
|
||||
sync = reader.getSync();
|
||||
|
||||
} finally {
|
||||
reader.close();
|
||||
}
|
||||
|
||||
out = fs.append(p, bufferSize, progress);
|
||||
this.appendMode = true;
|
||||
} else {
|
||||
out = fs
|
||||
.create(p, true, bufferSize, replication, blockSize, progress);
|
||||
}
|
||||
} else {
|
||||
out = streamOption.getValue();
|
||||
}
|
||||
|
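
A hedged sketch of the new append option from the caller's side; the path is a placeholder and the target filesystem must support append(). Per the checks in the hunk above, the key/value classes and compression settings have to match the existing file, otherwise an IllegalArgumentException is thrown.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.io.IntWritable;
    import org.apache.hadoop.io.SequenceFile;
    import org.apache.hadoop.io.Text;

    public class AppendIfExistsSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        Path file = new Path("hdfs://namenode.example.com:8020/tmp/events.seq"); // placeholder

        SequenceFile.Writer writer = SequenceFile.createWriter(conf,
            SequenceFile.Writer.file(file),
            SequenceFile.Writer.keyClass(IntWritable.class),
            SequenceFile.Writer.valueClass(Text.class),
            // New option: if the file exists, reopen it for append instead of overwriting.
            SequenceFile.Writer.appendIfExists(true));
        try {
          writer.append(new IntWritable(42), new Text("appended record"));
        } finally {
          writer.close();
        }
      }
    }
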
@ -1214,7 +1276,12 @@ public class SequenceFile {
|
|||
}
|
||||
this.compressedValSerializer.open(deflateOut);
|
||||
}
|
||||
writeFileHeader();
|
||||
|
||||
if (appendMode) {
|
||||
sync();
|
||||
} else {
|
||||
writeFileHeader();
|
||||
}
|
||||
}
|
||||
|
||||
/** Returns the class of keys in this file. */
|
||||
|
@ -2045,6 +2112,14 @@ public class SequenceFile {
|
|||
/** Returns the compression codec of data in this file. */
|
||||
public CompressionCodec getCompressionCodec() { return codec; }
|
||||
|
||||
private byte[] getSync() {
|
||||
return sync;
|
||||
}
|
||||
|
||||
private byte getVersion() {
|
||||
return version;
|
||||
}
|
||||
|
||||
/**
|
||||
* Get the compression type for this file.
|
||||
* @return the compression type
|
||||
|
|
|
@ -53,7 +53,7 @@ import org.apache.hadoop.classification.InterfaceStability;
|
|||
public class Text extends BinaryComparable
|
||||
implements WritableComparable<BinaryComparable> {
|
||||
|
||||
private static ThreadLocal<CharsetEncoder> ENCODER_FACTORY =
|
||||
private static final ThreadLocal<CharsetEncoder> ENCODER_FACTORY =
|
||||
new ThreadLocal<CharsetEncoder>() {
|
||||
@Override
|
||||
protected CharsetEncoder initialValue() {
|
||||
|
@ -63,7 +63,7 @@ public class Text extends BinaryComparable
|
|||
}
|
||||
};
|
||||
|
||||
private static ThreadLocal<CharsetDecoder> DECODER_FACTORY =
|
||||
private static final ThreadLocal<CharsetDecoder> DECODER_FACTORY =
|
||||
new ThreadLocal<CharsetDecoder>() {
|
||||
@Override
|
||||
protected CharsetDecoder initialValue() {
|
||||
|
|
|
@ -44,7 +44,7 @@ public class Bzip2Factory {
|
|||
* @return <code>true</code> if native-bzip2 is loaded & initialized
|
||||
* and can be loaded for this job, else <code>false</code>
|
||||
*/
|
||||
public static boolean isNativeBzip2Loaded(Configuration conf) {
|
||||
public static synchronized boolean isNativeBzip2Loaded(Configuration conf) {
|
||||
String libname = conf.get("io.compression.codec.bzip2.library",
|
||||
"system-native");
|
||||
if (!bzip2LibraryName.equals(libname)) {
|
||||
|
|
|
@ -56,54 +56,54 @@ import com.google.common.annotations.VisibleForTesting;
|
|||
@InterfaceStability.Unstable
|
||||
public class NativeIO {
|
||||
public static class POSIX {
|
||||
// Flags for open() call from bits/fcntl.h
|
||||
public static final int O_RDONLY = 00;
|
||||
public static final int O_WRONLY = 01;
|
||||
public static final int O_RDWR = 02;
|
||||
public static final int O_CREAT = 0100;
|
||||
public static final int O_EXCL = 0200;
|
||||
public static final int O_NOCTTY = 0400;
|
||||
public static final int O_TRUNC = 01000;
|
||||
public static final int O_APPEND = 02000;
|
||||
public static final int O_NONBLOCK = 04000;
|
||||
public static final int O_SYNC = 010000;
|
||||
public static final int O_ASYNC = 020000;
|
||||
public static final int O_FSYNC = O_SYNC;
|
||||
public static final int O_NDELAY = O_NONBLOCK;
|
||||
// Flags for open() call from bits/fcntl.h - Set by JNI
|
||||
public static int O_RDONLY = -1;
|
||||
public static int O_WRONLY = -1;
|
||||
public static int O_RDWR = -1;
|
||||
public static int O_CREAT = -1;
|
||||
public static int O_EXCL = -1;
|
||||
public static int O_NOCTTY = -1;
|
||||
public static int O_TRUNC = -1;
|
||||
public static int O_APPEND = -1;
|
||||
public static int O_NONBLOCK = -1;
|
||||
public static int O_SYNC = -1;
|
||||
|
||||
// Flags for posix_fadvise() from bits/fcntl.h
|
||||
// Flags for posix_fadvise() from bits/fcntl.h - Set by JNI
|
||||
/* No further special treatment. */
|
||||
public static final int POSIX_FADV_NORMAL = 0;
|
||||
public static int POSIX_FADV_NORMAL = -1;
|
||||
/* Expect random page references. */
|
||||
public static final int POSIX_FADV_RANDOM = 1;
|
||||
public static int POSIX_FADV_RANDOM = -1;
|
||||
/* Expect sequential page references. */
|
||||
public static final int POSIX_FADV_SEQUENTIAL = 2;
|
||||
public static int POSIX_FADV_SEQUENTIAL = -1;
|
||||
/* Will need these pages. */
|
||||
public static final int POSIX_FADV_WILLNEED = 3;
|
||||
public static int POSIX_FADV_WILLNEED = -1;
|
||||
/* Don't need these pages. */
|
||||
public static final int POSIX_FADV_DONTNEED = 4;
|
||||
public static int POSIX_FADV_DONTNEED = -1;
|
||||
/* Data will be accessed once. */
|
||||
public static final int POSIX_FADV_NOREUSE = 5;
|
||||
public static int POSIX_FADV_NOREUSE = -1;
|
||||
|
||||
|
||||
// Updated by JNI when supported by glibc. Leave defaults in case kernel
|
||||
// supports sync_file_range, but glibc does not.
|
||||
/* Wait upon writeout of all pages
|
||||
in the range before performing the
|
||||
write. */
|
||||
public static final int SYNC_FILE_RANGE_WAIT_BEFORE = 1;
|
||||
public static int SYNC_FILE_RANGE_WAIT_BEFORE = 1;
|
||||
/* Initiate writeout of all those
|
||||
dirty pages in the range which are
|
||||
not presently under writeback. */
|
||||
public static final int SYNC_FILE_RANGE_WRITE = 2;
|
||||
|
||||
public static int SYNC_FILE_RANGE_WRITE = 2;
|
||||
/* Wait upon writeout of all pages in
|
||||
the range after performing the
|
||||
write. */
|
||||
public static final int SYNC_FILE_RANGE_WAIT_AFTER = 4;
|
||||
public static int SYNC_FILE_RANGE_WAIT_AFTER = 4;
|
||||
|
||||
private static final Log LOG = LogFactory.getLog(NativeIO.class);
|
||||
|
||||
// Set to true via JNI if possible
|
||||
public static boolean fadvisePossible = false;
|
||||
|
||||
private static boolean nativeLoaded = false;
|
||||
private static boolean fadvisePossible = true;
|
||||
private static boolean syncFileRangePossible = true;
|
||||
|
||||
static final String WORKAROUND_NON_THREADSAFE_CALLS_KEY =
|
||||
|
@ -265,8 +265,6 @@ public class NativeIO {
|
|||
if (nativeLoaded && fadvisePossible) {
|
||||
try {
|
||||
posix_fadvise(fd, offset, len, flags);
|
||||
} catch (UnsupportedOperationException uoe) {
|
||||
fadvisePossible = false;
|
||||
} catch (UnsatisfiedLinkError ule) {
|
||||
fadvisePossible = false;
|
||||
}
|
||||
|
@ -347,22 +345,21 @@ public class NativeIO {
|
|||
private String owner, group;
|
||||
private int mode;
|
||||
|
||||
// Mode constants
|
||||
public static final int S_IFMT = 0170000; /* type of file */
|
||||
public static final int S_IFIFO = 0010000; /* named pipe (fifo) */
|
||||
public static final int S_IFCHR = 0020000; /* character special */
|
||||
public static final int S_IFDIR = 0040000; /* directory */
|
||||
public static final int S_IFBLK = 0060000; /* block special */
|
||||
public static final int S_IFREG = 0100000; /* regular */
|
||||
public static final int S_IFLNK = 0120000; /* symbolic link */
|
||||
public static final int S_IFSOCK = 0140000; /* socket */
|
||||
public static final int S_IFWHT = 0160000; /* whiteout */
|
||||
public static final int S_ISUID = 0004000; /* set user id on execution */
|
||||
public static final int S_ISGID = 0002000; /* set group id on execution */
|
||||
public static final int S_ISVTX = 0001000; /* save swapped text even after use */
|
||||
public static final int S_IRUSR = 0000400; /* read permission, owner */
|
||||
public static final int S_IWUSR = 0000200; /* write permission, owner */
|
||||
public static final int S_IXUSR = 0000100; /* execute/search permission, owner */
|
||||
// Mode constants - Set by JNI
|
||||
public static int S_IFMT = -1; /* type of file */
|
||||
public static int S_IFIFO = -1; /* named pipe (fifo) */
|
||||
public static int S_IFCHR = -1; /* character special */
|
||||
public static int S_IFDIR = -1; /* directory */
|
||||
public static int S_IFBLK = -1; /* block special */
|
||||
public static int S_IFREG = -1; /* regular */
|
||||
public static int S_IFLNK = -1; /* symbolic link */
|
||||
public static int S_IFSOCK = -1; /* socket */
|
||||
public static int S_ISUID = -1; /* set user id on execution */
|
||||
public static int S_ISGID = -1; /* set group id on execution */
|
||||
public static int S_ISVTX = -1; /* save swapped text even after use */
|
||||
public static int S_IRUSR = -1; /* read permission, owner */
|
||||
public static int S_IWUSR = -1; /* write permission, owner */
|
||||
public static int S_IXUSR = -1; /* execute/search permission, owner */
|
||||
|
||||
Stat(int ownerId, int groupId, int mode) {
|
||||
this.ownerId = ownerId;
|
||||
|
@ -881,6 +878,17 @@ public class NativeIO {
|
|||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Creates a hardlink "dst" that points to "src".
|
||||
*
|
||||
* This is deprecated since JDK7 NIO can create hardlinks via the
|
||||
* {@link java.nio.file.Files} API.
|
||||
*
|
||||
* @param src source file
|
||||
* @param dst hardlink location
|
||||
* @throws IOException
|
||||
*/
|
||||
@Deprecated
|
||||
public static void link(File src, File dst) throws IOException {
|
||||
if (!nativeLoaded) {
|
||||
HardLink.createHardLink(src, dst);
|
||||
|
|
|
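
Since the javadoc above recommends the JDK 7 NIO API instead, a minimal equivalent of NativeIO.link(src, dst) looks like this (paths are placeholders); note that Files.createLink takes the new link first and the existing file second.

    import java.io.IOException;
    import java.nio.file.Files;
    import java.nio.file.Paths;

    public class HardLinkSketch {
      public static void main(String[] args) throws IOException {
        // Create a hardlink "dst" that points at the existing file "src".
        Files.createLink(Paths.get("/tmp/dst"), Paths.get("/tmp/src"));
      }
    }
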
@ -0,0 +1,49 @@
|
|||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
* regarding copyright ownership. The ASF licenses this file
|
||||
* to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*
|
||||
*/
|
||||
|
||||
package org.apache.hadoop.io.retry;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.util.Map;
|
||||
|
||||
/**
|
||||
* Holder class that clients can use to return multiple exceptions.
|
||||
*/
|
||||
public class MultiException extends IOException {
|
||||
|
||||
private final Map<String, Exception> exes;
|
||||
|
||||
public MultiException(Map<String, Exception> exes) {
|
||||
this.exes = exes;
|
||||
}
|
||||
|
||||
public Map<String, Exception> getExceptions() {
|
||||
return exes;
|
||||
}
|
||||
|
||||
@Override
|
||||
public String toString() {
|
||||
StringBuilder sb = new StringBuilder("{");
|
||||
for (Exception e : exes.values()) {
|
||||
sb.append(e.toString()).append(", ");
|
||||
}
|
||||
sb.append("}");
|
||||
return "MultiException[" + sb.toString() + "]";
|
||||
}
|
||||
}
|
|
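
A small illustrative sketch (names are made up) of how the new holder can be used: collect per-target failures in a map, throw them as one exception, and unpack them at the catch site via getExceptions().

    import java.io.IOException;
    import java.util.HashMap;
    import java.util.Map;

    import org.apache.hadoop.io.retry.MultiException;

    public class MultiExceptionSketch {
      static void contactAll() throws IOException {
        Map<String, Exception> failures = new HashMap<String, Exception>();
        failures.put("proxy-1", new IOException("connection refused"));
        failures.put("proxy-2", new IOException("read timed out"));
        if (!failures.isEmpty()) {
          throw new MultiException(failures);
        }
      }

      public static void main(String[] args) {
        try {
          contactAll();
        } catch (MultiException e) {
          for (Map.Entry<String, Exception> entry : e.getExceptions().entrySet()) {
            System.err.println(entry.getKey() + " failed: " + entry.getValue());
          }
        } catch (IOException e) {
          System.err.println("single failure: " + e);
        }
      }
    }
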
@@ -23,6 +23,8 @@ import java.lang.reflect.InvocationTargetException;
 import java.lang.reflect.Method;
 import java.lang.reflect.Proxy;
 import java.util.Collections;
+import java.util.LinkedList;
+import java.util.List;
 import java.util.Map;
 
 import org.apache.commons.logging.Log;

@@ -101,7 +103,7 @@ public class RetryInvocationHandler<T> implements RpcInvocationHandler {
         Object ret = invokeMethod(method, args);
         hasMadeASuccessfulCall = true;
         return ret;
-      } catch (Exception e) {
+      } catch (Exception ex) {
         boolean isIdempotentOrAtMostOnce = proxyProvider.getInterface()
             .getMethod(method.getName(), method.getParameterTypes())
             .isAnnotationPresent(Idempotent.class);

@@ -110,15 +112,16 @@ public class RetryInvocationHandler<T> implements RpcInvocationHandler {
               .getMethod(method.getName(), method.getParameterTypes())
               .isAnnotationPresent(AtMostOnce.class);
         }
-        RetryAction action = policy.shouldRetry(e, retries++,
-            invocationFailoverCount, isIdempotentOrAtMostOnce);
-        if (action.action == RetryAction.RetryDecision.FAIL) {
-          if (action.reason != null) {
+        List<RetryAction> actions = extractActions(policy, ex, retries++,
+            invocationFailoverCount, isIdempotentOrAtMostOnce);
+        RetryAction failAction = getFailAction(actions);
+        if (failAction != null) {
+          if (failAction.reason != null) {
             LOG.warn("Exception while invoking " + currentProxy.proxy.getClass()
                 + "." + method.getName() + " over " + currentProxy.proxyInfo
-                + ". Not retrying because " + action.reason, e);
+                + ". Not retrying because " + failAction.reason, ex);
           }
-          throw e;
+          throw ex;
         } else { // retry or failover
           // avoid logging the failover if this is the first call on this
           // proxy object, and we successfully achieve the failover without

@@ -126,8 +129,9 @@ public class RetryInvocationHandler<T> implements RpcInvocationHandler {
           boolean worthLogging =
               !(invocationFailoverCount == 0 && !hasMadeASuccessfulCall);
           worthLogging |= LOG.isDebugEnabled();
-          if (action.action == RetryAction.RetryDecision.FAILOVER_AND_RETRY &&
-              worthLogging) {
+          RetryAction failOverAction = getFailOverAction(actions);
+          long delay = getDelayMillis(actions);
+          if (failOverAction != null && worthLogging) {
             String msg = "Exception while invoking " + method.getName()
                 + " of class " + currentProxy.proxy.getClass().getSimpleName()
                 + " over " + currentProxy.proxyInfo;

@@ -135,22 +139,22 @@ public class RetryInvocationHandler<T> implements RpcInvocationHandler {
             if (invocationFailoverCount > 0) {
               msg += " after " + invocationFailoverCount + " fail over attempts";
             }
-            msg += ". Trying to fail over " + formatSleepMessage(action.delayMillis);
-            LOG.info(msg, e);
+            msg += ". Trying to fail over " + formatSleepMessage(delay);
+            LOG.info(msg, ex);
           } else {
             if(LOG.isDebugEnabled()) {
               LOG.debug("Exception while invoking " + method.getName()
                   + " of class " + currentProxy.proxy.getClass().getSimpleName()
                   + " over " + currentProxy.proxyInfo + ". Retrying "
-                  + formatSleepMessage(action.delayMillis), e);
+                  + formatSleepMessage(delay), ex);
             }
           }
 
-          if (action.delayMillis > 0) {
-            Thread.sleep(action.delayMillis);
+          if (delay > 0) {
+            Thread.sleep(delay);
           }
 
-          if (action.action == RetryAction.RetryDecision.FAILOVER_AND_RETRY) {
+          if (failOverAction != null) {
             // Make sure that concurrent failed method invocations only cause a
             // single actual fail over.
             synchronized (proxyProvider) {

@@ -170,6 +174,67 @@ public class RetryInvocationHandler<T> implements RpcInvocationHandler {
     }
   }
 
+  /**
+   * Obtain a retry delay from list of RetryActions.
+   */
+  private long getDelayMillis(List<RetryAction> actions) {
+    long retVal = 0;
+    for (RetryAction action : actions) {
+      if (action.action == RetryAction.RetryDecision.FAILOVER_AND_RETRY ||
+          action.action == RetryAction.RetryDecision.RETRY) {
+        if (action.delayMillis > retVal) {
+          retVal = action.delayMillis;
+        }
+      }
+    }
+    return retVal;
+  }
+
+  /**
+   * Return the first FAILOVER_AND_RETRY action.
+   */
+  private RetryAction getFailOverAction(List<RetryAction> actions) {
+    for (RetryAction action : actions) {
+      if (action.action == RetryAction.RetryDecision.FAILOVER_AND_RETRY) {
+        return action;
+      }
+    }
+    return null;
+  }
+
+  /**
+   * Return the last FAIL action.. only if there are no RETRY actions.
+   */
+  private RetryAction getFailAction(List<RetryAction> actions) {
+    RetryAction fAction = null;
+    for (RetryAction action : actions) {
+      if (action.action == RetryAction.RetryDecision.FAIL) {
+        fAction = action;
+      } else {
+        // Atleast 1 RETRY
+        return null;
+      }
+    }
+    return fAction;
+  }
+
+  private List<RetryAction> extractActions(RetryPolicy policy, Exception ex,
+      int i, int invocationFailoverCount,
+      boolean isIdempotentOrAtMostOnce) throws Exception {
+    List<RetryAction> actions = new LinkedList<>();
+    if (ex instanceof MultiException) {
+      for (Exception th : ((MultiException) ex).getExceptions().values()) {
+        actions.add(policy.shouldRetry(th, i, invocationFailoverCount,
+            isIdempotentOrAtMostOnce));
+      }
+    } else {
+      actions.add(policy.shouldRetry(ex, i,
+          invocationFailoverCount, isIdempotentOrAtMostOnce));
+    }
+    return actions;
+  }
+
   private static String formatSleepMessage(long millis) {
     if (millis > 0) {
       return "after sleeping for " + millis + "ms.";
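The three new helpers encode an aggregation rule over the per-exception RetryActions produced for a MultiException: fail only when every action says FAIL, fail over as soon as any action says FAILOVER_AND_RETRY, otherwise retry, and sleep for the longest delay requested by a retrying or failing-over action. A minimal standalone sketch of that decision rule (illustrative only; the class, enum and method names below are assumptions, not the patch code):

  import java.util.Arrays;
  import java.util.List;

  public class RetryDecisionSketch {                       // hypothetical
    enum Decision { FAIL, RETRY, FAILOVER_AND_RETRY }

    static Decision combine(List<Decision> decisions) {
      boolean allFail = true;
      for (Decision d : decisions) {
        if (d == Decision.FAILOVER_AND_RETRY) {
          return Decision.FAILOVER_AND_RETRY;              // any failover wins
        }
        if (d != Decision.FAIL) {
          allFail = false;                                 // at least one RETRY
        }
      }
      return allFail ? Decision.FAIL : Decision.RETRY;
    }

    public static void main(String[] args) {
      // One standby asking to fail over outweighs the other targets failing.
      System.out.println(combine(Arrays.asList(
          Decision.FAIL, Decision.FAILOVER_AND_RETRY)));   // FAILOVER_AND_RETRY
      System.out.println(combine(Arrays.asList(
          Decision.FAIL, Decision.FAIL)));                 // FAIL
    }
  }
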
@@ -37,6 +37,7 @@ import org.apache.hadoop.ipc.RemoteException;
 import org.apache.hadoop.ipc.RetriableException;
 import org.apache.hadoop.ipc.StandbyException;
 import org.apache.hadoop.net.ConnectTimeoutException;
+import org.apache.hadoop.security.token.SecretManager.InvalidToken;
 
 /**
  * <p>

@@ -575,6 +576,9 @@ public class RetryPolicies {
         // RetriableException or RetriableException wrapped
         return new RetryAction(RetryAction.RetryDecision.RETRY,
             getFailoverOrRetrySleepTime(retries));
+      } else if (e instanceof InvalidToken) {
+        return new RetryAction(RetryAction.RetryDecision.FAIL, 0,
+            "Invalid or Cancelled Token");
       } else if (e instanceof SocketException
           || (e instanceof IOException && !(e instanceof RemoteException))) {
         if (isIdempotentOrAtMostOnce) {

@@ -620,7 +624,7 @@ public class RetryPolicies {
     return unwrapped instanceof StandbyException;
   }
 
-  private static RetriableException getWrappedRetriableException(Exception e) {
+  static RetriableException getWrappedRetriableException(Exception e) {
     if (!(e instanceof RemoteException)) {
       return null;
     }
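Both the new InvalidToken FAIL path and the widened visibility of getWrappedRetriableException deal with server-side exceptions that reach the client wrapped in a RemoteException, so a plain instanceof check on the wrapper is not enough. A hedged sketch of that wrapping/unwrapping pattern (illustrative; the demo class and message are assumptions, and it assumes RemoteException.unwrapRemoteException materialises the named exception type as its contract describes):

  import java.io.IOException;
  import org.apache.hadoop.ipc.RemoteException;
  import org.apache.hadoop.ipc.RetriableException;

  public class UnwrapSketch {                              // hypothetical
    public static void main(String[] args) {
      RemoteException re = new RemoteException(
          RetriableException.class.getName(), "NameNode still starting up");
      IOException unwrapped = re.unwrapRemoteException(RetriableException.class);
      // instanceof on the wrapper is false; on the unwrapped cause it is true.
      System.out.println(re instanceof RetriableException);        // false
      System.out.println(unwrapped instanceof RetriableException); // true
    }
  }
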
@@ -25,6 +25,7 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.ipc.RemoteException;
 
 import com.google.protobuf.ServiceException;
+import org.apache.hadoop.ipc.RetriableException;
 
 public class RetryUtils {
   public static final Log LOG = LogFactory.getLog(RetryUtils.class);

@@ -92,7 +93,11 @@ public class RetryUtils {
 
           //see (1) and (2) in the javadoc of this method.
           final RetryPolicy p;
-          if (e instanceof RemoteException) {
+          if (e instanceof RetriableException
+              || RetryPolicies.getWrappedRetriableException(e) != null) {
+            // RetriableException or RetriableException wrapped
+            p = multipleLinearRandomRetry;
+          } else if (e instanceof RemoteException) {
             final RemoteException re = (RemoteException)e;
             p = remoteExceptionToRetry.equals(re.getClassName())?
                 multipleLinearRandomRetry: RetryPolicies.TRY_ONCE_THEN_FAIL;
@@ -32,11 +32,15 @@ import org.apache.hadoop.conf.Configuration;
  */
 public class CallQueueManager<E> {
   public static final Log LOG = LogFactory.getLog(CallQueueManager.class);
+  // Number of checkpoints for empty queue.
+  private static final int CHECKPOINT_NUM = 20;
+  // Interval to check empty queue.
+  private static final long CHECKPOINT_INTERVAL_MS = 10;
 
   @SuppressWarnings("unchecked")
   static <E> Class<? extends BlockingQueue<E>> convertQueueClass(
-      Class<?> queneClass, Class<E> elementClass) {
-    return (Class<? extends BlockingQueue<E>>)queneClass;
+      Class<?> queueClass, Class<E> elementClass) {
+    return (Class<? extends BlockingQueue<E>>)queueClass;
   }
   private final boolean clientBackOffEnabled;
 

@@ -159,18 +163,23 @@ public class CallQueueManager<E> {
   }
 
   /**
-   * Checks if queue is empty by checking at two points in time.
+   * Checks if queue is empty by checking at CHECKPOINT_NUM points with
+   * CHECKPOINT_INTERVAL_MS interval.
    * This doesn't mean the queue might not fill up at some point later, but
    * it should decrease the probability that we lose a call this way.
    */
   private boolean queueIsReallyEmpty(BlockingQueue<?> q) {
-    boolean wasEmpty = q.isEmpty();
-    try {
-      Thread.sleep(10);
-    } catch (InterruptedException ie) {
-      return false;
+    for (int i = 0; i < CHECKPOINT_NUM; i++) {
+      try {
+        Thread.sleep(CHECKPOINT_INTERVAL_MS);
+      } catch (InterruptedException ie) {
+        return false;
+      }
+      if (!q.isEmpty()) {
+        return false;
+      }
     }
-    return q.isEmpty() && wasEmpty;
+    return true;
   }
 
   private String stringRepr(Object o) {
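The reworked queueIsReallyEmpty trades a single 10 ms check for CHECKPOINT_NUM (20) checks at CHECKPOINT_INTERVAL_MS (10 ms) intervals, i.e. roughly 200 ms of sustained emptiness before the old queue is considered drained during a swap. A standalone sketch of the same polling idea using only JDK types (illustrative; the class and method names are assumptions):

  import java.util.concurrent.BlockingQueue;
  import java.util.concurrent.LinkedBlockingQueue;

  public class EmptyCheckSketch {                          // hypothetical
    static boolean staysEmpty(BlockingQueue<?> q, int checks, long intervalMs) {
      for (int i = 0; i < checks; i++) {
        try {
          Thread.sleep(intervalMs);
        } catch (InterruptedException ie) {
          Thread.currentThread().interrupt();
          return false;              // be conservative if interrupted
        }
        if (!q.isEmpty()) {
          return false;              // a producer slipped something in
        }
      }
      return true;
    }

    public static void main(String[] args) {
      BlockingQueue<String> q = new LinkedBlockingQueue<>();
      System.out.println(staysEmpty(q, 20, 10));   // true after ~200 ms
    }
  }
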
@@ -1484,7 +1484,13 @@ public class Client {
           }
         });
       } catch (ExecutionException e) {
-        throw new IOException(e);
+        Throwable cause = e.getCause();
+        // the underlying exception should normally be IOException
+        if (cause instanceof IOException) {
+          throw (IOException) cause;
+        } else {
+          throw new IOException(cause);
+        }
       }
       if (connection.addCall(call)) {
         break;
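The Client change preserves the concrete IOException type instead of blindly wrapping the ExecutionException, so callers that catch specific IOException subclasses keep working. A minimal sketch of that unwrapping pattern (illustrative; the class, method and sample exception are assumptions):

  import java.io.IOException;
  import java.net.ConnectException;
  import java.util.concurrent.ExecutionException;

  public class UnwrapExecutionExceptionSketch {            // hypothetical
    static void rethrowAsIOException(ExecutionException e) throws IOException {
      Throwable cause = e.getCause();
      if (cause instanceof IOException) {
        throw (IOException) cause;      // keep the concrete IOException type
      }
      throw new IOException(cause);     // anything else gets wrapped
    }

    public static void main(String[] args) {
      try {
        rethrowAsIOException(new ExecutionException(new ConnectException("refused")));
      } catch (ConnectException ce) {
        System.out.println("caught specific type: " + ce.getMessage());
      } catch (IOException ioe) {
        System.out.println("caught generic IOException");
      }
    }
  }
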
@@ -238,7 +238,7 @@ public class ProtobufRpcEngine implements RpcEngine {
       }
       if (Trace.isTracing()) {
         traceScope.getSpan().addTimelineAnnotation(
-            "Call got exception: " + e.getMessage());
+            "Call got exception: " + e.toString());
       }
       throw new ServiceException(e);
     } finally {
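Switching the trace annotation from e.getMessage() to e.toString() matters because getMessage() can legitimately be null, which would turn the annotation into "Call got exception: null"; toString() always includes the exception class name. A tiny illustration (demo class name is an assumption):

  public class MessageVsToStringSketch {                   // hypothetical
    public static void main(String[] args) {
      Exception e = new NullPointerException();
      System.out.println("Call got exception: " + e.getMessage()); // ...: null
      System.out.println("Call got exception: " + e.toString());
      // prints: Call got exception: java.lang.NullPointerException
    }
  }
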
@@ -210,4 +210,28 @@ public class RpcClientUtil {
     }
     return clazz.getSimpleName() + "#" + method.getName();
   }
+
+  /**
+   * Convert an RPC class method to a string.
+   * The format we want is
+   * 'SecondOutermostClassShortName#OutermostClassShortName'.
+   *
+   * For example, if the full class name is:
+   *   org.apache.hadoop.hdfs.protocol.ClientProtocol.getBlockLocations
+   *
+   * the format we want is:
+   *   ClientProtocol#getBlockLocations
+   */
+  public static String toTraceName(String fullName) {
+    int lastPeriod = fullName.lastIndexOf('.');
+    if (lastPeriod < 0) {
+      return fullName;
+    }
+    int secondLastPeriod = fullName.lastIndexOf('.', lastPeriod - 1);
+    if (secondLastPeriod < 0) {
+      return fullName;
+    }
+    return fullName.substring(secondLastPeriod + 1, lastPeriod) + "#" +
+        fullName.substring(lastPeriod + 1);
+  }
 }
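The new toTraceName helper is pure string manipulation, so its behaviour is easy to pin down with a couple of inputs (a test-style sketch; the expected outputs follow directly from the javadoc above, while the demo class name is an assumption):

  import org.apache.hadoop.ipc.RpcClientUtil;

  public class TraceNameSketch {                           // hypothetical
    public static void main(String[] args) {
      System.out.println(RpcClientUtil.toTraceName(
          "org.apache.hadoop.hdfs.protocol.ClientProtocol.getBlockLocations"));
      // -> ClientProtocol#getBlockLocations
      System.out.println(RpcClientUtil.toTraceName("getBlockLocations"));
      // -> getBlockLocations (no '.' at all, returned unchanged)
    }
  }
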
@@ -1963,7 +1963,9 @@ public abstract class Server {
         // If the incoming RPC included tracing info, always continue the trace
         TraceInfo parentSpan = new TraceInfo(header.getTraceInfo().getTraceId(),
             header.getTraceInfo().getParentId());
-        traceSpan = Trace.startSpan(rpcRequest.toString(), parentSpan).detach();
+        traceSpan = Trace.startSpan(
+            RpcClientUtil.toTraceName(rpcRequest.toString()),
+            parentSpan).detach();
       }
 
       Call call = new Call(header.getCallId(), header.getRetryCount(),

@@ -2153,6 +2155,7 @@ public abstract class Server {
           CurCall.set(call);
           if (call.traceSpan != null) {
             traceScope = Trace.continueSpan(call.traceSpan);
+            traceScope.getSpan().addTimelineAnnotation("called");
           }
 
           try {
@@ -85,7 +85,7 @@ public class RpcMetrics {
   @Metric("Number of sent bytes") MutableCounterLong sentBytes;
   @Metric("Queue time") MutableRate rpcQueueTime;
   MutableQuantiles[] rpcQueueTimeMillisQuantiles;
-  @Metric("Processsing time") MutableRate rpcProcessingTime;
+  @Metric("Processing time") MutableRate rpcProcessingTime;
   MutableQuantiles[] rpcProcessingTimeMillisQuantiles;
   @Metric("Number of authentication failures")
   MutableCounterLong rpcAuthenticationFailures;

@@ -93,7 +93,7 @@ public class RpcMetrics {
   MutableCounterLong rpcAuthenticationSuccesses;
   @Metric("Number of authorization failures")
   MutableCounterLong rpcAuthorizationFailures;
-  @Metric("Number of authorization sucesses")
+  @Metric("Number of authorization successes")
   MutableCounterLong rpcAuthorizationSuccesses;
   @Metric("Number of client backoff requests")
   MutableCounterLong rpcClientBackoff;

@@ -108,7 +108,7 @@ public class RpcMetrics {
 
   // Public instrumentation methods that could be extracted to an
   // abstract class if we decide to do custom instrumentation classes a la
-  // JobTrackerInstrumenation. The methods with //@Override comment are
+  // JobTrackerInstrumentation. The methods with //@Override comment are
   // candidates for abstract methods in a abstract instrumentation class.
 
   /**
@@ -19,4 +19,7 @@
  * This package provides access to JMX primarily through the
  * {@link org.apache.hadoop.jmx.JMXJsonServlet} class.
  */
+@InterfaceAudience.Private
 package org.apache.hadoop.jmx;
+
+import org.apache.hadoop.classification.InterfaceAudience;