Merging r1527684 through r1532876 from trunk to branch HDFS-2832

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/HDFS-2832@1532910 13f79535-47bb-0310-9956-ffa450edef68
Arpit Agarwal 2013-10-16 21:07:28 +00:00
commit 168b133337
340 changed files with 14177 additions and 3473 deletions

.gitattributes

@@ -15,5 +15,6 @@
*.bat text eol=crlf
*.cmd text eol=crlf
*.vcxproj text merge=union eol=crlf
*.csproj text merge=union eol=crlf
*.sln text merge=union eol=crlf


@@ -315,9 +315,6 @@ Release 2.3.0 - UNRELEASED
HADOOP-9435. Support building the JNI code against the IBM JVM.
(Tian Hong Wang via Colin Patrick McCabe)
HADOOP-9758. Provide configuration option for FileSystem/FileContext
symlink resolution. (Andrew Wang via Colin Patrick McCabe)
HADOOP-9848. Create a MiniKDC for use with security testing.
(ywskycn via tucu)
@@ -342,6 +339,24 @@ Release 2.3.0 - UNRELEASED
HADOOP-10006. Compilation failure in trunk for
o.a.h.fs.swift.util.JSONUtil (Junping Du via stevel)
HADOOP-9063. enhance unit-test coverage of class
org.apache.hadoop.fs.FileUtil (Ivan A. Veselovsky via jlowe)
HADOOP-9254. Cover packages org.apache.hadoop.util.bloom,
org.apache.hadoop.util.hash (Vadim Bondarev via jlowe)
HADOOP-9225. Cover package org.apache.hadoop.compress.Snappy (Vadim
Bondarev, Andrey Klochkov and Nathan Roberts via jlowe)
HADOOP-9199. Cover package org.apache.hadoop.io with unit tests (Andrey
Klochkov via jeagles)
HADOOP-9470. eliminate duplicate FQN tests in different Hadoop modules
(Ivan A. Veselovsky via daryn)
HADOOP-9494. Excluded auto-generated and examples code from clover reports
(Andrey Klochkov via jeagles)
OPTIMIZATIONS
HADOOP-9748. Reduce blocking on UGI.ensureInitialized (daryn)
@@ -384,7 +399,7 @@ Release 2.3.0 - UNRELEASED
HADOOP-9981. globStatus should minimize its listStatus and getFileStatus
calls. (Contributed by Colin Patrick McCabe)
Release 2.2.0 - UNRELEASED
Release 2.2.1 - UNRELEASED
INCOMPATIBLE CHANGES
@@ -392,11 +407,28 @@ Release 2.2.0 - UNRELEASED
IMPROVEMENTS
HADOOP-10046. Print a log message when SSL is enabled.
(David S. Wang via wang)
OPTIMIZATIONS
BUG FIXES
Release 2.1.2 - UNRELEASED
HADOOP-10028. Malformed ssl-server.xml.example. (Haohui Mai via jing9)
HADOOP-10030. FsShell -put/copyFromLocal should support Windows local path.
(Chuan Liu via cnauroth)
HADOOP-10031. FsShell -get/copyToLocal/moveFromLocal should support Windows
local path. (Chuan Liu via cnauroth)
HADOOP-10039. Add Hive to the list of projects using
AbstractDelegationTokenSecretManager. (Haohui Mai via jing9)
HADOOP-10040. hadoop.cmd in UNIX format and would not run by default on
Windows. (cnauroth)
Release 2.2.0 - 2013-10-13
INCOMPATIBLE CHANGES
@@ -410,6 +442,12 @@ Release 2.1.2 - UNRELEASED
HADOOP-9976. Different versions of avro and avro-maven-plugin (Karthik
Kambatla via Sandy Ryza)
HADOOP-9758. Provide configuration option for FileSystem/FileContext
symlink resolution. (Andrew Wang via Colin Patrick McCabe)
HADOOP-8315. Support SASL-authenticated ZooKeeper in ActiveStandbyElector
(todd)
OPTIMIZATIONS
BUG FIXES
@@ -420,6 +458,12 @@ Release 2.1.2 - UNRELEASED
HADOOP-9761. ViewFileSystem#rename fails when using DistributedFileSystem.
(Andrew Wang via Colin Patrick McCabe)
HADOOP-10003. HarFileSystem.listLocatedStatus() fails.
(Jason Dere and suresh via suresh)
HADOOP-10017. Fix NPE in DFSClient#getDelegationToken when doing Distcp
from a secured cluster to an insecured cluster. (Haohui Mai via jing9)
Release 2.1.1-beta - 2013-09-23
INCOMPATIBLE CHANGES


@@ -464,6 +464,10 @@
<exclude>src/main/native/src/org/apache/hadoop/io/compress/lz4/lz4hc.c</exclude>
<exclude>src/main/native/src/org/apache/hadoop/io/compress/lz4/lz4hc_encoder.h</exclude>
<exclude>src/test/java/org/apache/hadoop/fs/test-untar.tgz</exclude>
<exclude>src/test/resources/test.har/_SUCCESS</exclude>
<exclude>src/test/resources/test.har/_index</exclude>
<exclude>src/test/resources/test.har/_masterindex</exclude>
<exclude>src/test/resources/test.har/part-0</exclude>
</excludes>
</configuration>
</plugin>


@@ -44,6 +44,7 @@
<value>10000</value>
<description>Truststore reload check interval, in milliseconds.
Default value is 10000 (10 seconds).
</description>
</property>
<property>


@@ -15,6 +15,622 @@
limitations under the License.
-->
<META http-equiv="Content-Type" content="text/html; charset=UTF-8">
<title>Hadoop 2.2.0 Release Notes</title>
<STYLE type="text/css">
H1 {font-family: sans-serif}
H2 {font-family: sans-serif; margin-left: 7mm}
TABLE {margin-left: 7mm}
</STYLE>
</head>
<body>
<h1>Hadoop 2.2.0 Release Notes</h1>
These release notes include new developer and user-facing incompatibilities, features, and major improvements.
<a name="changes"/>
<h2>Changes since Hadoop 2.1.1-beta</h2>
<ul>
<li> <a href="https://issues.apache.org/jira/browse/YARN-1278">YARN-1278</a>.
Blocker bug reported by Yesha Vora and fixed by Hitesh Shah <br>
<b>New AM does not start after rm restart</b><br>
<blockquote>The new AM fails to start after the RM restarts. It fails to start a new ApplicationMaster, and the job fails with the error below.
/usr/bin/mapred job -status job_1380985373054_0001
13/10/05 15:04:04 INFO client.RMProxy: Connecting to ResourceManager at hostname
Job: job_1380985373054_0001
Job File: /user/abc/.staging/job_1380985373054_0001/job.xml
Job Tracking URL : http://hostname:8088/cluster/app/application_1380985373054_0001
Uber job : false
Number of maps: 0
Number of reduces: 0
map() completion: 0.0
reduce() completion: 0.0
Job state: FAILED
retired: false
reason for failure: There are no failed tasks for the job. Job is failed due to some other reason and reason can be found in the logs.
Counters: 0</blockquote></li>
<li> <a href="https://issues.apache.org/jira/browse/YARN-1277">YARN-1277</a>.
Major sub-task reported by Suresh Srinivas and fixed by Omkar Vinit Joshi <br>
<b>Add http policy support for YARN daemons</b><br>
<blockquote>This is the YARN part of HADOOP-10022.</blockquote></li>
<li> <a href="https://issues.apache.org/jira/browse/YARN-1274">YARN-1274</a>.
Blocker bug reported by Alejandro Abdelnur and fixed by Siddharth Seth (nodemanager)<br>
<b>LCE fails to run containers that don't have resources to localize</b><br>
<blockquote>LCE container launch assumes the usercache/USER directory exists and it is owned by the user running the container process.
But the directory is created by the LCE localization command only if there are resources to localize. If there are no resources to localize, LCE localization never executes, launching fails with exit code 255, and the NM logs have something like:
{code}
2013-10-04 14:07:56,425 INFO org.apache.hadoop.yarn.server.nodemanager.ContainerExecutor: main : command provided 1
2013-10-04 14:07:56,425 INFO org.apache.hadoop.yarn.server.nodemanager.ContainerExecutor: main : user is llama
2013-10-04 14:07:56,425 INFO org.apache.hadoop.yarn.server.nodemanager.ContainerExecutor: Can't create directory llama in /yarn/nm/usercache/llama/appcache/application_1380853306301_0004/container_1380853306301_0004_01_000004 - Permission denied
{code}
</blockquote></li>
<li> <a href="https://issues.apache.org/jira/browse/YARN-1273">YARN-1273</a>.
Major bug reported by Hitesh Shah and fixed by Hitesh Shah <br>
<b>Distributed shell does not account for start container failures reported asynchronously.</b><br>
<blockquote>2013-10-04 22:09:15,234 ERROR [org.apache.hadoop.yarn.client.api.async.impl.NMClientAsyncImpl #1] distributedshell.ApplicationMaster (ApplicationMaster.java:onStartContainerError(719)) - Failed to start Container container_1380920347574_0018_01_000006</blockquote></li>
<li> <a href="https://issues.apache.org/jira/browse/YARN-1271">YARN-1271</a>.
Major bug reported by Sandy Ryza and fixed by Sandy Ryza (nodemanager)<br>
<b>"Text file busy" errors launching containers again</b><br>
<blockquote>The error is shown below in the comments.
MAPREDUCE-2374 fixed this by removing "-c" when running the container launch script. It looks like the "-c" got brought back during the windows branch merge, so we should remove it again.</blockquote></li>
<li> <a href="https://issues.apache.org/jira/browse/YARN-1262">YARN-1262</a>.
Major bug reported by Sandy Ryza and fixed by Karthik Kambatla <br>
<b>TestApplicationCleanup relies on all containers assigned in a single heartbeat</b><br>
<blockquote>TestApplicationCleanup submits container requests and waits for allocations to come in. It only sends a single node heartbeat to the node, expecting multiple containers to be assigned on this heartbeat, which not all schedulers do by default.
This is causing the test to fail when run with the Fair Scheduler.</blockquote></li>
<li> <a href="https://issues.apache.org/jira/browse/YARN-1260">YARN-1260</a>.
Major sub-task reported by Yesha Vora and fixed by Omkar Vinit Joshi <br>
<b>RM_HOME link breaks when webapp.https.address related properties are not specified</b><br>
<blockquote>This issue happens in multiple node cluster where resource manager and node manager are running on different machines.
Steps to reproduce:
1) set yarn.resourcemanager.hostname = &lt;resourcemanager host&gt; in yarn-site.xml
2) set hadoop.ssl.enabled = true in core-site.xml
3) Do not specify below property in yarn-site.xml
yarn.nodemanager.webapp.https.address and yarn.resourcemanager.webapp.https.address
Here, the default value of above two property will be considered.
4) Go to nodemanager web UI "https://&lt;nodemanager host&gt;:8044/node"
5) Click on RM_HOME link
This link redirects to "https://&lt;nodemanager host&gt;:8090/cluster" instead of "https://&lt;resourcemanager host&gt;:8090/cluster"
</blockquote></li>
<li> <a href="https://issues.apache.org/jira/browse/YARN-1256">YARN-1256</a>.
Critical sub-task reported by Bikas Saha and fixed by Xuan Gong <br>
<b>NM silently ignores non-existent service in StartContainerRequest</b><br>
<blockquote>A container can set token service metadata for a service, say shuffle_service. If that service does not exist, the error is silently ignored. Later, when the next container wants to access data written to shuffle_service by the first task, it fails because the service does not have the token that was supposed to be set by the first task.</blockquote></li>
<li> <a href="https://issues.apache.org/jira/browse/YARN-1254">YARN-1254</a>.
Major sub-task reported by Vinod Kumar Vavilapalli and fixed by Omkar Vinit Joshi <br>
<b>NM is polluting container's credentials</b><br>
<blockquote>Before launching the container, NM is using the same credential object and so is polluting what container should see. We should fix this.</blockquote></li>
<li> <a href="https://issues.apache.org/jira/browse/YARN-1251">YARN-1251</a>.
Major bug reported by Junping Du and fixed by Xuan Gong (applications/distributed-shell)<br>
<b>TestDistributedShell#TestDSShell failed with timeout</b><br>
<blockquote>TestDistributedShell#TestDSShell has been failing consistently on trunk Jenkins recently.
The Stacktrace is:
{code}
java.lang.Exception: test timed out after 90000 milliseconds
at com.google.protobuf.LiteralByteString.&lt;init&gt;(LiteralByteString.java:234)
at com.google.protobuf.ByteString.copyFromUtf8(ByteString.java:255)
at org.apache.hadoop.ipc.protobuf.ProtobufRpcEngineProtos$RequestHeaderProto.getMethodNameBytes(ProtobufRpcEngineProtos.java:286)
at org.apache.hadoop.ipc.protobuf.ProtobufRpcEngineProtos$RequestHeaderProto.getSerializedSize(ProtobufRpcEngineProtos.java:462)
at com.google.protobuf.AbstractMessageLite.writeDelimitedTo(AbstractMessageLite.java:84)
at org.apache.hadoop.ipc.ProtobufRpcEngine$RpcMessageWithHeader.write(ProtobufRpcEngine.java:302)
at org.apache.hadoop.ipc.Client$Connection.sendRpcRequest(Client.java:989)
at org.apache.hadoop.ipc.Client.call(Client.java:1377)
at org.apache.hadoop.ipc.Client.call(Client.java:1357)
at org.apache.hadoop.ipc.ProtobufRpcEngine$Invoker.invoke(ProtobufRpcEngine.java:206)
at $Proxy70.getApplicationReport(Unknown Source)
at org.apache.hadoop.yarn.api.impl.pb.client.ApplicationClientProtocolPBClientImpl.getApplicationReport(ApplicationClientProtocolPBClientImpl.java:137)
at sun.reflect.GeneratedMethodAccessor40.invoke(Unknown Source)
at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:25)
at java.lang.reflect.Method.invoke(Method.java:597)
at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:185)
at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:101)
at $Proxy71.getApplicationReport(Unknown Source)
at org.apache.hadoop.yarn.client.api.impl.YarnClientImpl.getApplicationReport(YarnClientImpl.java:195)
at org.apache.hadoop.yarn.applications.distributedshell.Client.monitorApplication(Client.java:622)
at org.apache.hadoop.yarn.applications.distributedshell.Client.run(Client.java:597)
at org.apache.hadoop.yarn.applications.distributedshell.TestDistributedShell.testDSShell(TestDistributedShell.java:125)
{code}
For details, please refer:
https://builds.apache.org/job/PreCommit-YARN-Build/2039//testReport/</blockquote></li>
<li> <a href="https://issues.apache.org/jira/browse/YARN-1247">YARN-1247</a>.
Major bug reported by Roman Shaposhnik and fixed by Roman Shaposhnik (nodemanager)<br>
<b>test-container-executor has gotten out of sync with the changes to container-executor</b><br>
<blockquote>If run under the super-user account test-container-executor.c fails in multiple different places. It would be nice to fix it so that we have better testing of LCE functionality.</blockquote></li>
<li> <a href="https://issues.apache.org/jira/browse/YARN-1246">YARN-1246</a>.
Minor improvement reported by Arpit Gupta and fixed by Arpit Gupta <br>
<b>Log application status in the rm log when app is done running</b><br>
<blockquote>Since there is no YARN history server, it becomes difficult to determine what the status of an old application is. One has to be familiar with the state transitions in YARN to know what counts as a success.
We should add a log at info level that captures what the finalStatus of an app is. This would be helpful while debugging applications if the RM has restarted and we no longer can use the UI.</blockquote></li>
<li> <a href="https://issues.apache.org/jira/browse/YARN-1236">YARN-1236</a>.
Major bug reported by Sandy Ryza and fixed by Sandy Ryza (resourcemanager)<br>
<b>FairScheduler setting queue name in RMApp is not working </b><br>
<blockquote>The fair scheduler sometimes picks a different queue than the one an application was submitted to, such as when user-as-default-queue is turned on. It needs to update the queue name in the RMApp so that this choice will be reflected in the UI.
This isn't working because the scheduler is looking up the RMApp by application attempt id instead of app id and failing to find it.</blockquote></li>
<li> <a href="https://issues.apache.org/jira/browse/YARN-1229">YARN-1229</a>.
Blocker bug reported by Tassapol Athiapinya and fixed by Xuan Gong (nodemanager)<br>
<b>Define constraints on Auxiliary Service names. Change ShuffleHandler service name from mapreduce.shuffle to mapreduce_shuffle.</b><br>
<blockquote>I run sleep job. If AM fails to start, this exception could occur:
13/09/20 11:00:23 INFO mapreduce.Job: Job job_1379673267098_0020 failed with state FAILED due to: Application application_1379673267098_0020 failed 1 times due to AM Container for appattempt_1379673267098_0020_000001 exited with exitCode: 1 due to: Exception from container-launch:
org.apache.hadoop.util.Shell$ExitCodeException: /myappcache/application_1379673267098_0020/container_1379673267098_0020_01_000001/launch_container.sh: line 12: export: `NM_AUX_SERVICE_mapreduce.shuffle=AAA0+gAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=
': not a valid identifier
at org.apache.hadoop.util.Shell.runCommand(Shell.java:464)
at org.apache.hadoop.util.Shell.run(Shell.java:379)
at org.apache.hadoop.util.Shell$ShellCommandExecutor.execute(Shell.java:589)
at org.apache.hadoop.yarn.server.nodemanager.DefaultContainerExecutor.launchContainer(DefaultContainerExecutor.java:195)
at org.apache.hadoop.yarn.server.nodemanager.containermanager.launcher.ContainerLaunch.call(ContainerLaunch.java:270)
at org.apache.hadoop.yarn.server.nodemanager.containermanager.launcher.ContainerLaunch.call(ContainerLaunch.java:78)
at java.util.concurrent.FutureTask$Sync.innerRun(FutureTask.java:303)
at java.util.concurrent.FutureTask.run(FutureTask.java:138)
at java.util.concurrent.ThreadPoolExecutor$Worker.runTask(ThreadPoolExecutor.java:886)
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:908)
at java.lang.Thread.run(Thread.java:662)
.Failing this attempt.. Failing the application.</blockquote></li>
<li> <a href="https://issues.apache.org/jira/browse/YARN-1228">YARN-1228</a>.
Major improvement reported by Sandy Ryza and fixed by Sandy Ryza (scheduler)<br>
<b>Clean up Fair Scheduler configuration loading</b><br>
<blockquote>Currently the Fair Scheduler is configured in two ways
* An allocations file that has a different format than the standard Hadoop configuration file, which makes it easier to specify hierarchical objects like queues and their properties.
* With properties like yarn.scheduler.fair.max.assign that are specified in the standard Hadoop configuration format.
The standard and default way of configuring it is to use fair-scheduler.xml as the allocations file and to put the yarn.scheduler properties in yarn-site.xml.
It is also possible to specify a different file as the allocations file, and to place the yarn.scheduler properties in fair-scheduler.xml, which will be interpreted as in the standard Hadoop configuration format. This flexibility is both confusing and unnecessary.
Additionally, the allocation file is loaded as fair-scheduler.xml from the classpath if it is not specified, but is loaded as a File if it is. This causes two problems
1. We see different behavior when not setting the yarn.scheduler.fair.allocation.file, and setting it to fair-scheduler.xml, which is its default.
2. Classloaders may choose to cache resources, which can break the reload logic when yarn.scheduler.fair.allocation.file is not specified.
We should never allow the yarn.scheduler properties to go into fair-scheduler.xml. And we should always load the allocations file as a file, not as a resource on the classpath. To preserve existing behavior and allow loading files from the classpath, we can look for files on the classpath, but strip off their scheme and interpret them as Files.
</blockquote></li>
<li> <a href="https://issues.apache.org/jira/browse/YARN-1221">YARN-1221</a>.
Major bug reported by Sandy Ryza and fixed by Siqi Li (resourcemanager , scheduler)<br>
<b>With Fair Scheduler, reserved MB reported in RM web UI increases indefinitely</b><br>
<blockquote></blockquote></li>
<li> <a href="https://issues.apache.org/jira/browse/YARN-1219">YARN-1219</a>.
Major bug reported by shanyu zhao and fixed by shanyu zhao (nodemanager)<br>
<b>FSDownload changes file suffix making FileUtil.unTar() throw exception</b><br>
<blockquote>While running a Hive join operation on Yarn, I saw exception as described below. This is caused by FSDownload copy the files into a temp file and change the suffix into ".tmp" before unpacking it. In unpack(), it uses FileUtil.unTar() which will determine if the file is "gzipped" by looking at the file suffix:
{code}
boolean gzipped = inFile.toString().endsWith("gz");
{code}
To fix this problem, we can remove the ".tmp" in the temp file name.
Here is the detailed exception:
org.apache.commons.compress.archivers.tar.TarArchiveInputStream.getNextTarEntry(TarArchiveInputStream.java:240)
at org.apache.hadoop.fs.FileUtil.unTarUsingJava(FileUtil.java:676)
at org.apache.hadoop.fs.FileUtil.unTar(FileUtil.java:625)
at org.apache.hadoop.yarn.util.FSDownload.unpack(FSDownload.java:203)
at org.apache.hadoop.yarn.util.FSDownload.call(FSDownload.java:287)
at org.apache.hadoop.yarn.util.FSDownload.call(FSDownload.java:50)
at java.util.concurrent.FutureTask$Sync.innerRun(FutureTask.java:334)
at java.util.concurrent.FutureTask.run(FutureTask.java:166)
at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:471)
at java.util.concurrent.FutureTask$Sync.innerRun(FutureTask.java:334)
at java.util.concurrent.FutureTask.run(FutureTask.java:166)
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1110)
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:603)
at java.lang.Thread.run(Thread.java:722)</blockquote></li>
<li> <a href="https://issues.apache.org/jira/browse/YARN-1215">YARN-1215</a>.
Major bug reported by Chuan Liu and fixed by Chuan Liu (api)<br>
<b>Yarn URL should include userinfo</b><br>
<blockquote>In the {{org.apache.hadoop.yarn.api.records.URL}} class, we don't have a userinfo as part of the URL. When converting a {{java.net.URI}} object into the YARN URL object in the {{ConverterUtils.getYarnUrlFromURI()}} method, we set the uri host as the url host. If the uri has a userinfo part, the userinfo is discarded. This will lead to information loss if the original uri has the userinfo, e.g. foo://username:password@example.com will be converted to foo://example.com and username/password information is lost during the conversion.
</blockquote></li>
<li> <a href="https://issues.apache.org/jira/browse/YARN-1214">YARN-1214</a>.
Critical sub-task reported by Jian He and fixed by Jian He (resourcemanager)<br>
<b>Register ClientToken MasterKey in SecretManager after it is saved</b><br>
<blockquote>Currently, app attempt ClientToken master key is registered before it is saved. This can cause problem that before the master key is saved, client gets the token and RM also crashes, RM cannot reloads the master key back after it restarts as it is not saved. As a result, client is holding an invalid token.
We can register the client token master key after it is saved in the store.</blockquote></li>
<li> <a href="https://issues.apache.org/jira/browse/YARN-1213">YARN-1213</a>.
Major improvement reported by Sandy Ryza and fixed by Sandy Ryza (scheduler)<br>
<b>Restore config to ban submitting to undeclared pools in the Fair Scheduler</b><br>
<blockquote></blockquote></li>
<li> <a href="https://issues.apache.org/jira/browse/YARN-1204">YARN-1204</a>.
Major sub-task reported by Yesha Vora and fixed by Omkar Vinit Joshi <br>
<b>Need to add https port related property in Yarn</b><br>
<blockquote>There is no yarn property available to configure the https port for the Resource Manager, NodeManager and history server. Currently, Yarn services use the port defined for http [defined by 'mapreduce.jobhistory.webapp.address','yarn.nodemanager.webapp.address', 'yarn.resourcemanager.webapp.address'] for running services on the https protocol.
Yarn should have list of property to assign https port for RM, NM and JHS.
It can be like below.
yarn.nodemanager.webapp.https.address
yarn.resourcemanager.webapp.https.address
mapreduce.jobhistory.webapp.https.address </blockquote></li>
<li> <a href="https://issues.apache.org/jira/browse/YARN-1203">YARN-1203</a>.
Major sub-task reported by Yesha Vora and fixed by Omkar Vinit Joshi <br>
<b>Application Manager UI does not appear with Https enabled</b><br>
<blockquote>Need to add support to disable 'hadoop.ssl.enabled' for MR jobs.
A job should be able to run on http protocol by setting 'hadoop.ssl.enabled' property at job level.
</blockquote></li>
<li> <a href="https://issues.apache.org/jira/browse/YARN-1167">YARN-1167</a>.
Major bug reported by Tassapol Athiapinya and fixed by Xuan Gong (applications/distributed-shell)<br>
<b>Submitted distributed shell application shows appMasterHost = empty</b><br>
<blockquote>Submit a distributed shell application. Once the application reaches the RUNNING state, the app master host should not be empty. In reality, it is empty.
==console logs==
distributedshell.Client: Got application report from ASM for, appId=12, clientToAMToken=null, appDiagnostics=, appMasterHost=, appQueue=default, appMasterRpcPort=0, appStartTime=1378505161360, yarnAppState=RUNNING, distributedFinalState=UNDEFINED,
</blockquote></li>
<li> <a href="https://issues.apache.org/jira/browse/YARN-1157">YARN-1157</a>.
Major bug reported by Tassapol Athiapinya and fixed by Xuan Gong (resourcemanager)<br>
<b>ResourceManager UI has invalid tracking URL link for distributed shell application</b><br>
<blockquote>Submit YARN distributed shell application. Goto ResourceManager Web UI. The application definitely appears. In Tracking UI column, there will be history link. Click on that link. Instead of showing application master web UI, HTTP error 500 would appear.</blockquote></li>
<li> <a href="https://issues.apache.org/jira/browse/YARN-1149">YARN-1149</a>.
Major bug reported by Ramya Sunil and fixed by Xuan Gong <br>
<b>NM throws InvalidStateTransitonException: Invalid event: APPLICATION_LOG_HANDLING_FINISHED at RUNNING</b><br>
<blockquote>When nodemanager receives a kill signal when an application has finished execution but log aggregation has not kicked in, InvalidStateTransitonException: Invalid event: APPLICATION_LOG_HANDLING_FINISHED at RUNNING is thrown
{noformat}
2013-08-25 20:45:00,875 INFO logaggregation.AppLogAggregatorImpl (AppLogAggregatorImpl.java:finishLogAggregation(254)) - Application just finished : application_1377459190746_0118
2013-08-25 20:45:00,876 INFO logaggregation.AppLogAggregatorImpl (AppLogAggregatorImpl.java:uploadLogsForContainer(105)) - Starting aggregate log-file for app application_1377459190746_0118 at /app-logs/foo/logs/application_1377459190746_0118/&lt;host&gt;_45454.tmp
2013-08-25 20:45:00,876 INFO logaggregation.LogAggregationService (LogAggregationService.java:stopAggregators(151)) - Waiting for aggregation to complete for application_1377459190746_0118
2013-08-25 20:45:00,891 INFO logaggregation.AppLogAggregatorImpl (AppLogAggregatorImpl.java:uploadLogsForContainer(122)) - Uploading logs for container container_1377459190746_0118_01_000004. Current good log dirs are /tmp/yarn/local
2013-08-25 20:45:00,915 INFO logaggregation.AppLogAggregatorImpl (AppLogAggregatorImpl.java:doAppLogAggregation(182)) - Finished aggregate log-file for app application_1377459190746_0118
2013-08-25 20:45:00,925 WARN application.Application (ApplicationImpl.java:handle(427)) - Can't handle this event at current state
org.apache.hadoop.yarn.state.InvalidStateTransitonException: Invalid event: APPLICATION_LOG_HANDLING_FINISHED at RUNNING
at org.apache.hadoop.yarn.state.StateMachineFactory.doTransition(StateMachineFactory.java:305)
at org.apache.hadoop.yarn.state.StateMachineFactory.access$300(StateMachineFactory.java:46)
at org.apache.hadoop.yarn.state.StateMachineFactory$InternalStateMachine.doTransition(StateMachineFactory.java:448)
at org.apache.hadoop.yarn.server.nodemanager.containermanager.application.ApplicationImpl.handle(ApplicationImpl.java:425)
at org.apache.hadoop.yarn.server.nodemanager.containermanager.application.ApplicationImpl.handle(ApplicationImpl.java:59)
at org.apache.hadoop.yarn.server.nodemanager.containermanager.ContainerManagerImpl$ApplicationEventDispatcher.handle(ContainerManagerImpl.java:697)
at org.apache.hadoop.yarn.server.nodemanager.containermanager.ContainerManagerImpl$ApplicationEventDispatcher.handle(ContainerManagerImpl.java:689)
at org.apache.hadoop.yarn.event.AsyncDispatcher.dispatch(AsyncDispatcher.java:134)
at org.apache.hadoop.yarn.event.AsyncDispatcher$1.run(AsyncDispatcher.java:81)
at java.lang.Thread.run(Thread.java:662)
2013-08-25 20:45:00,926 INFO application.Application (ApplicationImpl.java:handle(430)) - Application application_1377459190746_0118 transitioned from RUNNING to null
2013-08-25 20:45:00,927 WARN monitor.ContainersMonitorImpl (ContainersMonitorImpl.java:run(463)) - org.apache.hadoop.yarn.server.nodemanager.containermanager.monitor.ContainersMonitorImpl is interrupted. Exiting.
2013-08-25 20:45:00,938 INFO ipc.Server (Server.java:stop(2437)) - Stopping server on 8040
{noformat}
</blockquote></li>
<li> <a href="https://issues.apache.org/jira/browse/YARN-1141">YARN-1141</a>.
Major bug reported by Zhijie Shen and fixed by Zhijie Shen <br>
<b>Updating resource requests should be decoupled with updating blacklist</b><br>
<blockquote>Currently, in CapacityScheduler and FifoScheduler, blacklist is updated together with resource requests, only when the incoming resource requests are not empty. Therefore, when the incoming resource requests are empty, the blacklist will not be updated even when blacklist additions and removals are not empty.</blockquote></li>
<li> <a href="https://issues.apache.org/jira/browse/YARN-1131">YARN-1131</a>.
Minor sub-task reported by Tassapol Athiapinya and fixed by Siddharth Seth (client)<br>
<b>$yarn logs command should return an appropriate error message if YARN application is still running</b><br>
<blockquote>In the case when log aggregation is enabled, if a user submits a MapReduce job and runs $ yarn logs -applicationId &lt;app ID&gt; while the YARN application is running, the command returns no message and returns the user back to the shell. It would be nice to tell the user that log aggregation is in progress.
{code}
-bash-4.1$ /usr/bin/yarn logs -applicationId application_1377900193583_0002
-bash-4.1$
{code}
At the same time, if invalid application ID is given, YARN CLI should say that the application ID is incorrect rather than throwing NoSuchElementException.
{code}
$ /usr/bin/yarn logs -applicationId application_00000
Exception in thread "main" java.util.NoSuchElementException
at com.google.common.base.AbstractIterator.next(AbstractIterator.java:75)
at org.apache.hadoop.yarn.util.ConverterUtils.toApplicationId(ConverterUtils.java:124)
at org.apache.hadoop.yarn.util.ConverterUtils.toApplicationId(ConverterUtils.java:119)
at org.apache.hadoop.yarn.logaggregation.LogDumper.run(LogDumper.java:110)
at org.apache.hadoop.yarn.logaggregation.LogDumper.main(LogDumper.java:255)
{code}
</blockquote></li>
<li> <a href="https://issues.apache.org/jira/browse/YARN-1128">YARN-1128</a>.
Major bug reported by Sandy Ryza and fixed by Karthik Kambatla (scheduler)<br>
<b>FifoPolicy.computeShares throws NPE on empty list of Schedulables</b><br>
<blockquote>FifoPolicy gives all of a queue's share to the earliest-scheduled application.
{code}
Schedulable earliest = null;
for (Schedulable schedulable : schedulables) {
if (earliest == null ||
schedulable.getStartTime() &lt; earliest.getStartTime()) {
earliest = schedulable;
}
}
earliest.setFairShare(Resources.clone(totalResources));
{code}
If the queue has no schedulables in it, earliest will be left null, leading to an NPE on the last line.</blockquote></li>
<li> <a href="https://issues.apache.org/jira/browse/YARN-1090">YARN-1090</a>.
Major bug reported by Yesha Vora and fixed by Jian He <br>
<b>Job does not get into Pending State</b><br>
<blockquote>When there is no resource available to run a job, the next job should go into the pending state. The RM UI should show the next job as a pending app, and the pending-app counter should be incremented.
Currently, however, the next job stays in the ACCEPTED state, no AM has been assigned to it, and the pending app count is not incremented.
Running 'job status &lt;nextjob&gt;' shows job state=PREP.
$ mapred job -status job_1377122233385_0002
13/08/21 21:59:23 INFO client.RMProxy: Connecting to ResourceManager at host1/ip1
Job: job_1377122233385_0002
Job File: /ABC/.staging/job_1377122233385_0002/job.xml
Job Tracking URL : http://host1:port1/application_1377122233385_0002/
Uber job : false
Number of maps: 0
Number of reduces: 0
map() completion: 0.0
reduce() completion: 0.0
Job state: PREP
retired: false
reason for failure:</blockquote></li>
<li> <a href="https://issues.apache.org/jira/browse/YARN-1070">YARN-1070</a>.
Major sub-task reported by Hitesh Shah and fixed by Zhijie Shen (nodemanager)<br>
<b>ContainerImpl State Machine: Invalid event: CONTAINER_KILLED_ON_REQUEST at CONTAINER_CLEANEDUP_AFTER_KILL</b><br>
<blockquote></blockquote></li>
<li> <a href="https://issues.apache.org/jira/browse/YARN-1032">YARN-1032</a>.
Critical bug reported by Lohit Vijayarenu and fixed by Lohit Vijayarenu <br>
<b>NPE in RackResolve</b><br>
<blockquote>We found a case where our rack resolve script was not returning rack due to problem with resolving host address. This exception was see in RackResolver.java as NPE, ultimately caught in RMContainerAllocator.
{noformat}
2013-08-01 07:11:37,708 ERROR [RMCommunicator Allocator] org.apache.hadoop.mapreduce.v2.app.rm.RMContainerAllocator: ERROR IN CONTACTING RM.
java.lang.NullPointerException
at org.apache.hadoop.yarn.util.RackResolver.coreResolve(RackResolver.java:99)
at org.apache.hadoop.yarn.util.RackResolver.resolve(RackResolver.java:92)
at org.apache.hadoop.mapreduce.v2.app.rm.RMContainerAllocator$ScheduledRequests.assignMapsWithLocality(RMContainerAllocator.java:1039)
at org.apache.hadoop.mapreduce.v2.app.rm.RMContainerAllocator$ScheduledRequests.assignContainers(RMContainerAllocator.java:925)
at org.apache.hadoop.mapreduce.v2.app.rm.RMContainerAllocator$ScheduledRequests.assign(RMContainerAllocator.java:861)
at org.apache.hadoop.mapreduce.v2.app.rm.RMContainerAllocator$ScheduledRequests.access$400(RMContainerAllocator.java:681)
at org.apache.hadoop.mapreduce.v2.app.rm.RMContainerAllocator.heartbeat(RMContainerAllocator.java:219)
at org.apache.hadoop.mapreduce.v2.app.rm.RMCommunicator$1.run(RMCommunicator.java:243)
at java.lang.Thread.run(Thread.java:722)
{noformat}</blockquote></li>
<li> <a href="https://issues.apache.org/jira/browse/YARN-899">YARN-899</a>.
Major sub-task reported by Sandy Ryza and fixed by Xuan Gong (scheduler)<br>
<b>Get queue administration ACLs working</b><br>
<blockquote>The Capacity Scheduler documents the yarn.scheduler.capacity.root.&lt;queue-path&gt;.acl_administer_queue config option for controlling who can administer a queue, but it is not hooked up to anything. The Fair Scheduler could make use of a similar option as well. This is a feature-parity regression from MR1.</blockquote></li>
<li> <a href="https://issues.apache.org/jira/browse/YARN-890">YARN-890</a>.
Major bug reported by Trupti Dhavle and fixed by Xuan Gong (resourcemanager)<br>
<b>The roundup for memory values on resource manager UI is misleading</b><br>
<blockquote>
From the yarn-site.xml, I see following values-
&lt;property&gt;
&lt;name&gt;yarn.nodemanager.resource.memory-mb&lt;/name&gt;
&lt;value&gt;4192&lt;/value&gt;
&lt;/property&gt;
&lt;property&gt;
&lt;name&gt;yarn.scheduler.maximum-allocation-mb&lt;/name&gt;
&lt;value&gt;4192&lt;/value&gt;
&lt;/property&gt;
&lt;property&gt;
&lt;name&gt;yarn.scheduler.minimum-allocation-mb&lt;/name&gt;
&lt;value&gt;1024&lt;/value&gt;
&lt;/property&gt;
However the resourcemanager UI shows total memory as 5MB
</blockquote></li>
<li> <a href="https://issues.apache.org/jira/browse/YARN-876">YARN-876</a>.
Major bug reported by PengZhang and fixed by PengZhang (resourcemanager)<br>
<b>Node resource is added twice when node comes back from unhealthy to healthy</b><br>
<blockquote>When an unhealthy node restarts, its resource may be added twice in the scheduler.
The first time is at the node's reconnection, while the node's final state is still "UNHEALTHY".
The second time is at the node's update, when the node's state changes from "UNHEALTHY" to "HEALTHY".</blockquote></li>
<li> <a href="https://issues.apache.org/jira/browse/YARN-621">YARN-621</a>.
Critical sub-task reported by Allen Wittenauer and fixed by Omkar Vinit Joshi (resourcemanager)<br>
<b>RM triggers web auth failure before first job</b><br>
<blockquote>On a secure YARN setup, before the first job is executed, going to the web interface of the resource manager triggers authentication errors.</blockquote></li>
<li> <a href="https://issues.apache.org/jira/browse/YARN-49">YARN-49</a>.
Major sub-task reported by Hitesh Shah and fixed by Vinod Kumar Vavilapalli (applications/distributed-shell)<br>
<b>Improve distributed shell application to work on a secure cluster</b><br>
<blockquote></blockquote></li>
<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-5562">MAPREDUCE-5562</a>.
Major sub-task reported by Zhijie Shen and fixed by Zhijie Shen <br>
<b>MR AM should exit when unregister() throws exception</b><br>
<blockquote></blockquote></li>
<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-5554">MAPREDUCE-5554</a>.
Minor bug reported by Robert Kanter and fixed by Robert Kanter (test)<br>
<b>hdfs-site.xml included in hadoop-mapreduce-client-jobclient tests jar is breaking tests for downstream components</b><br>
<blockquote></blockquote></li>
<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-5551">MAPREDUCE-5551</a>.
Blocker sub-task reported by Zhijie Shen and fixed by Zhijie Shen <br>
<b>Binary Incompatibility of O.A.H.U.mapred.SequenceFileAsBinaryOutputFormat.WritableValueBytes</b><br>
<blockquote></blockquote></li>
<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-5545">MAPREDUCE-5545</a>.
Major bug reported by Robert Kanter and fixed by Robert Kanter <br>
<b>org.apache.hadoop.mapred.TestTaskAttemptListenerImpl.testCommitWindow times out</b><br>
<blockquote></blockquote></li>
<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-5544">MAPREDUCE-5544</a>.
Major bug reported by Sandy Ryza and fixed by Sandy Ryza <br>
<b>JobClient#getJob loads job conf twice</b><br>
<blockquote></blockquote></li>
<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-5538">MAPREDUCE-5538</a>.
Blocker sub-task reported by Zhijie Shen and fixed by Zhijie Shen <br>
<b>MRAppMaster#shutDownJob shouldn't send job end notification before checking isLastRetry</b><br>
<blockquote></blockquote></li>
<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-5536">MAPREDUCE-5536</a>.
Blocker bug reported by Yesha Vora and fixed by Omkar Vinit Joshi <br>
<b>mapreduce.jobhistory.webapp.https.address property is not respected</b><br>
<blockquote></blockquote></li>
<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-5533">MAPREDUCE-5533</a>.
Major bug reported by Tassapol Athiapinya and fixed by Xuan Gong (applicationmaster)<br>
<b>Speculative execution does not function for reduce</b><br>
<blockquote></blockquote></li>
<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-5531">MAPREDUCE-5531</a>.
Blocker sub-task reported by Robert Kanter and fixed by Robert Kanter (mrv1 , mrv2)<br>
<b>Binary and source incompatibility in mapreduce.TaskID and mapreduce.TaskAttemptID between branch-1 and branch-2</b><br>
<blockquote></blockquote></li>
<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-5530">MAPREDUCE-5530</a>.
Blocker sub-task reported by Robert Kanter and fixed by Robert Kanter (mrv1 , mrv2)<br>
<b>Binary and source incompatibility in mapred.lib.CombineFileInputFormat between branch-1 and branch-2</b><br>
<blockquote></blockquote></li>
<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-5529">MAPREDUCE-5529</a>.
Blocker sub-task reported by Robert Kanter and fixed by Robert Kanter (mrv1 , mrv2)<br>
<b>Binary incompatibilities in mapred.lib.TotalOrderPartitioner between branch-1 and branch-2</b><br>
<blockquote></blockquote></li>
<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-5525">MAPREDUCE-5525</a>.
Minor test reported by Chuan Liu and fixed by Chuan Liu (mrv2 , test)<br>
<b>Increase timeout of TestDFSIO.testAppend and TestMRJobsWithHistoryService.testJobHistoryData</b><br>
<blockquote></blockquote></li>
<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-5523">MAPREDUCE-5523</a>.
Major bug reported by Omkar Vinit Joshi and fixed by Omkar Vinit Joshi <br>
<b>Need to add https port related property in Job history server</b><br>
<blockquote></blockquote></li>
<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-5515">MAPREDUCE-5515</a>.
Major bug reported by Omkar Vinit Joshi and fixed by Omkar Vinit Joshi <br>
<b>Application Manager UI does not appear with Https enabled</b><br>
<blockquote></blockquote></li>
<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-5513">MAPREDUCE-5513</a>.
Major bug reported by Jason Lowe and fixed by Robert Parker <br>
<b>ConcurrentModificationException in JobControl</b><br>
<blockquote></blockquote></li>
<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-5505">MAPREDUCE-5505</a>.
Critical sub-task reported by Jian He and fixed by Zhijie Shen <br>
<b>Clients should be notified job finished only after job successfully unregistered </b><br>
<blockquote></blockquote></li>
<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-5503">MAPREDUCE-5503</a>.
Blocker bug reported by Jason Lowe and fixed by Jian He (mrv2)<br>
<b>TestMRJobClient.testJobClient is failing</b><br>
<blockquote></blockquote></li>
<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-5489">MAPREDUCE-5489</a>.
Critical bug reported by Yesha Vora and fixed by Zhijie Shen <br>
<b>MR jobs hangs as it does not use the node-blacklisting feature in RM requests</b><br>
<blockquote></blockquote></li>
<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-5488">MAPREDUCE-5488</a>.
Major bug reported by Arpit Gupta and fixed by Jian He <br>
<b>Job recovery fails after killing all the running containers for the app</b><br>
<blockquote></blockquote></li>
<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-5459">MAPREDUCE-5459</a>.
Major bug reported by Zhijie Shen and fixed by Zhijie Shen <br>
<b>Update the doc of running MRv1 examples jar on YARN</b><br>
<blockquote></blockquote></li>
<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-5442">MAPREDUCE-5442</a>.
Major bug reported by Yingda Chen and fixed by Yingda Chen (client)<br>
<b>$HADOOP_MAPRED_HOME/$HADOOP_CONF_DIR setting not working on Windows</b><br>
<blockquote></blockquote></li>
<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-5170">MAPREDUCE-5170</a>.
Trivial bug reported by Sangjin Lee and fixed by Sangjin Lee (mrv2)<br>
<b>incorrect exception message if min node size &gt; min rack size</b><br>
<blockquote></blockquote></li>
<li> <a href="https://issues.apache.org/jira/browse/HDFS-5308">HDFS-5308</a>.
Major improvement reported by Haohui Mai and fixed by Haohui Mai <br>
<b>Replace HttpConfig#getSchemePrefix with implicit schemes in HDFS JSP </b><br>
<blockquote></blockquote></li>
<li> <a href="https://issues.apache.org/jira/browse/HDFS-5306">HDFS-5306</a>.
Major sub-task reported by Suresh Srinivas and fixed by Suresh Srinivas (datanode , namenode)<br>
<b>Datanode https port is not available at the namenode</b><br>
<blockquote></blockquote></li>
<li> <a href="https://issues.apache.org/jira/browse/HDFS-5300">HDFS-5300</a>.
Major bug reported by Vinay and fixed by Vinay (namenode)<br>
<b>FSNameSystem#deleteSnapshot() should not check owner in case of permissions disabled</b><br>
<blockquote></blockquote></li>
<li> <a href="https://issues.apache.org/jira/browse/HDFS-5299">HDFS-5299</a>.
Blocker bug reported by Vinay and fixed by Vinay (namenode)<br>
<b>DFS client hangs in updatePipeline RPC when failover happened</b><br>
<blockquote></blockquote></li>
<li> <a href="https://issues.apache.org/jira/browse/HDFS-5289">HDFS-5289</a>.
Major bug reported by Aaron T. Myers and fixed by Aaron T. Myers (test)<br>
<b>Race condition in TestRetryCacheWithHA#testCreateSymlink causes spurious test failure</b><br>
<blockquote></blockquote></li>
<li> <a href="https://issues.apache.org/jira/browse/HDFS-5279">HDFS-5279</a>.
Major bug reported by Chris Nauroth and fixed by Chris Nauroth (namenode)<br>
<b>Guard against NullPointerException in NameNode JSP pages before initialization of FSNamesystem.</b><br>
<blockquote></blockquote></li>
<li> <a href="https://issues.apache.org/jira/browse/HDFS-5268">HDFS-5268</a>.
Major bug reported by Brandon Li and fixed by Brandon Li (nfs)<br>
<b>NFS write commit verifier is not set in a few places</b><br>
<blockquote></blockquote></li>
<li> <a href="https://issues.apache.org/jira/browse/HDFS-5265">HDFS-5265</a>.
Major bug reported by Haohui Mai and fixed by Haohui Mai <br>
<b>Namenode fails to start when dfs.https.port is unspecified</b><br>
<blockquote></blockquote></li>
<li> <a href="https://issues.apache.org/jira/browse/HDFS-5259">HDFS-5259</a>.
Major sub-task reported by Yesha Vora and fixed by Brandon Li (nfs)<br>
<b>Support client which combines appended data with old data before sends it to NFS server</b><br>
<blockquote></blockquote></li>
<li> <a href="https://issues.apache.org/jira/browse/HDFS-5258">HDFS-5258</a>.
Minor bug reported by Chris Nauroth and fixed by Chuan Liu (test)<br>
<b>Skip tests in TestHDFSCLI that are not applicable on Windows.</b><br>
<blockquote></blockquote></li>
<li> <a href="https://issues.apache.org/jira/browse/HDFS-5256">HDFS-5256</a>.
Major improvement reported by Haohui Mai and fixed by Haohui Mai (nfs)<br>
<b>Use guava LoadingCache to implement DFSClientCache</b><br>
<blockquote></blockquote></li>
<li> <a href="https://issues.apache.org/jira/browse/HDFS-5255">HDFS-5255</a>.
Major bug reported by Yesha Vora and fixed by Arpit Agarwal <br>
<b>Distcp job fails with hsftp when https is enabled in insecure cluster</b><br>
<blockquote></blockquote></li>
<li> <a href="https://issues.apache.org/jira/browse/HDFS-5251">HDFS-5251</a>.
Major bug reported by Haohui Mai and fixed by Haohui Mai <br>
<b>Race between the initialization of NameNode and the http server</b><br>
<blockquote></blockquote></li>
<li> <a href="https://issues.apache.org/jira/browse/HDFS-5246">HDFS-5246</a>.
Major sub-task reported by Jinghui Wang and fixed by Jinghui Wang (nfs)<br>
<b>Make Hadoop nfs server port and mount daemon port configurable</b><br>
<blockquote></blockquote></li>
<li> <a href="https://issues.apache.org/jira/browse/HDFS-5230">HDFS-5230</a>.
Major sub-task reported by Haohui Mai and fixed by Haohui Mai (nfs)<br>
<b>Introduce RpcInfo to decouple XDR classes from the RPC API</b><br>
<blockquote></blockquote></li>
<li> <a href="https://issues.apache.org/jira/browse/HDFS-5228">HDFS-5228</a>.
Blocker bug reported by Tsz Wo (Nicholas), SZE and fixed by Tsz Wo (Nicholas), SZE (hdfs-client)<br>
<b>The RemoteIterator returned by DistributedFileSystem.listFiles(..) may throw NPE</b><br>
<blockquote></blockquote></li>
<li> <a href="https://issues.apache.org/jira/browse/HDFS-5186">HDFS-5186</a>.
Minor test reported by Chuan Liu and fixed by Chuan Liu (namenode , test)<br>
<b>TestFileJournalManager fails on Windows due to file handle leaks</b><br>
<blockquote></blockquote></li>
<li> <a href="https://issues.apache.org/jira/browse/HDFS-5139">HDFS-5139</a>.
Major improvement reported by Arpit Agarwal and fixed by Arpit Agarwal (tools)<br>
<b>Remove redundant -R option from setrep</b><br>
<blockquote></blockquote></li>
<li> <a href="https://issues.apache.org/jira/browse/HDFS-5031">HDFS-5031</a>.
Blocker bug reported by Vinay and fixed by Vinay (datanode)<br>
<b>BlockScanner scans the block multiple times and on restart scans everything</b><br>
<blockquote></blockquote></li>
<li> <a href="https://issues.apache.org/jira/browse/HDFS-4817">HDFS-4817</a>.
Minor improvement reported by Colin Patrick McCabe and fixed by Colin Patrick McCabe (hdfs-client)<br>
<b>make HDFS advisory caching configurable on a per-file basis</b><br>
<blockquote></blockquote></li>
<li> <a href="https://issues.apache.org/jira/browse/HADOOP-10020">HADOOP-10020</a>.
Blocker sub-task reported by Colin Patrick McCabe and fixed by Sanjay Radia (fs)<br>
<b>disable symlinks temporarily</b><br>
<blockquote>During review of symbolic links, many issues were found related impact on semantics of existing APIs such FileSystem#listStatus, FileSystem#globStatus etc. There were also many issues brought up about symbolic links and the impact on security and functionality of HDFS. All these issues will be address in the upcoming release 2.3. Until then the feature is temporarily disabled.</blockquote></li>
<li> <a href="https://issues.apache.org/jira/browse/HADOOP-10017">HADOOP-10017</a>.
Major sub-task reported by Jing Zhao and fixed by Haohui Mai <br>
<b>Fix NPE in DFSClient#getDelegationToken when doing Distcp from a secured cluster to an insecured cluster</b><br>
<blockquote></blockquote></li>
<li> <a href="https://issues.apache.org/jira/browse/HADOOP-10012">HADOOP-10012</a>.
Blocker bug reported by Arpit Gupta and fixed by Suresh Srinivas (ha)<br>
<b>Secure Oozie jobs fail with delegation token renewal exception in Namenode HA setup</b><br>
<blockquote></blockquote></li>
<li> <a href="https://issues.apache.org/jira/browse/HADOOP-10003">HADOOP-10003</a>.
Major bug reported by Jason Dere and fixed by (fs)<br>
<b>HarFileSystem.listLocatedStatus() fails</b><br>
<blockquote></blockquote></li>
<li> <a href="https://issues.apache.org/jira/browse/HADOOP-9976">HADOOP-9976</a>.
Major bug reported by Karthik Kambatla and fixed by Karthik Kambatla <br>
<b>Different versions of avro and avro-maven-plugin</b><br>
<blockquote></blockquote></li>
<li> <a href="https://issues.apache.org/jira/browse/HADOOP-9948">HADOOP-9948</a>.
Minor test reported by Chuan Liu and fixed by Chuan Liu (test)<br>
<b>Add a config value to CLITestHelper to skip tests on Windows</b><br>
<blockquote></blockquote></li>
<li> <a href="https://issues.apache.org/jira/browse/HADOOP-9776">HADOOP-9776</a>.
Major bug reported by shanyu zhao and fixed by shanyu zhao (fs)<br>
<b>HarFileSystem.listStatus() returns invalid authority if port number is empty</b><br>
<blockquote></blockquote></li>
<li> <a href="https://issues.apache.org/jira/browse/HADOOP-9761">HADOOP-9761</a>.
Blocker bug reported by Andrew Wang and fixed by Andrew Wang (viewfs)<br>
<b>ViewFileSystem#rename fails when using DistributedFileSystem</b><br>
<blockquote></blockquote></li>
<li> <a href="https://issues.apache.org/jira/browse/HADOOP-9758">HADOOP-9758</a>.
Major improvement reported by Andrew Wang and fixed by Andrew Wang <br>
<b>Provide configuration option for FileSystem/FileContext symlink resolution</b><br>
<blockquote></blockquote></li>
<li> <a href="https://issues.apache.org/jira/browse/HADOOP-8315">HADOOP-8315</a>.
Major improvement reported by Todd Lipcon and fixed by Todd Lipcon (auto-failover , ha)<br>
<b>Support SASL-authenticated ZooKeeper in ActiveStandbyElector</b><br>
<blockquote></blockquote></li>
</ul>
</body></html>
<META http-equiv="Content-Type" content="text/html; charset=UTF-8">
<title>Hadoop 2.1.1-beta Release Notes</title>
<STYLE type="text/css">
H1 {font-family: sans-serif}


@@ -264,5 +264,9 @@ public class CommonConfigurationKeysPublic {
/** Default value for HADOOP_KERBEROS_MIN_SECONDS_BEFORE_RELOGIN */
public static final int HADOOP_KERBEROS_MIN_SECONDS_BEFORE_RELOGIN_DEFAULT =
60;
// HTTP policies to be used in configuration
public static final String HTTP_POLICY_HTTP_ONLY = "HTTP_ONLY";
public static final String HTTP_POLICY_HTTPS_ONLY = "HTTPS_ONLY";
}
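
The two constants above are plain string values; callers are expected to compare a configured policy name against them to decide whether a daemon should serve HTTP or HTTPS. The following is a minimal sketch of that pattern, assuming a hypothetical "hadoop.http.policy" key and helper class that are not part of this change:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeysPublic;

// Illustrative sketch only: the "hadoop.http.policy" key name and this helper
// class are assumptions for the example, not APIs introduced by this change.
public class HttpPolicyCheck {

  /** Returns true when the configured policy asks for HTTPS-only endpoints. */
  public static boolean isHttpsOnly(Configuration conf) {
    String policy = conf.get("hadoop.http.policy",
        CommonConfigurationKeysPublic.HTTP_POLICY_HTTP_ONLY);
    return CommonConfigurationKeysPublic.HTTP_POLICY_HTTPS_ONLY.equalsIgnoreCase(policy);
  }

  public static void main(String[] args) {
    Configuration conf = new Configuration();
    conf.set("hadoop.http.policy", CommonConfigurationKeysPublic.HTTP_POLICY_HTTPS_ONLY);
    System.out.println("https only? " + isHttpsOnly(conf));  // prints: https only? true
  }
}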


@@ -0,0 +1,41 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs;
import java.io.IOException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
/**
* Thrown when the startAfter can't be found when listing a directory.
*/
@InterfaceAudience.LimitedPrivate({"HDFS"})
@InterfaceStability.Stable
public class DirectoryListingStartAfterNotFoundException extends IOException {
private static final long serialVersionUID = 1L;
public DirectoryListingStartAfterNotFoundException() {
super();
}
public DirectoryListingStartAfterNotFoundException(String msg) {
super(msg);
}
}
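
Since the new exception is a plain IOException subclass, code that implements a "start listing after this entry" API can throw it when the marker entry is missing and callers can catch it to fall back or restart the listing. A small usage sketch follows; the listChildrenAfter() helper is made up for illustration, only the exception type comes from the file above:

import java.io.IOException;
import java.util.Arrays;
import java.util.List;
import org.apache.hadoop.fs.DirectoryListingStartAfterNotFoundException;

// Illustrative sketch: listChildrenAfter() is hypothetical; only the exception type is real.
public class StartAfterExample {

  /** Returns the entries that follow startAfter, or throws if startAfter is absent. */
  static List<String> listChildrenAfter(List<String> children, String startAfter)
      throws IOException {
    int idx = children.indexOf(startAfter);
    if (idx < 0) {
      throw new DirectoryListingStartAfterNotFoundException(
          "startAfter entry not found: " + startAfter);
    }
    return children.subList(idx + 1, children.size());
  }

  public static void main(String[] args) throws IOException {
    List<String> children = Arrays.asList("a", "b", "c");
    System.out.println(listChildrenAfter(children, "a"));  // [b, c]
    try {
      listChildrenAfter(children, "zzz");
    } catch (DirectoryListingStartAfterNotFoundException e) {
      System.out.println("caught: " + e.getMessage());     // caller can restart the listing
    }
  }
}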


@@ -17,20 +17,6 @@
*/
package org.apache.hadoop.fs;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.io.UnsupportedEncodingException;
import java.net.URI;
import java.net.URISyntaxException;
import java.net.URLDecoder;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.LinkedHashMap;
import java.util.Map;
import java.util.TreeMap;
import java.util.HashMap;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
@@ -40,6 +26,14 @@ import org.apache.hadoop.io.Text;
import org.apache.hadoop.util.LineReader;
import org.apache.hadoop.util.Progressable;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.io.UnsupportedEncodingException;
import java.net.URI;
import java.net.URISyntaxException;
import java.net.URLDecoder;
import java.util.*;
/**
* This is an implementation of the Hadoop Archive
* Filesystem. This archive Filesystem has index files
@@ -53,7 +47,7 @@ import org.apache.hadoop.util.Progressable;
* index for ranges of hashcodes.
*/
public class HarFileSystem extends FilterFileSystem {
public class HarFileSystem extends FileSystem {
private static final Log LOG = LogFactory.getLog(HarFileSystem.class);
@@ -75,11 +69,13 @@ public class HarFileSystem extends FilterFileSystem {
// pointer into the static metadata cache
private HarMetaData metadata;
private FileSystem fs;
/**
* public construction of harfilesystem
*
*/
public HarFileSystem() {
// Must call #initialize() method to set the underlying file system
}
/**
@@ -96,10 +92,11 @@ public class HarFileSystem extends FilterFileSystem {
/**
* Constructor to create a HarFileSystem with an
* underlying filesystem.
* @param fs
* @param fs underlying file system
*/
public HarFileSystem(FileSystem fs) {
super(fs);
this.fs = fs;
this.statistics = fs.statistics;
}
private synchronized void initializeMetadataCache(Configuration conf) {
@@ -171,6 +168,11 @@ public class HarFileSystem extends FilterFileSystem {
}
}
@Override
public Configuration getConf() {
return fs.getConf();
}
// get the version of the filesystem from the masterindex file // get the version of the filesystem from the masterindex file
// the version is currently not useful since its the first version // the version is currently not useful since its the first version
// of archives // of archives
@ -236,8 +238,7 @@ public class HarFileSystem extends FilterFileSystem {
throw new IOException("query component in Path not supported " + rawURI); throw new IOException("query component in Path not supported " + rawURI);
} }
URI tmp = null; URI tmp;
try { try {
// convert <scheme>-<host> to <scheme>://<host> // convert <scheme>-<host> to <scheme>://<host>
URI baseUri = new URI(authority.replaceFirst("-", "://")); URI baseUri = new URI(authority.replaceFirst("-", "://"));
@ -256,7 +257,7 @@ public class HarFileSystem extends FilterFileSystem {
return URLDecoder.decode(str, "UTF-8"); return URLDecoder.decode(str, "UTF-8");
} }
private String decodeFileName(String fname) private String decodeFileName(String fname)
throws UnsupportedEncodingException { throws UnsupportedEncodingException {
int version = metadata.getVersion(); int version = metadata.getVersion();
if (version == 2 || version == 3){ if (version == 2 || version == 3){
@ -272,11 +273,21 @@ public class HarFileSystem extends FilterFileSystem {
public Path getWorkingDirectory() { public Path getWorkingDirectory() {
return new Path(uri.toString()); return new Path(uri.toString());
} }
@Override
public Path getInitialWorkingDirectory() {
return getWorkingDirectory();
}
@Override
public FsStatus getStatus(Path p) throws IOException {
return fs.getStatus(p);
}
/** /**
* Create a har specific auth * Create a har specific auth
* har-underlyingfs:port * har-underlyingfs:port
* @param underLyingURI the uri of underlying * @param underLyingUri the uri of underlying
* filesystem * filesystem
* @return har specific auth * @return har specific auth
*/ */
@ -294,7 +305,21 @@ public class HarFileSystem extends FilterFileSystem {
} }
return auth; return auth;
} }
/**
* Used for delegation token related functionality. Must delegate to
* underlying file system.
*/
@Override
protected URI getCanonicalUri() {
return fs.getCanonicalUri();
}
@Override
protected URI canonicalizeUri(URI uri) {
return fs.canonicalizeUri(uri);
}
/** /**
* Returns the uri of this filesystem. * Returns the uri of this filesystem.
* The uri is of the form * The uri is of the form
@ -305,6 +330,16 @@ public class HarFileSystem extends FilterFileSystem {
return this.uri; return this.uri;
} }
@Override
protected void checkPath(Path path) {
fs.checkPath(path);
}
@Override
public Path resolvePath(Path p) throws IOException {
return fs.resolvePath(p);
}
/** /**
* this method returns the path * this method returns the path
* inside the har filesystem. * inside the har filesystem.
@ -419,7 +454,7 @@ public class HarFileSystem extends FilterFileSystem {
/** /**
* Get block locations from the underlying fs and fix their * Get block locations from the underlying fs and fix their
* offsets and lengths. * offsets and lengths.
* @param file the input filestatus to get block locations * @param file the input file status to get block locations
* @param start the start of the desired range in the contained file * @param start the start of the desired range in the contained file
* @param len the length of the desired range * @param len the length of the desired range
* @return block locations for this segment of file * @return block locations for this segment of file
@ -441,8 +476,7 @@ public class HarFileSystem extends FilterFileSystem {
} }
/** /**
* the hash of the path p inside iniside * the hash of the path p inside the filesystem
* the filesystem
* @param p the path in the harfilesystem * @param p the path in the harfilesystem
* @return the hash code of the path. * @return the hash code of the path.
*/ */
@ -475,13 +509,9 @@ public class HarFileSystem extends FilterFileSystem {
* the parent path directory * the parent path directory
* @param statuses * @param statuses
* the list to add the children filestatuses to * the list to add the children filestatuses to
* @param children
* the string list of children for this parent
* @param archiveIndexStat
* the archive index filestatus
*/ */
private void fileStatusesInIndex(HarStatus parent, List<FileStatus> statuses, private void fileStatusesInIndex(HarStatus parent, List<FileStatus> statuses)
List<String> children) throws IOException { throws IOException {
String parentString = parent.getName(); String parentString = parent.getName();
if (!parentString.endsWith(Path.SEPARATOR)){ if (!parentString.endsWith(Path.SEPARATOR)){
parentString += Path.SEPARATOR; parentString += Path.SEPARATOR;
@ -547,7 +577,7 @@ public class HarFileSystem extends FilterFileSystem {
// stored in a single line in the index files // stored in a single line in the index files
// the format is of the form // the format is of the form
// filename "dir"/"file" partFileName startIndex length // filename "dir"/"file" partFileName startIndex length
// <space seperated children> // <space separated children>
private class HarStatus { private class HarStatus {
boolean isDir; boolean isDir;
String name; String name;
@ -666,7 +696,6 @@ public class HarFileSystem extends FilterFileSystem {
public FSDataInputStream open(Path f, int bufferSize) throws IOException { public FSDataInputStream open(Path f, int bufferSize) throws IOException {
// get the fs DataInputStream for the underlying file // get the fs DataInputStream for the underlying file
HarStatus hstatus = getFileHarStatus(f); HarStatus hstatus = getFileHarStatus(f);
// we got it.. woo hooo!!!
if (hstatus.isDir()) { if (hstatus.isDir()) {
throw new FileNotFoundException(f + " : not a file in " + throw new FileNotFoundException(f + " : not a file in " +
archivePath); archivePath);
@ -675,20 +704,39 @@ public class HarFileSystem extends FilterFileSystem {
hstatus.getPartName()), hstatus.getPartName()),
hstatus.getStartIndex(), hstatus.getLength(), bufferSize); hstatus.getStartIndex(), hstatus.getLength(), bufferSize);
} }
/**
* Used for delegation token related functionality. Must delegate to
* underlying file system.
*/
@Override @Override
public FSDataOutputStream create(Path f, public FileSystem[] getChildFileSystems() {
FsPermission permission, return new FileSystem[]{fs};
boolean overwrite, }
int bufferSize,
short replication, @Override
long blockSize, public FSDataOutputStream create(Path f, FsPermission permission,
boolean overwrite, int bufferSize, short replication, long blockSize,
Progressable progress) throws IOException { Progressable progress) throws IOException {
throw new IOException("Har: create not allowed."); throw new IOException("Har: create not allowed.");
} }
@SuppressWarnings("deprecation")
@Override
public FSDataOutputStream createNonRecursive(Path f, boolean overwrite,
int bufferSize, short replication, long blockSize, Progressable progress)
throws IOException {
throw new IOException("Har: create not allowed.");
}
@Override
public FSDataOutputStream append(Path f, int bufferSize, Progressable progress) throws IOException {
throw new IOException("Har: append not allowed.");
}
@Override @Override
public void close() throws IOException { public void close() throws IOException {
super.close();
if (fs != null) { if (fs != null) {
try { try {
fs.close(); fs.close();
@ -704,9 +752,19 @@ public class HarFileSystem extends FilterFileSystem {
*/ */
@Override @Override
public boolean setReplication(Path src, short replication) throws IOException{ public boolean setReplication(Path src, short replication) throws IOException{
throw new IOException("Har: setreplication not allowed"); throw new IOException("Har: setReplication not allowed");
} }
@Override
public boolean rename(Path src, Path dst) throws IOException {
throw new IOException("Har: rename not allowed");
}
@Override
public FSDataOutputStream append(Path f) throws IOException {
throw new IOException("Har: append not allowed");
}
/** /**
* Not implemented. * Not implemented.
*/ */
@ -714,7 +772,7 @@ public class HarFileSystem extends FilterFileSystem {
public boolean delete(Path f, boolean recursive) throws IOException { public boolean delete(Path f, boolean recursive) throws IOException {
throw new IOException("Har: delete not allowed"); throw new IOException("Har: delete not allowed");
} }
/** /**
* liststatus returns the children of a directory * liststatus returns the children of a directory
* after looking up the index files. * after looking up the index files.
@ -733,7 +791,7 @@ public class HarFileSystem extends FilterFileSystem {
throw new FileNotFoundException("File " + f + " not found in " + archivePath); throw new FileNotFoundException("File " + f + " not found in " + archivePath);
} }
if (hstatus.isDir()) { if (hstatus.isDir()) {
fileStatusesInIndex(hstatus, statuses, hstatus.children); fileStatusesInIndex(hstatus, statuses);
} else { } else {
statuses.add(toFileStatus(hstatus, null)); statuses.add(toFileStatus(hstatus, null));
} }
@ -748,7 +806,7 @@ public class HarFileSystem extends FilterFileSystem {
public Path getHomeDirectory() { public Path getHomeDirectory() {
return new Path(uri.toString()); return new Path(uri.toString());
} }
@Override @Override
public void setWorkingDirectory(Path newDir) { public void setWorkingDirectory(Path newDir) {
//does nothing. //does nothing.
@ -766,11 +824,17 @@ public class HarFileSystem extends FilterFileSystem {
* not implemented. * not implemented.
*/ */
@Override @Override
public void copyFromLocalFile(boolean delSrc, Path src, Path dst) throws public void copyFromLocalFile(boolean delSrc, boolean overwrite,
IOException { Path src, Path dst) throws IOException {
throw new IOException("Har: copyfromlocalfile not allowed"); throw new IOException("Har: copyfromlocalfile not allowed");
} }
@Override
public void copyFromLocalFile(boolean delSrc, boolean overwrite,
Path[] srcs, Path dst) throws IOException {
throw new IOException("Har: copyfromlocalfile not allowed");
}
/** /**
* copies the file in the har filesystem to a local file. * copies the file in the har filesystem to a local file.
*/ */
@ -807,11 +871,16 @@ public class HarFileSystem extends FilterFileSystem {
throw new IOException("Har: setowner not allowed"); throw new IOException("Har: setowner not allowed");
} }
@Override
public void setTimes(Path p, long mtime, long atime) throws IOException {
throw new IOException("Har: setTimes not allowed");
}
/** /**
* Not implemented. * Not implemented.
*/ */
@Override @Override
public void setPermission(Path p, FsPermission permisssion) public void setPermission(Path p, FsPermission permission)
throws IOException { throws IOException {
throw new IOException("Har: setPermission not allowed"); throw new IOException("Har: setPermission not allowed");
} }
@ -900,7 +969,7 @@ public class HarFileSystem extends FilterFileSystem {
newlen = (int) (end - position); newlen = (int) (end - position);
} }
// end case // end case
if (newlen == 0) if (newlen == 0)
return ret; return ret;
ret = underLyingStream.read(b, offset, newlen); ret = underLyingStream.read(b, offset, newlen);
position += ret; position += ret;
@ -937,8 +1006,8 @@ public class HarFileSystem extends FilterFileSystem {
@Override @Override
public boolean seekToNewSource(long targetPos) throws IOException { public boolean seekToNewSource(long targetPos) throws IOException {
//do not need to implement this // do not need to implement this
// hdfs in itself does seektonewsource // hdfs in itself does seektonewsource
// while reading. // while reading.
return false; return false;
} }
@ -974,14 +1043,12 @@ public class HarFileSystem extends FilterFileSystem {
} }
@Override @Override
public void setReadahead(Long readahead) public void setReadahead(Long readahead) throws IOException {
throws IOException, UnsupportedEncodingException {
underLyingStream.setReadahead(readahead); underLyingStream.setReadahead(readahead);
} }
@Override @Override
public void setDropBehind(Boolean dropBehind) public void setDropBehind(Boolean dropBehind) throws IOException {
throws IOException, UnsupportedEncodingException {
underLyingStream.setDropBehind(dropBehind); underLyingStream.setDropBehind(dropBehind);
} }
} }
@ -999,19 +1066,6 @@ public class HarFileSystem extends FilterFileSystem {
long length, int bufsize) throws IOException { long length, int bufsize) throws IOException {
super(new HarFsInputStream(fs, p, start, length, bufsize)); super(new HarFsInputStream(fs, p, start, length, bufsize));
} }
/**
* constructor for har input stream.
* @param fs the underlying filesystem
* @param p the path in the underlying file system
* @param start the start position in the part file
* @param length the length of valid data in the part file.
* @throws IOException
*/
public HarFSDataInputStream(FileSystem fs, Path p, long start, long length)
throws IOException {
super(new HarFsInputStream(fs, p, start, length, 0));
}
} }
private class HarMetaData { private class HarMetaData {
@ -1058,7 +1112,7 @@ public class HarFileSystem extends FilterFileSystem {
} }
private void parseMetaData() throws IOException { private void parseMetaData() throws IOException {
Text line; Text line = new Text();
long read; long read;
FSDataInputStream in = null; FSDataInputStream in = null;
LineReader lin = null; LineReader lin = null;
@ -1068,7 +1122,6 @@ public class HarFileSystem extends FilterFileSystem {
FileStatus masterStat = fs.getFileStatus(masterIndexPath); FileStatus masterStat = fs.getFileStatus(masterIndexPath);
masterIndexTimestamp = masterStat.getModificationTime(); masterIndexTimestamp = masterStat.getModificationTime();
lin = new LineReader(in, getConf()); lin = new LineReader(in, getConf());
line = new Text();
read = lin.readLine(line); read = lin.readLine(line);
// the first line contains the version of the index file // the first line contains the version of the index file
@ -1082,7 +1135,7 @@ public class HarFileSystem extends FilterFileSystem {
} }
// each line contains a hashcode range and the index file name // each line contains a hashcode range and the index file name
String[] readStr = null; String[] readStr;
while(read < masterStat.getLen()) { while(read < masterStat.getLen()) {
int b = lin.readLine(line); int b = lin.readLine(line);
read += b; read += b;
@ -1094,6 +1147,9 @@ public class HarFileSystem extends FilterFileSystem {
endHash)); endHash));
line.clear(); line.clear();
} }
} catch (IOException ioe) {
LOG.warn("Encountered exception ", ioe);
throw ioe;
} finally { } finally {
IOUtils.cleanup(LOG, lin, in); IOUtils.cleanup(LOG, lin, in);
} }
@ -1145,4 +1201,43 @@ public class HarFileSystem extends FilterFileSystem {
return size() > MAX_ENTRIES; return size() > MAX_ENTRIES;
} }
} }
@SuppressWarnings("deprecation")
@Override
public FsServerDefaults getServerDefaults() throws IOException {
return fs.getServerDefaults();
}
@Override
public FsServerDefaults getServerDefaults(Path f) throws IOException {
return fs.getServerDefaults(f);
}
@Override
public long getUsed() throws IOException{
return fs.getUsed();
}
@SuppressWarnings("deprecation")
@Override
public long getDefaultBlockSize() {
return fs.getDefaultBlockSize();
}
@SuppressWarnings("deprecation")
@Override
public long getDefaultBlockSize(Path f) {
return fs.getDefaultBlockSize(f);
}
@SuppressWarnings("deprecation")
@Override
public short getDefaultReplication() {
return fs.getDefaultReplication();
}
@Override
public short getDefaultReplication(Path f) {
return fs.getDefaultReplication(f);
}
} }
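
For context, a short read-only usage sketch of the archive filesystem reworked above. The archive path and namenode address are hypothetical; the har:// authority encodes the underlying filesystem as "<scheme>-<host>:<port>", matching the authority decoding shown in the hunk.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class HarReadSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    Path inHar = new Path("har://hdfs-namenode:8020/user/me/data.har/part-0");
    FileSystem fs = inHar.getFileSystem(conf);

    // Listing is answered from the archive's index files.
    for (FileStatus st : fs.listStatus(inHar.getParent())) {
      System.out.println(st.getPath());
    }

    // Reads are served from the underlying part files; mutating operations
    // (create, append, rename, delete, ...) throw IOException as shown above.
    FSDataInputStream in = fs.open(inHar);
    in.close();
  }
}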

View File

@ -84,11 +84,16 @@ abstract class CommandWithDestination extends FsCommand {
*/ */
protected void getLocalDestination(LinkedList<String> args) protected void getLocalDestination(LinkedList<String> args)
throws IOException { throws IOException {
String pathString = (args.size() < 2) ? Path.CUR_DIR : args.removeLast();
try { try {
String pathString = (args.size() < 2) ? Path.CUR_DIR : args.removeLast();
dst = new PathData(new URI(pathString), getConf()); dst = new PathData(new URI(pathString), getConf());
} catch (URISyntaxException e) { } catch (URISyntaxException e) {
throw new IOException("unexpected URISyntaxException", e); if (Path.WINDOWS) {
// Unlike URI, PathData knows how to parse Windows drive-letter paths.
dst = new PathData(pathString, getConf());
} else {
throw new IOException("unexpected URISyntaxException", e);
}
} }
} }

View File

@ -204,13 +204,18 @@ class CopyCommands {
// commands operating on local paths have no need for glob expansion // commands operating on local paths have no need for glob expansion
@Override @Override
protected List<PathData> expandArgument(String arg) throws IOException { protected List<PathData> expandArgument(String arg) throws IOException {
List<PathData> items = new LinkedList<PathData>();
try { try {
List<PathData> items = new LinkedList<PathData>();
items.add(new PathData(new URI(arg), getConf())); items.add(new PathData(new URI(arg), getConf()));
return items;
} catch (URISyntaxException e) { } catch (URISyntaxException e) {
throw new IOException("unexpected URISyntaxException", e); if (Path.WINDOWS) {
// Unlike URI, PathData knows how to parse Windows drive-letter paths.
items.add(new PathData(arg, getConf()));
} else {
throw new IOException("unexpected URISyntaxException", e);
}
} }
return items;
} }
@Override @Override
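
A sketch of the fallback both hunks above rely on (hypothetical helper class): a Windows drive-letter path such as "C:\data\file.txt" is rejected by java.net.URI, so the string-based PathData constructor is used instead.

import java.net.URI;
import java.net.URISyntaxException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.shell.PathData;

class WindowsPathFallbackSketch {
  static PathData parse(String arg, Configuration conf) throws java.io.IOException {
    try {
      return new PathData(new URI(arg), conf);
    } catch (URISyntaxException e) {
      // Backslashes and "C:" drive prefixes are invalid in URIs; PathData
      // knows how to interpret them as local paths.
      return new PathData(arg, conf);
    }
  }
}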

View File

@ -568,6 +568,9 @@ public class ActiveStandbyElector implements StatCallback, StringCallback {
enterNeutralMode(); enterNeutralMode();
reJoinElection(0); reJoinElection(0);
break; break;
case SaslAuthenticated:
LOG.info("Successfully authenticated to ZooKeeper using SASL.");
break;
default: default:
fatalError("Unexpected Zookeeper watch event state: " fatalError("Unexpected Zookeeper watch event state: "
+ event.getState()); + event.getState());

View File

@ -28,25 +28,41 @@ import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
@InterfaceAudience.Private @InterfaceAudience.Private
@InterfaceStability.Unstable @InterfaceStability.Unstable
public class HttpConfig { public class HttpConfig {
private static boolean sslEnabled; private static Policy policy;
public enum Policy {
HTTP_ONLY,
HTTPS_ONLY;
public static Policy fromString(String value) {
if (value.equalsIgnoreCase(CommonConfigurationKeysPublic
.HTTP_POLICY_HTTPS_ONLY)) {
return HTTPS_ONLY;
}
return HTTP_ONLY;
}
}
static { static {
Configuration conf = new Configuration(); Configuration conf = new Configuration();
sslEnabled = conf.getBoolean( boolean sslEnabled = conf.getBoolean(
CommonConfigurationKeysPublic.HADOOP_SSL_ENABLED_KEY, CommonConfigurationKeysPublic.HADOOP_SSL_ENABLED_KEY,
CommonConfigurationKeysPublic.HADOOP_SSL_ENABLED_DEFAULT); CommonConfigurationKeysPublic.HADOOP_SSL_ENABLED_DEFAULT);
policy = sslEnabled ? Policy.HTTPS_ONLY : Policy.HTTP_ONLY;
} }
public static void setSecure(boolean secure) { public static void setPolicy(Policy policy) {
sslEnabled = secure; HttpConfig.policy = policy;
} }
public static boolean isSecure() { public static boolean isSecure() {
return sslEnabled; return policy == Policy.HTTPS_ONLY;
} }
public static String getSchemePrefix() { public static String getSchemePrefix() {
return (isSecure()) ? "https://" : "http://"; return (isSecure()) ? "https://" : "http://";
} }
public static String getScheme(Policy policy) {
return policy == Policy.HTTPS_ONLY ? "https://" : "http://";
}
} }
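
A small usage sketch of the policy-based API introduced above (HttpConfig is an internal, Private-audience class, so this is illustration only):

import org.apache.hadoop.http.HttpConfig;

class HttpPolicySketch {
  public static void main(String[] args) {
    System.out.println(HttpConfig.getScheme(HttpConfig.Policy.HTTPS_ONLY)); // https://
    System.out.println(HttpConfig.getScheme(HttpConfig.Policy.HTTP_ONLY));  // http://
    System.out.println(HttpConfig.isSecure()); // reflects the statically loaded configuration
  }
}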

View File

@ -341,6 +341,7 @@ public class HttpServer implements FilterContainer {
} }
listener.setHost(bindAddress); listener.setHost(bindAddress);
listener.setPort(port); listener.setPort(port);
LOG.info("SSL is enabled on " + toString());
} else { } else {
listenerStartedExternally = true; listenerStartedExternally = true;
listener = connector; listener = connector;

View File

@ -34,6 +34,7 @@ import java.util.concurrent.TimeUnit;
import org.apache.commons.logging.Log; import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory; import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.ipc.RemoteException; import org.apache.hadoop.ipc.RemoteException;
import org.apache.hadoop.ipc.RetriableException;
import org.apache.hadoop.ipc.StandbyException; import org.apache.hadoop.ipc.StandbyException;
import org.apache.hadoop.net.ConnectTimeoutException; import org.apache.hadoop.net.ConnectTimeoutException;
@ -531,6 +532,15 @@ public class RetryPolicies {
this.maxDelayBase = maxDelayBase; this.maxDelayBase = maxDelayBase;
} }
/**
* @return 0 if this is our first failover/retry (i.e., retry immediately),
* sleep exponentially otherwise
*/
private long getFailoverOrRetrySleepTime(int times) {
return times == 0 ? 0 :
calculateExponentialTime(delayMillis, times, maxDelayBase);
}
@Override @Override
public RetryAction shouldRetry(Exception e, int retries, public RetryAction shouldRetry(Exception e, int retries,
int failovers, boolean isIdempotentOrAtMostOnce) throws Exception { int failovers, boolean isIdempotentOrAtMostOnce) throws Exception {
@ -546,11 +556,8 @@ public class RetryPolicies {
e instanceof StandbyException || e instanceof StandbyException ||
e instanceof ConnectTimeoutException || e instanceof ConnectTimeoutException ||
isWrappedStandbyException(e)) { isWrappedStandbyException(e)) {
return new RetryAction( return new RetryAction(RetryAction.RetryDecision.FAILOVER_AND_RETRY,
RetryAction.RetryDecision.FAILOVER_AND_RETRY, getFailoverOrRetrySleepTime(failovers));
// retry immediately if this is our first failover, sleep otherwise
failovers == 0 ? 0 :
calculateExponentialTime(delayMillis, failovers, maxDelayBase));
} else if (e instanceof SocketException || } else if (e instanceof SocketException ||
(e instanceof IOException && !(e instanceof RemoteException))) { (e instanceof IOException && !(e instanceof RemoteException))) {
if (isIdempotentOrAtMostOnce) { if (isIdempotentOrAtMostOnce) {
@ -561,8 +568,14 @@ public class RetryPolicies {
"whether it was invoked"); "whether it was invoked");
} }
} else { } else {
return fallbackPolicy.shouldRetry(e, retries, failovers, RetriableException re = getWrappedRetriableException(e);
isIdempotentOrAtMostOnce); if (re != null) {
return new RetryAction(RetryAction.RetryDecision.RETRY,
getFailoverOrRetrySleepTime(retries));
} else {
return fallbackPolicy.shouldRetry(e, retries, failovers,
isIdempotentOrAtMostOnce);
}
} }
} }
@ -596,4 +609,14 @@ public class RetryPolicies {
StandbyException.class); StandbyException.class);
return unwrapped instanceof StandbyException; return unwrapped instanceof StandbyException;
} }
private static RetriableException getWrappedRetriableException(Exception e) {
if (!(e instanceof RemoteException)) {
return null;
}
Exception unwrapped = ((RemoteException)e).unwrapRemoteException(
RetriableException.class);
return unwrapped instanceof RetriableException ?
(RetriableException) unwrapped : null;
}
} }
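
A standalone illustration of the sleep-time shape described by getFailoverOrRetrySleepTime above: attempt 0 goes out immediately, later attempts back off roughly exponentially up to a cap, with some randomization. This is a sketch of the behavior, not the private calculateExponentialTime helper, whose exact jitter may differ.

import java.util.Random;

class BackoffShapeSketch {
  private static final Random RANDOM = new Random();

  static long sleepMillis(int times, long delayMillis, long maxDelayBase) {
    if (times == 0) {
      return 0; // first failover/retry happens immediately
    }
    int shift = Math.min(times, 30);                     // guard against overflow
    long base = Math.min(delayMillis * (1L << shift), maxDelayBase);
    return (long) (base * (0.5 + RANDOM.nextDouble()));  // jitter in [0.5x, 1.5x)
  }
}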

View File

@ -0,0 +1,41 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.ipc;
import java.io.IOException;
import org.apache.hadoop.classification.InterfaceStability;
/**
 * Exception thrown by a server to indicate that it is in a state where a request
 * cannot be processed temporarily (for example, while still starting up).
 * The client may retry the request. If the service is up, the server may be able to
* process a retried request.
*/
@InterfaceStability.Evolving
public class RetriableException extends IOException {
private static final long serialVersionUID = 1915561725516487301L;
public RetriableException(Exception e) {
super(e);
}
public RetriableException(String msg) {
super(msg);
}
}
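
A hypothetical server-side sketch of the intended use: reject work with RetriableException while the service is still starting, so callers know a retry may succeed.

import java.io.IOException;

import org.apache.hadoop.ipc.RetriableException;

class StartupGatedService {
  private volatile boolean started = false;

  void markStarted() {
    started = true;
  }

  byte[] lookup(String key) throws IOException {
    if (!started) {
      // A client retry policy (see the RetryPolicies change above) can back
      // off and re-send this request once the service is up.
      throw new RetriableException("Service is still starting up, try again later");
    }
    return key.getBytes("UTF-8");
  }
}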

View File

@ -1295,6 +1295,29 @@ public abstract class Server {
} }
} }
private Throwable getCauseForInvalidToken(IOException e) {
Throwable cause = e;
while (cause != null) {
if (cause instanceof RetriableException) {
return (RetriableException) cause;
} else if (cause instanceof StandbyException) {
return (StandbyException) cause;
} else if (cause instanceof InvalidToken) {
// FIXME: hadoop method signatures are restricting the SASL
// callbacks to only returning InvalidToken, but some services
// need to throw other exceptions (ex. NN + StandbyException),
// so for now we'll tunnel the real exceptions via an
// InvalidToken's cause which normally is not set
if (cause.getCause() != null) {
cause = cause.getCause();
}
return cause;
}
cause = cause.getCause();
}
return e;
}
private void saslProcess(RpcSaslProto saslMessage) private void saslProcess(RpcSaslProto saslMessage)
throws WrappedRpcServerException, IOException, InterruptedException { throws WrappedRpcServerException, IOException, InterruptedException {
if (saslContextEstablished) { if (saslContextEstablished) {
@ -1307,29 +1330,11 @@ public abstract class Server {
try { try {
saslResponse = processSaslMessage(saslMessage); saslResponse = processSaslMessage(saslMessage);
} catch (IOException e) { } catch (IOException e) {
IOException sendToClient = e;
Throwable cause = e;
while (cause != null) {
if (cause instanceof InvalidToken) {
// FIXME: hadoop method signatures are restricting the SASL
// callbacks to only returning InvalidToken, but some services
// need to throw other exceptions (ex. NN + StandyException),
// so for now we'll tunnel the real exceptions via an
// InvalidToken's cause which normally is not set
if (cause.getCause() != null) {
cause = cause.getCause();
}
sendToClient = (IOException) cause;
break;
}
cause = cause.getCause();
}
rpcMetrics.incrAuthenticationFailures(); rpcMetrics.incrAuthenticationFailures();
String clientIP = this.toString();
// attempting user could be null // attempting user could be null
AUDITLOG.warn(AUTH_FAILED_FOR + clientIP + ":" + attemptingUser + AUDITLOG.warn(AUTH_FAILED_FOR + this.toString() + ":"
" (" + e.getLocalizedMessage() + ")"); + attemptingUser + " (" + e.getLocalizedMessage() + ")");
throw sendToClient; throw (IOException) getCauseForInvalidToken(e);
} }
if (saslServer != null && saslServer.isComplete()) { if (saslServer != null && saslServer.isComplete()) {
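
A sketch of the "tunneling" pattern the FIXME in getCauseForInvalidToken refers to: when a SASL callback may only declare InvalidToken, the real cause (here a StandbyException) is attached as the cause and unwrapped by the server before being sent to the client. The helper class is hypothetical.

import org.apache.hadoop.ipc.StandbyException;
import org.apache.hadoop.security.token.SecretManager.InvalidToken;

class TunnelledCauseSketch {
  static InvalidToken wrapStandby(String msg) {
    InvalidToken it = new InvalidToken(msg);
    it.initCause(new StandbyException("service is in standby state"));
    return it;
  }
}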

View File

@ -45,11 +45,13 @@ import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.ipc.RetriableException;
import org.apache.hadoop.ipc.Server; import org.apache.hadoop.ipc.Server;
import org.apache.hadoop.ipc.Server.Connection; import org.apache.hadoop.ipc.Server.Connection;
import org.apache.hadoop.ipc.StandbyException;
import org.apache.hadoop.security.token.SecretManager; import org.apache.hadoop.security.token.SecretManager;
import org.apache.hadoop.security.token.TokenIdentifier;
import org.apache.hadoop.security.token.SecretManager.InvalidToken; import org.apache.hadoop.security.token.SecretManager.InvalidToken;
import org.apache.hadoop.security.token.TokenIdentifier;
/** /**
* A utility class for dealing with SASL on RPC server * A utility class for dealing with SASL on RPC server
@ -267,13 +269,15 @@ public class SaslRpcServer {
this.connection = connection; this.connection = connection;
} }
private char[] getPassword(TokenIdentifier tokenid) throws InvalidToken { private char[] getPassword(TokenIdentifier tokenid) throws InvalidToken,
return encodePassword(secretManager.retrievePassword(tokenid)); StandbyException, RetriableException, IOException {
return encodePassword(secretManager.retriableRetrievePassword(tokenid));
} }
@Override @Override
public void handle(Callback[] callbacks) throws InvalidToken, public void handle(Callback[] callbacks) throws InvalidToken,
UnsupportedCallbackException { UnsupportedCallbackException, StandbyException, RetriableException,
IOException {
NameCallback nc = null; NameCallback nc = null;
PasswordCallback pc = null; PasswordCallback pc = null;
AuthorizeCallback ac = null; AuthorizeCallback ac = null;
@ -292,7 +296,8 @@ public class SaslRpcServer {
} }
} }
if (pc != null) { if (pc != null) {
TokenIdentifier tokenIdentifier = getIdentifier(nc.getDefaultName(), secretManager); TokenIdentifier tokenIdentifier = getIdentifier(nc.getDefaultName(),
secretManager);
char[] password = getPassword(tokenIdentifier); char[] password = getPassword(tokenIdentifier);
UserGroupInformation user = null; UserGroupInformation user = null;
user = tokenIdentifier.getUser(); // may throw exception user = tokenIdentifier.getUser(); // may throw exception

View File

@ -33,6 +33,7 @@ import java.util.Arrays;
import java.util.Collection; import java.util.Collection;
import java.util.Collections; import java.util.Collections;
import java.util.HashMap; import java.util.HashMap;
import java.util.Iterator;
import java.util.List; import java.util.List;
import java.util.Map; import java.util.Map;
import java.util.Set; import java.util.Set;
@ -1325,7 +1326,14 @@ public class UserGroupInformation {
* @return Credentials of tokens associated with this user * @return Credentials of tokens associated with this user
*/ */
public synchronized Credentials getCredentials() { public synchronized Credentials getCredentials() {
return new Credentials(getCredentialsInternal()); Credentials creds = new Credentials(getCredentialsInternal());
Iterator<Token<?>> iter = creds.getAllTokens().iterator();
while (iter.hasNext()) {
if (iter.next() instanceof Token.PrivateToken) {
iter.remove();
}
}
return creds;
} }
/** /**

View File

@ -29,6 +29,7 @@ import javax.crypto.spec.SecretKeySpec;
import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.ipc.RetriableException;
import org.apache.hadoop.ipc.StandbyException; import org.apache.hadoop.ipc.StandbyException;
@ -66,7 +67,29 @@ public abstract class SecretManager<T extends TokenIdentifier> {
* @return the password to use * @return the password to use
* @throws InvalidToken the token was invalid * @throws InvalidToken the token was invalid
*/ */
public abstract byte[] retrievePassword(T identifier) throws InvalidToken; public abstract byte[] retrievePassword(T identifier)
throws InvalidToken;
/**
 * The same functionality as {@link #retrievePassword}, except that this
 * method can throw a {@link RetriableException} or a {@link StandbyException}
 * to indicate that the client can retry/failover the same operation because of
 * a temporary issue on the server side.
*
* @param identifier the identifier to validate
* @return the password to use
* @throws InvalidToken the token was invalid
* @throws StandbyException the server is in standby state, the client can
* try other servers
* @throws RetriableException the token was invalid, and the server thinks
* this may be a temporary issue and suggests the client to retry
* @throws IOException to allow future exceptions to be added without breaking
* compatibility
*/
public byte[] retriableRetrievePassword(T identifier)
throws InvalidToken, StandbyException, RetriableException, IOException {
return retrievePassword(identifier);
}
/** /**
* Create an empty token identifier. * Create an empty token identifier.
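
A hypothetical subclass sketch of the new hook: answer "retry later" instead of "invalid" while the manager's secret keys are still being loaded (the keysLoaded flag is an assumption for illustration).

import java.io.IOException;

import org.apache.hadoop.ipc.RetriableException;
import org.apache.hadoop.security.token.SecretManager;
import org.apache.hadoop.security.token.TokenIdentifier;

abstract class StartupAwareSecretManager<T extends TokenIdentifier>
    extends SecretManager<T> {

  private volatile boolean keysLoaded = false; // assumed startup flag

  @Override
  public byte[] retriableRetrievePassword(T identifier)
      throws InvalidToken, RetriableException, IOException {
    if (!keysLoaded) {
      throw new RetriableException("Secret keys not loaded yet");
    }
    return retrievePassword(identifier); // standard validation once ready
  }
}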

View File

@ -19,31 +19,20 @@
package org.apache.hadoop.security.token; package org.apache.hadoop.security.token;
import com.google.common.collect.Maps; import com.google.common.collect.Maps;
import java.io.ByteArrayInputStream;
import java.io.DataInput;
import java.io.DataInputStream;
import java.io.DataOutput;
import java.io.IOException;
import java.util.Arrays;
import java.util.Map;
import java.util.ServiceLoader;
import org.apache.commons.codec.binary.Base64; import org.apache.commons.codec.binary.Base64;
import org.apache.commons.logging.Log; import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory; import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.DataInputBuffer; import org.apache.hadoop.io.*;
import org.apache.hadoop.io.DataOutputBuffer;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.io.WritableComparator;
import org.apache.hadoop.io.WritableUtils;
import org.apache.hadoop.util.ReflectionUtils; import org.apache.hadoop.util.ReflectionUtils;
import java.io.*;
import java.util.Arrays;
import java.util.Map;
import java.util.ServiceLoader;
/** /**
* The client-side form of the token. * The client-side form of the token.
*/ */
@ -195,6 +184,19 @@ public class Token<T extends TokenIdentifier> implements Writable {
service = newService; service = newService;
} }
/**
* Indicates whether the token is a clone. Used by HA failover proxy
* to indicate a token should not be visible to the user via
* UGI.getCredentials()
*/
@InterfaceAudience.Private
@InterfaceStability.Unstable
public static class PrivateToken<T extends TokenIdentifier> extends Token<T> {
public PrivateToken(Token<T> token) {
super(token);
}
}
@Override @Override
public void readFields(DataInput in) throws IOException { public void readFields(DataInput in) throws IOException {
int len = WritableUtils.readVInt(in); int len = WritableUtils.readVInt(in);
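
A sketch (class and parameter names are hypothetical) of how an HA proxy might register a cloned token so it is usable for RPC but filtered out of UGI.getCredentials(), as shown in the UserGroupInformation hunk above.

import org.apache.hadoop.io.Text;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.security.token.TokenIdentifier;

class PrivateTokenSketch {
  static <T extends TokenIdentifier> void addHiddenCopy(
      UserGroupInformation ugi, Token<T> original, Text logicalService) {
    Token<T> copy = new Token.PrivateToken<T>(original);
    copy.setService(logicalService); // e.g. the HA logical service name
    ugi.addToken(copy);              // invisible to ugi.getCredentials()
  }
}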

View File

@ -45,7 +45,7 @@ import org.apache.hadoop.util.Time;
import com.google.common.base.Preconditions; import com.google.common.base.Preconditions;
@InterfaceAudience.LimitedPrivate({"HDFS", "MapReduce"}) @InterfaceAudience.LimitedPrivate({"HDFS", "MapReduce", "Hive"})
@InterfaceStability.Evolving @InterfaceStability.Evolving
public abstract public abstract
class AbstractDelegationTokenSecretManager<TokenIdent class AbstractDelegationTokenSecretManager<TokenIdent
@ -289,20 +289,30 @@ extends AbstractDelegationTokenIdentifier>
+ tokenRenewInterval, password, getTrackingIdIfEnabled(identifier))); + tokenRenewInterval, password, getTrackingIdIfEnabled(identifier)));
return password; return password;
} }
@Override /**
public synchronized byte[] retrievePassword(TokenIdent identifier) * Find the DelegationTokenInformation for the given token id, and verify that
 * the token has not expired. Note that this method should be called while
 * holding the secret manager's monitor.
*/
protected DelegationTokenInformation checkToken(TokenIdent identifier)
throws InvalidToken { throws InvalidToken {
assert Thread.holdsLock(this);
DelegationTokenInformation info = currentTokens.get(identifier); DelegationTokenInformation info = currentTokens.get(identifier);
if (info == null) { if (info == null) {
throw new InvalidToken("token (" + identifier.toString() throw new InvalidToken("token (" + identifier.toString()
+ ") can't be found in cache"); + ") can't be found in cache");
} }
long now = Time.now(); if (info.getRenewDate() < Time.now()) {
if (info.getRenewDate() < now) {
throw new InvalidToken("token (" + identifier.toString() + ") is expired"); throw new InvalidToken("token (" + identifier.toString() + ") is expired");
} }
return info.getPassword(); return info;
}
@Override
public synchronized byte[] retrievePassword(TokenIdent identifier)
throws InvalidToken {
return checkToken(identifier).getPassword();
} }
protected String getTrackingIdIfEnabled(TokenIdent ident) { protected String getTrackingIdIfEnabled(TokenIdent ident) {
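
A hypothetical subclass sketch showing how the new protected checkToken() helper could be reused to validate a token identifier (present and not expired) without touching the password; the constructor parameter names are assumptions.

import org.apache.hadoop.security.token.SecretManager.InvalidToken;
import org.apache.hadoop.security.token.delegation.AbstractDelegationTokenIdentifier;
import org.apache.hadoop.security.token.delegation.AbstractDelegationTokenSecretManager;

abstract class VerifyingTokenSecretManager<T extends AbstractDelegationTokenIdentifier>
    extends AbstractDelegationTokenSecretManager<T> {

  protected VerifyingTokenSecretManager(long keyUpdateInterval, long tokenMaxLifetime,
      long tokenRenewInterval, long removerScanInterval) {
    super(keyUpdateInterval, tokenMaxLifetime, tokenRenewInterval, removerScanInterval);
  }

  public synchronized void verifyToken(T id) throws InvalidToken {
    checkToken(id); // throws InvalidToken if missing from the cache or expired
  }
}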

View File

@ -1,4 +1,4 @@
<?xml version="1.0" encoding="utf-8"?> <?xml version="1.0" encoding="utf-8"?>
<!-- <!--
Licensed to the Apache Software Foundation (ASF) under one or more Licensed to the Apache Software Foundation (ASF) under one or more

View File

@ -854,8 +854,10 @@ KVNO Timestamp Principal
| | The container process has the same Unix user as the NodeManager. | | | The container process has the same Unix user as the NodeManager. |
*--------------------------------------+--------------------------------------+ *--------------------------------------+--------------------------------------+
| <<<LinuxContainerExecutor>>> | | | <<<LinuxContainerExecutor>>> | |
| | Supported only on GNU/Linux, this executor runs the containers as the | | | Supported only on GNU/Linux, this executor runs the containers as either the |
| | user who submitted the application. It requires all user accounts to be | | | YARN user who submitted the application (when full security is enabled) or |
| | as a dedicated user (defaults to nobody) when full security is not enabled. |
| | When full security is enabled, this executor requires all user accounts to be |
| | created on the cluster nodes where the containers are launched. It uses | | | created on the cluster nodes where the containers are launched. It uses |
| | a <setuid> executable that is included in the Hadoop distribution. | | | a <setuid> executable that is included in the Hadoop distribution. |
| | The NodeManager uses this executable to launch and kill containers. | | | The NodeManager uses this executable to launch and kill containers. |

View File

@ -24,6 +24,8 @@ import java.io.FileInputStream;
import java.io.FileOutputStream; import java.io.FileOutputStream;
import java.io.FileReader; import java.io.FileReader;
import java.io.IOException; import java.io.IOException;
import java.io.OutputStream;
import java.net.URI;
import java.io.PrintWriter; import java.io.PrintWriter;
import java.util.ArrayList; import java.util.ArrayList;
import java.util.Arrays; import java.util.Arrays;
@ -32,15 +34,20 @@ import java.util.List;
import java.util.jar.Attributes; import java.util.jar.Attributes;
import java.util.jar.JarFile; import java.util.jar.JarFile;
import java.util.jar.Manifest; import java.util.jar.Manifest;
import java.util.zip.ZipEntry;
import java.util.zip.ZipOutputStream;
import org.apache.commons.logging.Log; import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory; import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.util.Shell; import org.apache.hadoop.util.Shell;
import org.apache.hadoop.util.StringUtils; import org.apache.hadoop.util.StringUtils;
import org.apache.tools.tar.TarEntry;
import org.apache.tools.tar.TarOutputStream;
import org.junit.After; import org.junit.After;
import org.junit.Assert; import org.junit.Assert;
import org.junit.Test; import org.junit.Test;
import static org.junit.Assert.*;
public class TestFileUtil { public class TestFileUtil {
private static final Log LOG = LogFactory.getLog(TestFileUtil.class); private static final Log LOG = LogFactory.getLog(TestFileUtil.class);
@ -48,14 +55,14 @@ public class TestFileUtil {
private static final String TEST_ROOT_DIR = System.getProperty( private static final String TEST_ROOT_DIR = System.getProperty(
"test.build.data", "/tmp") + "/fu"; "test.build.data", "/tmp") + "/fu";
private static final File TEST_DIR = new File(TEST_ROOT_DIR); private static final File TEST_DIR = new File(TEST_ROOT_DIR);
private static String FILE = "x"; private static final String FILE = "x";
private static String LINK = "y"; private static final String LINK = "y";
private static String DIR = "dir"; private static final String DIR = "dir";
private File del = new File(TEST_DIR, "del"); private final File del = new File(TEST_DIR, "del");
private File tmp = new File(TEST_DIR, "tmp"); private final File tmp = new File(TEST_DIR, "tmp");
private File dir1 = new File(del, DIR + "1"); private final File dir1 = new File(del, DIR + "1");
private File dir2 = new File(del, DIR + "2"); private final File dir2 = new File(del, DIR + "2");
private File partitioned = new File(TEST_DIR, "partitioned"); private final File partitioned = new File(TEST_DIR, "partitioned");
/** /**
* Creates multiple directories for testing. * Creates multiple directories for testing.
@ -116,17 +123,17 @@ public class TestFileUtil {
* @param contents String non-null file contents. * @param contents String non-null file contents.
* @throws IOException if an I/O error occurs. * @throws IOException if an I/O error occurs.
*/ */
private void createFile(File directory, String name, String contents) private File createFile(File directory, String name, String contents)
throws IOException { throws IOException {
File newFile = new File(directory, name); File newFile = new File(directory, name);
PrintWriter pw = new PrintWriter(newFile); PrintWriter pw = new PrintWriter(newFile);
try { try {
pw.println(contents); pw.println(contents);
} }
finally { finally {
pw.close(); pw.close();
} }
return newFile;
} }
@Test (timeout = 30000) @Test (timeout = 30000)
@ -553,14 +560,283 @@ public class TestFileUtil {
* @throws IOException * @throws IOException
*/ */
@Test (timeout = 30000) @Test (timeout = 30000)
public void testGetDU() throws IOException { public void testGetDU() throws Exception {
setupDirs(); setupDirs();
long du = FileUtil.getDU(TEST_DIR); long du = FileUtil.getDU(TEST_DIR);
// Only two files (in partitioned). Each has 3 characters + system-specific // Only two files (in partitioned). Each has 3 characters + system-specific
// line separator. // line separator.
long expected = 2 * (3 + System.getProperty("line.separator").length()); final long expected = 2 * (3 + System.getProperty("line.separator").length());
Assert.assertEquals(expected, du); Assert.assertEquals(expected, du);
// target file does not exist:
final File doesNotExist = new File(tmp, "QuickBrownFoxJumpsOverTheLazyDog");
long duDoesNotExist = FileUtil.getDU(doesNotExist);
assertEquals(0, duDoesNotExist);
// target file is not a directory:
File notADirectory = new File(partitioned, "part-r-00000");
long duNotADirectoryActual = FileUtil.getDU(notADirectory);
long duNotADirectoryExpected = 3 + System.getProperty("line.separator").length();
assertEquals(duNotADirectoryExpected, duNotADirectoryActual);
try {
// one of target files is not accessible, but the containing directory
// is accessible:
try {
FileUtil.chmod(notADirectory.getAbsolutePath(), "0000");
} catch (InterruptedException ie) {
// should never happen since that method never throws InterruptedException.
assertNull(ie);
}
assertFalse(notADirectory.canRead());
final long du3 = FileUtil.getDU(partitioned);
assertEquals(expected, du3);
// some target files and containing directory are not accessible:
try {
FileUtil.chmod(partitioned.getAbsolutePath(), "0000");
} catch (InterruptedException ie) {
// should never happen since that method never throws InterruptedException.
assertNull(ie);
}
assertFalse(partitioned.canRead());
final long du4 = FileUtil.getDU(partitioned);
assertEquals(0, du4);
} finally {
// Restore the permissions so that we can delete the folder
// in @After method:
FileUtil.chmod(partitioned.getAbsolutePath(), "0777", true/*recursive*/);
}
}
@Test (timeout = 30000)
public void testUnTar() throws IOException {
setupDirs();
// make a simple tar:
final File simpleTar = new File(del, FILE);
OutputStream os = new FileOutputStream(simpleTar);
TarOutputStream tos = new TarOutputStream(os);
try {
TarEntry te = new TarEntry("foo");
byte[] data = "some-content".getBytes("UTF-8");
te.setSize(data.length);
tos.putNextEntry(te);
tos.write(data);
tos.closeEntry();
tos.flush();
tos.finish();
} finally {
tos.close();
}
// successfully untar it into an existing dir:
FileUtil.unTar(simpleTar, tmp);
// check result:
assertTrue(new File(tmp, "foo").exists());
assertEquals(12, new File(tmp, "foo").length());
final File regularFile = new File(tmp, "QuickBrownFoxJumpsOverTheLazyDog");
regularFile.createNewFile();
assertTrue(regularFile.exists());
try {
FileUtil.unTar(simpleTar, regularFile);
assertTrue("An IOException expected.", false);
} catch (IOException ioe) {
// okay
}
}
@Test (timeout = 30000)
public void testReplaceFile() throws IOException {
setupDirs();
final File srcFile = new File(tmp, "src");
// src exists, and target does not exist:
srcFile.createNewFile();
assertTrue(srcFile.exists());
final File targetFile = new File(tmp, "target");
assertTrue(!targetFile.exists());
FileUtil.replaceFile(srcFile, targetFile);
assertTrue(!srcFile.exists());
assertTrue(targetFile.exists());
// src exists and target is a regular file:
srcFile.createNewFile();
assertTrue(srcFile.exists());
FileUtil.replaceFile(srcFile, targetFile);
assertTrue(!srcFile.exists());
assertTrue(targetFile.exists());
// src exists, and target is a non-empty directory:
srcFile.createNewFile();
assertTrue(srcFile.exists());
targetFile.delete();
targetFile.mkdirs();
File obstacle = new File(targetFile, "obstacle");
obstacle.createNewFile();
assertTrue(obstacle.exists());
assertTrue(targetFile.exists() && targetFile.isDirectory());
try {
FileUtil.replaceFile(srcFile, targetFile);
assertTrue(false);
} catch (IOException ioe) {
// okay
}
// check up the post-condition: nothing is deleted:
assertTrue(srcFile.exists());
assertTrue(targetFile.exists() && targetFile.isDirectory());
assertTrue(obstacle.exists());
}
@Test (timeout = 30000)
public void testCreateLocalTempFile() throws IOException {
setupDirs();
final File baseFile = new File(tmp, "base");
File tmp1 = FileUtil.createLocalTempFile(baseFile, "foo", false);
File tmp2 = FileUtil.createLocalTempFile(baseFile, "foo", true);
assertFalse(tmp1.getAbsolutePath().equals(baseFile.getAbsolutePath()));
assertFalse(tmp2.getAbsolutePath().equals(baseFile.getAbsolutePath()));
assertTrue(tmp1.exists() && tmp2.exists());
assertTrue(tmp1.canWrite() && tmp2.canWrite());
assertTrue(tmp1.canRead() && tmp2.canRead());
tmp1.delete();
tmp2.delete();
assertTrue(!tmp1.exists() && !tmp2.exists());
}
@Test (timeout = 30000)
public void testUnZip() throws IOException {
// make a simple zip
setupDirs();
// make a simple zip:
final File simpleZip = new File(del, FILE);
OutputStream os = new FileOutputStream(simpleZip);
ZipOutputStream tos = new ZipOutputStream(os);
try {
ZipEntry ze = new ZipEntry("foo");
byte[] data = "some-content".getBytes("UTF-8");
ze.setSize(data.length);
tos.putNextEntry(ze);
tos.write(data);
tos.closeEntry();
tos.flush();
tos.finish();
} finally {
tos.close();
}
// successfully unzip it into an existing dir:
FileUtil.unZip(simpleZip, tmp);
// check result:
assertTrue(new File(tmp, "foo").exists());
assertEquals(12, new File(tmp, "foo").length());
final File regularFile = new File(tmp, "QuickBrownFoxJumpsOverTheLazyDog");
regularFile.createNewFile();
assertTrue(regularFile.exists());
try {
FileUtil.unZip(simpleZip, regularFile);
assertTrue("An IOException expected.", false);
} catch (IOException ioe) {
// okay
}
}
@Test (timeout = 30000)
/*
* Test method copy(FileSystem srcFS, Path src, File dst, boolean deleteSource, Configuration conf)
*/
public void testCopy5() throws IOException {
setupDirs();
URI uri = tmp.toURI();
Configuration conf = new Configuration();
FileSystem fs = FileSystem.newInstance(uri, conf);
final String content = "some-content";
File srcFile = createFile(tmp, "src", content);
Path srcPath = new Path(srcFile.toURI());
// copy regular file:
final File dest = new File(del, "dest");
boolean result = FileUtil.copy(fs, srcPath, dest, false, conf);
assertTrue(result);
assertTrue(dest.exists());
assertEquals(content.getBytes().length
+ System.getProperty("line.separator").getBytes().length, dest.length());
assertTrue(srcFile.exists()); // should not be deleted
// copy regular file, delete src:
dest.delete();
assertTrue(!dest.exists());
result = FileUtil.copy(fs, srcPath, dest, true, conf);
assertTrue(result);
assertTrue(dest.exists());
assertEquals(content.getBytes().length
+ System.getProperty("line.separator").getBytes().length, dest.length());
assertTrue(!srcFile.exists()); // should be deleted
// copy a dir:
dest.delete();
assertTrue(!dest.exists());
srcPath = new Path(partitioned.toURI());
result = FileUtil.copy(fs, srcPath, dest, true, conf);
assertTrue(result);
assertTrue(dest.exists() && dest.isDirectory());
File[] files = dest.listFiles();
assertTrue(files != null);
assertEquals(2, files.length);
for (File f: files) {
assertEquals(3
+ System.getProperty("line.separator").getBytes().length, f.length());
}
assertTrue(!partitioned.exists()); // should be deleted
}
@Test (timeout = 30000)
public void testStat2Paths1() {
assertNull(FileUtil.stat2Paths(null));
FileStatus[] fileStatuses = new FileStatus[0];
Path[] paths = FileUtil.stat2Paths(fileStatuses);
assertEquals(0, paths.length);
Path path1 = new Path("file://foo");
Path path2 = new Path("file://moo");
fileStatuses = new FileStatus[] {
new FileStatus(3, false, 0, 0, 0, path1),
new FileStatus(3, false, 0, 0, 0, path2)
};
paths = FileUtil.stat2Paths(fileStatuses);
assertEquals(2, paths.length);
assertEquals(paths[0], path1);
assertEquals(paths[1], path2);
}
@Test (timeout = 30000)
public void testStat2Paths2() {
Path defaultPath = new Path("file://default");
Path[] paths = FileUtil.stat2Paths(null, defaultPath);
assertEquals(1, paths.length);
assertEquals(defaultPath, paths[0]);
paths = FileUtil.stat2Paths(null, null);
assertTrue(paths != null);
assertEquals(1, paths.length);
assertEquals(null, paths[0]);
Path path1 = new Path("file://foo");
Path path2 = new Path("file://moo");
FileStatus[] fileStatuses = new FileStatus[] {
new FileStatus(3, false, 0, 0, 0, path1),
new FileStatus(3, false, 0, 0, 0, path2)
};
paths = FileUtil.stat2Paths(fileStatuses, defaultPath);
assertEquals(2, paths.length);
assertEquals(paths[0], path1);
assertEquals(paths[1], path2);
} }
@Test (timeout = 30000) @Test (timeout = 30000)

View File

@ -19,7 +19,9 @@
package org.apache.hadoop.fs; package org.apache.hadoop.fs;
import static org.junit.Assert.*; import static org.junit.Assert.*;
import static org.junit.Assume.assumeTrue;
import java.io.File;
import java.io.IOException; import java.io.IOException;
import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configuration;
@ -106,7 +108,7 @@ public class TestFsShellCopy {
Path targetDir = new Path(testRoot, "target"); Path targetDir = new Path(testRoot, "target");
Path filePath = new Path(testRoot, new Path("srcFile")); Path filePath = new Path(testRoot, new Path("srcFile"));
lfs.create(filePath).close(); lfs.create(filePath).close();
checkPut(filePath, targetDir); checkPut(filePath, targetDir, false);
} }
@Test @Test
@ -119,10 +121,42 @@ public class TestFsShellCopy {
Path dirPath = new Path(testRoot, new Path("srcDir")); Path dirPath = new Path(testRoot, new Path("srcDir"));
lfs.mkdirs(dirPath); lfs.mkdirs(dirPath);
lfs.create(new Path(dirPath, "srcFile")).close(); lfs.create(new Path(dirPath, "srcFile")).close();
checkPut(dirPath, targetDir); checkPut(dirPath, targetDir, false);
} }
@Test
public void testCopyFileFromWindowsLocalPath() throws Exception {
assumeTrue(Path.WINDOWS);
String windowsTestRootPath = (new File(testRootDir.toUri().getPath()
.toString())).getAbsolutePath();
Path testRoot = new Path(windowsTestRootPath, "testPutFile");
lfs.delete(testRoot, true);
lfs.mkdirs(testRoot);
Path targetDir = new Path(testRoot, "target");
Path filePath = new Path(testRoot, new Path("srcFile"));
lfs.create(filePath).close();
checkPut(filePath, targetDir, true);
}
@Test
public void testCopyDirFromWindowsLocalPath() throws Exception {
assumeTrue(Path.WINDOWS);
String windowsTestRootPath = (new File(testRootDir.toUri().getPath()
.toString())).getAbsolutePath();
Path testRoot = new Path(windowsTestRootPath, "testPutDir");
lfs.delete(testRoot, true);
lfs.mkdirs(testRoot);
Path targetDir = new Path(testRoot, "target");
Path dirPath = new Path(testRoot, new Path("srcDir"));
lfs.mkdirs(dirPath);
lfs.create(new Path(dirPath, "srcFile")).close();
checkPut(dirPath, targetDir, true);
}
private void checkPut(Path srcPath, Path targetDir) private void checkPut(Path srcPath, Path targetDir, boolean useWindowsPath)
throws Exception { throws Exception {
lfs.delete(targetDir, true); lfs.delete(targetDir, true);
lfs.mkdirs(targetDir); lfs.mkdirs(targetDir);
@ -134,37 +168,37 @@ public class TestFsShellCopy {
// copy to new file, then again // copy to new file, then again
prepPut(dstPath, false, false); prepPut(dstPath, false, false);
checkPut(0, srcPath, dstPath); checkPut(0, srcPath, dstPath, useWindowsPath);
if (lfs.isFile(srcPath)) { if (lfs.isFile(srcPath)) {
checkPut(1, srcPath, dstPath); checkPut(1, srcPath, dstPath, useWindowsPath);
} else { // directory works because it copies into the dir } else { // directory works because it copies into the dir
// clear contents so the check won't think there are extra paths // clear contents so the check won't think there are extra paths
prepPut(dstPath, true, true); prepPut(dstPath, true, true);
checkPut(0, srcPath, dstPath); checkPut(0, srcPath, dstPath, useWindowsPath);
} }
// copy to non-existent subdir // copy to non-existent subdir
prepPut(childPath, false, false); prepPut(childPath, false, false);
checkPut(1, srcPath, dstPath); checkPut(1, srcPath, dstPath, useWindowsPath);
// copy into dir, then with another name // copy into dir, then with another name
prepPut(dstPath, true, true); prepPut(dstPath, true, true);
checkPut(0, srcPath, dstPath); checkPut(0, srcPath, dstPath, useWindowsPath);
prepPut(childPath, true, true); prepPut(childPath, true, true);
checkPut(0, srcPath, childPath); checkPut(0, srcPath, childPath, useWindowsPath);
// try to put to pwd with existing dir // try to put to pwd with existing dir
prepPut(targetDir, true, true); prepPut(targetDir, true, true);
checkPut(0, srcPath, null); checkPut(0, srcPath, null, useWindowsPath);
prepPut(targetDir, true, true); prepPut(targetDir, true, true);
checkPut(0, srcPath, new Path(".")); checkPut(0, srcPath, new Path("."), useWindowsPath);
// try to put to pwd with non-existent cwd // try to put to pwd with non-existent cwd
prepPut(dstPath, false, true); prepPut(dstPath, false, true);
lfs.setWorkingDirectory(dstPath); lfs.setWorkingDirectory(dstPath);
checkPut(1, srcPath, null); checkPut(1, srcPath, null, useWindowsPath);
prepPut(dstPath, false, true); prepPut(dstPath, false, true);
checkPut(1, srcPath, new Path(".")); checkPut(1, srcPath, new Path("."), useWindowsPath);
} }
private void prepPut(Path dst, boolean create, private void prepPut(Path dst, boolean create,
@ -183,12 +217,17 @@ public class TestFsShellCopy {
} }
} }
private void checkPut(int exitCode, Path src, Path dest) throws Exception { private void checkPut(int exitCode, Path src, Path dest,
boolean useWindowsPath) throws Exception {
String argv[] = null; String argv[] = null;
String srcPath = src.toString();
if (useWindowsPath) {
srcPath = (new File(srcPath)).getAbsolutePath();
}
if (dest != null) { if (dest != null) {
argv = new String[]{ "-put", src.toString(), pathAsString(dest) }; argv = new String[]{ "-put", srcPath, pathAsString(dest) };
} else { } else {
argv = new String[]{ "-put", src.toString() }; argv = new String[]{ "-put", srcPath };
dest = new Path(Path.CUR_DIR); dest = new Path(Path.CUR_DIR);
} }
@ -418,6 +457,34 @@ public class TestFsShellCopy {
assertTrue(lfs.exists(srcDir)); assertTrue(lfs.exists(srcDir));
} }
@Test
public void testMoveFromWindowsLocalPath() throws Exception {
assumeTrue(Path.WINDOWS);
Path testRoot = new Path(testRootDir, "testPutFile");
lfs.delete(testRoot, true);
lfs.mkdirs(testRoot);
Path target = new Path(testRoot, "target");
Path srcFile = new Path(testRoot, new Path("srcFile"));
lfs.createNewFile(srcFile);
String winSrcFile = (new File(srcFile.toUri().getPath()
.toString())).getAbsolutePath();
shellRun(0, "-moveFromLocal", winSrcFile, target.toString());
assertFalse(lfs.exists(srcFile));
assertTrue(lfs.exists(target));
assertTrue(lfs.isFile(target));
}
@Test
public void testGetWindowsLocalPath() throws Exception {
assumeTrue(Path.WINDOWS);
String winDstFile = (new File(dstPath.toUri().getPath()
.toString())).getAbsolutePath();
shellRun(0, "-get", srcPath.toString(), winDstFile);
checkPath(dstPath, false);
}
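
Both Windows-specific tests above gate on JUnit's Assume mechanism so they are reported as skipped, not failed, on other platforms. Below is a minimal sketch of the same gating pattern kept separate from the Hadoop shell machinery; the class and method names are illustrative, and only Path.WINDOWS plus the stock JUnit 4 Assume/Assert APIs are taken as given.

import static org.junit.Assert.assertTrue;
import static org.junit.Assume.assumeTrue;

import java.io.File;

import org.apache.hadoop.fs.Path;
import org.junit.Test;

public class WindowsOnlyPathExample {
  @Test
  public void testDriveLetterPath() {
    // assumeTrue() marks the test as skipped (not failed) off Windows.
    assumeTrue(Path.WINDOWS);
    // Convert a URI-style path to the platform-native absolute form,
    // mirroring what the shell tests above do before calling -put/-get.
    String winPath = new File("/tmp/example.txt").getAbsolutePath();
    assertTrue("expected a drive-letter path: " + winPath,
        winPath.indexOf(':') == 1);
  }
}
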
private void createFile(Path ... paths) throws IOException { private void createFile(Path ... paths) throws IOException {
for (Path path : paths) { for (Path path : paths) {
FSDataOutputStream out = lfs.create(path); FSDataOutputStream out = lfs.create(path);


@ -18,14 +18,155 @@
package org.apache.hadoop.fs; package org.apache.hadoop.fs;
import java.io.IOException; import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.security.Credentials;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.util.Progressable;
import org.junit.Assert; import org.junit.Assert;
import static org.junit.Assert.*;
import org.junit.Test; import org.junit.Test;
import java.io.IOException;
import java.lang.reflect.Method;
import java.lang.reflect.Modifier;
import java.util.EnumSet;
import java.util.Iterator;
import static org.apache.hadoop.fs.Options.ChecksumOpt;
import static org.apache.hadoop.fs.Options.CreateOpts;
import static org.apache.hadoop.fs.Options.Rename;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
@SuppressWarnings("deprecation")
public class TestHarFileSystem { public class TestHarFileSystem {
public static final Log LOG = LogFactory.getLog(TestHarFileSystem.class);
/**
 * FileSystem methods that must not be overridden by
 * {@link HarFileSystem}, either because a default implementation is
 * already available or because the method is not relevant to HAR.
*/
@SuppressWarnings("deprecation")
private interface MustNotImplement {
public BlockLocation[] getFileBlockLocations(Path p, long start, long len);
public long getLength(Path f);
public FSDataOutputStream append(Path f, int bufferSize);
public void rename(Path src, Path dst, Rename... options);
public boolean exists(Path f);
public boolean isDirectory(Path f);
public boolean isFile(Path f);
public boolean createNewFile(Path f);
public FSDataOutputStream createNonRecursive(Path f, FsPermission permission,
boolean overwrite, int bufferSize, short replication, long blockSize,
Progressable progress) throws IOException;
public FSDataOutputStream createNonRecursive(Path f, FsPermission permission,
EnumSet<CreateFlag> flags, int bufferSize, short replication, long blockSize,
Progressable progress) throws IOException;
public FSDataOutputStream createNonRecursive(Path f, FsPermission permission,
EnumSet<CreateFlag> flags, int bufferSize, short replication, long blockSize,
Progressable progress, ChecksumOpt checksumOpt);
public boolean mkdirs(Path f);
public FSDataInputStream open(Path f);
public FSDataOutputStream create(Path f);
public FSDataOutputStream create(Path f, boolean overwrite);
public FSDataOutputStream create(Path f, Progressable progress);
public FSDataOutputStream create(Path f, short replication);
public FSDataOutputStream create(Path f, short replication,
Progressable progress);
public FSDataOutputStream create(Path f, boolean overwrite,
int bufferSize);
public FSDataOutputStream create(Path f, boolean overwrite, int bufferSize,
Progressable progress);
public FSDataOutputStream create(Path f, boolean overwrite, int bufferSize,
short replication, long blockSize);
public FSDataOutputStream create(Path f, boolean overwrite, int bufferSize,
short replication, long blockSize, Progressable progress);
public FSDataOutputStream create(Path f, FsPermission permission,
EnumSet<CreateFlag> flags, int bufferSize, short replication,
long blockSize, Progressable progress) throws IOException;
public FSDataOutputStream create(Path f, FsPermission permission,
EnumSet<CreateFlag> flags, int bufferSize, short replication,
long blockSize, Progressable progress, ChecksumOpt checksumOpt)
throws IOException;
public String getName();
public boolean delete(Path f);
public short getReplication(Path src);
public void processDeleteOnExit();
public ContentSummary getContentSummary(Path f);
public FsStatus getStatus();
public FileStatus[] listStatus(Path f, PathFilter filter);
public FileStatus[] listStatus(Path[] files);
public FileStatus[] listStatus(Path[] files, PathFilter filter);
public FileStatus[] globStatus(Path pathPattern);
public FileStatus[] globStatus(Path pathPattern, PathFilter filter);
public Iterator<LocatedFileStatus> listFiles(Path path,
boolean isRecursive);
public Iterator<LocatedFileStatus> listLocatedStatus(Path f);
public Iterator<LocatedFileStatus> listLocatedStatus(Path f,
PathFilter filter);
public void copyFromLocalFile(Path src, Path dst);
public void moveFromLocalFile(Path[] srcs, Path dst);
public void moveFromLocalFile(Path src, Path dst);
public void copyToLocalFile(Path src, Path dst);
public void copyToLocalFile(boolean delSrc, Path src, Path dst,
boolean useRawLocalFileSystem);
public void moveToLocalFile(Path src, Path dst);
public long getBlockSize(Path f);
public FSDataOutputStream primitiveCreate(Path f,
EnumSet<CreateFlag> createFlag, CreateOpts... opts);
public void primitiveMkdir(Path f, FsPermission absolutePermission,
boolean createParent);
public int getDefaultPort();
public String getCanonicalServiceName();
public Token<?> getDelegationToken(String renewer) throws IOException;
public boolean deleteOnExit(Path f) throws IOException;
public boolean cancelDeleteOnExit(Path f) throws IOException;
public Token<?>[] addDelegationTokens(String renewer, Credentials creds)
throws IOException;
public Path fixRelativePart(Path p);
public void concat(Path trg, Path [] psrcs) throws IOException;
public FSDataOutputStream primitiveCreate(Path f,
FsPermission absolutePermission, EnumSet<CreateFlag> flag, int bufferSize,
short replication, long blockSize, Progressable progress,
ChecksumOpt checksumOpt) throws IOException;
public boolean primitiveMkdir(Path f, FsPermission absolutePermission)
throws IOException;
public RemoteIterator<Path> listCorruptFileBlocks(Path path)
throws IOException;
public void copyFromLocalFile(boolean delSrc, Path src, Path dst)
throws IOException;
public void createSymlink(Path target, Path link, boolean createParent)
throws IOException;
public FileStatus getFileLinkStatus(Path f) throws IOException;
public boolean supportsSymlinks();
public Path getLinkTarget(Path f) throws IOException;
public Path resolveLink(Path f) throws IOException;
public void setVerifyChecksum(boolean verifyChecksum);
public void setWriteChecksum(boolean writeChecksum);
public Path createSnapshot(Path path, String snapshotName) throws
IOException;
public void renameSnapshot(Path path, String snapshotOldName,
String snapshotNewName) throws IOException;
public void deleteSnapshot(Path path, String snapshotName)
throws IOException;
}
@Test @Test
public void testHarUri() { public void testHarUri() {
final Configuration conf = new Configuration(); final Configuration conf = new Configuration();
@ -44,8 +185,7 @@ public class TestHarFileSystem {
p.getFileSystem(conf); p.getFileSystem(conf);
Assert.fail(p + " is an invalid path."); Assert.fail(p + " is an invalid path.");
} catch (IOException e) { } catch (IOException e) {
System.out.println("GOOD: Got an exception."); // Expected
e.printStackTrace(System.out);
} }
} }
@ -133,6 +273,37 @@ public class TestHarFileSystem {
assertEquals(b[1].getOffset(), 128); assertEquals(b[1].getOffset(), 128);
assertEquals(b[1].getLength(), 384); assertEquals(b[1].getLength(), 384);
} }
}
@Test
public void testInheritedMethodsImplemented() throws Exception {
int errors = 0;
for (Method m : FileSystem.class.getDeclaredMethods()) {
if (Modifier.isStatic(m.getModifiers()) ||
Modifier.isPrivate(m.getModifiers()) ||
Modifier.isFinal(m.getModifiers())) {
continue;
}
try {
MustNotImplement.class.getMethod(m.getName(), m.getParameterTypes());
try {
HarFileSystem.class.getDeclaredMethod(m.getName(), m.getParameterTypes());
LOG.error("HarFileSystem MUST not implement " + m);
errors++;
} catch (NoSuchMethodException ex) {
// Expected
}
} catch (NoSuchMethodException exc) {
try {
HarFileSystem.class.getDeclaredMethod(m.getName(), m.getParameterTypes());
} catch (NoSuchMethodException exc2) {
LOG.error("HarFileSystem MUST implement " + m);
errors++;
}
}
}
assertTrue((errors + " methods were not overridden correctly - see log"),
errors <= 0);
} }
} }
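
The check above is plain JDK reflection: enumerate FileSystem's declared methods, skip those that cannot be overridden, and compare against what HarFileSystem declares, with MustNotImplement acting as an allow-list. A stripped-down sketch of the same idea against ordinary JDK classes; the class and method names here are illustrative.

import java.lang.reflect.Method;
import java.lang.reflect.Modifier;
import java.util.ArrayList;
import java.util.List;

public class OverrideAudit {
  /**
   * Returns the non-static, non-private, non-final methods declared by
   * base that sub does not declare itself, i.e. methods the subclass
   * silently inherits.
   */
  static List<Method> inheritedOnly(Class<?> base, Class<?> sub) {
    List<Method> missing = new ArrayList<Method>();
    for (Method m : base.getDeclaredMethods()) {
      int mod = m.getModifiers();
      if (Modifier.isStatic(mod) || Modifier.isPrivate(mod)
          || Modifier.isFinal(mod)) {
        continue; // these cannot or need not be overridden
      }
      try {
        sub.getDeclaredMethod(m.getName(), m.getParameterTypes());
      } catch (NoSuchMethodException e) {
        missing.add(m); // subclass relies on the inherited implementation
      }
    }
    return missing;
  }

  public static void main(String[] args) {
    // Example: which java.util.AbstractList methods does ArrayList inherit?
    for (Method m : inheritedOnly(java.util.AbstractList.class,
                                  java.util.ArrayList.class)) {
      System.out.println("inherited: " + m);
    }
  }
}
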


@ -18,14 +18,6 @@
package org.apache.hadoop.fs; package org.apache.hadoop.fs;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.assertFalse;
import java.io.File;
import java.io.IOException;
import java.net.URI;
import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.util.Shell; import org.apache.hadoop.util.Shell;
@ -34,6 +26,14 @@ import org.junit.Assert;
import org.junit.Before; import org.junit.Before;
import org.junit.Test; import org.junit.Test;
import java.io.File;
import java.io.IOException;
import java.net.URI;
import java.util.HashSet;
import java.util.Set;
import static org.junit.Assert.*;
/** /**
* This test class checks basic operations with {@link HarFileSystem} including * This test class checks basic operations with {@link HarFileSystem} including
* various initialization cases, getters, and modification methods. * various initialization cases, getters, and modification methods.
@ -69,7 +69,7 @@ public class TestHarFileSystemBasics {
/* /*
* creates and returns fully initialized HarFileSystem * creates and returns fully initialized HarFileSystem
*/ */
private HarFileSystem createHarFileSysten(final Configuration conf) private HarFileSystem createHarFileSystem(final Configuration conf)
throws Exception { throws Exception {
localFileSystem = FileSystem.getLocal(conf); localFileSystem = FileSystem.getLocal(conf);
localFileSystem.initialize(new URI("file:///"), conf); localFileSystem.initialize(new URI("file:///"), conf);
@ -130,7 +130,7 @@ public class TestHarFileSystemBasics {
} }
// create Har to test: // create Har to test:
conf = new Configuration(); conf = new Configuration();
harFileSystem = createHarFileSysten(conf); harFileSystem = createHarFileSystem(conf);
} }
@After @After
@ -232,6 +232,32 @@ public class TestHarFileSystemBasics {
assertTrue(p2.toUri().toString().startsWith("har://file-localhost/")); assertTrue(p2.toUri().toString().startsWith("har://file-localhost/"));
} }
@Test
public void testListLocatedStatus() throws Exception {
String testHarPath = this.getClass().getResource("/test.har").getPath();
URI uri = new URI("har://" + testHarPath);
HarFileSystem hfs = new HarFileSystem(localFileSystem);
hfs.initialize(uri, new Configuration());
// test.har has the following contents:
// dir1/1.txt
// dir1/2.txt
Set<String> expectedFileNames = new HashSet<String>();
expectedFileNames.add("1.txt");
expectedFileNames.add("2.txt");
// List contents of dir, and ensure we find all expected files
Path path = new Path("dir1");
RemoteIterator<LocatedFileStatus> fileList = hfs.listLocatedStatus(path);
while (fileList.hasNext()) {
String fileName = fileList.next().getPath().getName();
assertTrue(fileName + " not in expected files list", expectedFileNames.contains(fileName));
expectedFileNames.remove(fileName);
}
assertEquals("Didn't find all of the expected file names: " + expectedFileNames,
0, expectedFileNames.size());
}
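
The listing test above walks HarFileSystem.listLocatedStatus() through Hadoop's RemoteIterator protocol, where both hasNext() and next() may throw IOException. A small illustrative helper showing the same drain pattern in isolation; RemoteIterator and LocatedFileStatus are the types used above, while the helper class and method names are hypothetical.

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;

import org.apache.hadoop.fs.LocatedFileStatus;
import org.apache.hadoop.fs.RemoteIterator;

public class RemoteIteratorUtil {
  /** Drains a RemoteIterator into a list of file names. */
  static List<String> names(RemoteIterator<LocatedFileStatus> it)
      throws IOException {
    List<String> out = new ArrayList<String>();
    while (it.hasNext()) {
      out.add(it.next().getPath().getName());
    }
    return out;
  }
}
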
// ========== Negative: // ========== Negative:
@Test @Test


@ -54,7 +54,7 @@ public class TestSSLHttpServer extends HttpServerFunctionalTest {
@Before @Before
public void setup() throws Exception { public void setup() throws Exception {
HttpConfig.setSecure(true); HttpConfig.setPolicy(HttpConfig.Policy.HTTPS_ONLY);
File base = new File(BASEDIR); File base = new File(BASEDIR);
FileUtil.fullyDelete(base); FileUtil.fullyDelete(base);
base.mkdirs(); base.mkdirs();
@ -89,7 +89,7 @@ public class TestSSLHttpServer extends HttpServerFunctionalTest {
String classpathDir = String classpathDir =
KeyStoreTestUtil.getClasspathDir(TestSSLHttpServer.class); KeyStoreTestUtil.getClasspathDir(TestSSLHttpServer.class);
new File(classpathDir, CONFIG_SITE_XML).delete(); new File(classpathDir, CONFIG_SITE_XML).delete();
HttpConfig.setSecure(false); HttpConfig.setPolicy(HttpConfig.Policy.HTTP_ONLY);
} }


@ -19,18 +19,23 @@
package org.apache.hadoop.io; package org.apache.hadoop.io;
import java.io.*; import java.io.*;
import junit.framework.TestCase; import junit.framework.TestCase;
import org.apache.commons.logging.*; import org.apache.commons.logging.*;
import org.apache.hadoop.fs.*; import org.apache.hadoop.fs.*;
import org.apache.hadoop.io.SequenceFile.CompressionType;
import org.apache.hadoop.util.Progressable;
import org.apache.hadoop.conf.*; import org.apache.hadoop.conf.*;
/** Support for flat files of binary key/value pairs. */ /** Support for flat files of binary key/value pairs. */
public class TestArrayFile extends TestCase { public class TestArrayFile extends TestCase {
private static final Log LOG = LogFactory.getLog(TestArrayFile.class); private static final Log LOG = LogFactory.getLog(TestArrayFile.class);
private static String FILE =
System.getProperty("test.build.data",".") + "/test.array"; private static final Path TEST_DIR = new Path(
System.getProperty("test.build.data", "/tmp"),
TestMapFile.class.getSimpleName());
private static String TEST_FILE = new Path(TEST_DIR, "test.array").toString();
public TestArrayFile(String name) { public TestArrayFile(String name) {
super(name); super(name);
@ -40,15 +45,15 @@ public class TestArrayFile extends TestCase {
Configuration conf = new Configuration(); Configuration conf = new Configuration();
FileSystem fs = FileSystem.getLocal(conf); FileSystem fs = FileSystem.getLocal(conf);
RandomDatum[] data = generate(10000); RandomDatum[] data = generate(10000);
writeTest(fs, data, FILE); writeTest(fs, data, TEST_FILE);
readTest(fs, data, FILE, conf); readTest(fs, data, TEST_FILE, conf);
} }
public void testEmptyFile() throws Exception { public void testEmptyFile() throws Exception {
Configuration conf = new Configuration(); Configuration conf = new Configuration();
FileSystem fs = FileSystem.getLocal(conf); FileSystem fs = FileSystem.getLocal(conf);
writeTest(fs, new RandomDatum[0], FILE); writeTest(fs, new RandomDatum[0], TEST_FILE);
ArrayFile.Reader reader = new ArrayFile.Reader(fs, FILE, conf); ArrayFile.Reader reader = new ArrayFile.Reader(fs, TEST_FILE, conf);
assertNull(reader.get(0, new RandomDatum())); assertNull(reader.get(0, new RandomDatum()));
reader.close(); reader.close();
} }
@ -87,31 +92,75 @@ public class TestArrayFile extends TestCase {
LOG.debug("reading " + data.length + " debug"); LOG.debug("reading " + data.length + " debug");
} }
ArrayFile.Reader reader = new ArrayFile.Reader(fs, file, conf); ArrayFile.Reader reader = new ArrayFile.Reader(fs, file, conf);
for (int i = 0; i < data.length; i++) { // try forwards try {
reader.get(i, v); for (int i = 0; i < data.length; i++) { // try forwards
if (!v.equals(data[i])) { reader.get(i, v);
throw new RuntimeException("wrong value at " + i); if (!v.equals(data[i])) {
throw new RuntimeException("wrong value at " + i);
}
} }
} for (int i = data.length-1; i >= 0; i--) { // then backwards
for (int i = data.length-1; i >= 0; i--) { // then backwards reader.get(i, v);
reader.get(i, v); if (!v.equals(data[i])) {
if (!v.equals(data[i])) { throw new RuntimeException("wrong value at " + i);
throw new RuntimeException("wrong value at " + i); }
} }
} if(LOG.isDebugEnabled()) {
reader.close(); LOG.debug("done reading " + data.length + " debug");
if(LOG.isDebugEnabled()) { }
LOG.debug("done reading " + data.length + " debug"); } finally {
reader.close();
} }
} }
/**
* test on {@link ArrayFile.Reader} iteration methods
* <pre>
* {@code next(), seek()} in and out of range.
* </pre>
*/
public void testArrayFileIteration() {
int SIZE = 10;
Configuration conf = new Configuration();
try {
FileSystem fs = FileSystem.get(conf);
ArrayFile.Writer writer = new ArrayFile.Writer(conf, fs, TEST_FILE,
LongWritable.class, CompressionType.RECORD, defaultProgressable);
assertNotNull("testArrayFileIteration error !!!", writer);
for (int i = 0; i < SIZE; i++)
writer.append(new LongWritable(i));
writer.close();
ArrayFile.Reader reader = new ArrayFile.Reader(fs, TEST_FILE, conf);
LongWritable nextWritable = new LongWritable(0);
for (int i = 0; i < SIZE; i++) {
nextWritable = (LongWritable)reader.next(nextWritable);
assertEquals(nextWritable.get(), i);
}
assertTrue("testArrayFileIteration seek error !!!",
reader.seek(new LongWritable(6)));
nextWritable = (LongWritable) reader.next(nextWritable);
assertTrue("testArrayFileIteration error !!!", reader.key() == 7);
assertTrue("testArrayFileIteration error !!!",
nextWritable.equals(new LongWritable(7)));
assertFalse("testArrayFileIteration error !!!",
reader.seek(new LongWritable(SIZE + 5)));
reader.close();
} catch (Exception ex) {
fail("testArrayFileIteration error !!!");
}
}
/** For debugging and testing. */ /** For debugging and testing. */
public static void main(String[] args) throws Exception { public static void main(String[] args) throws Exception {
int count = 1024 * 1024; int count = 1024 * 1024;
boolean create = true; boolean create = true;
boolean check = true; boolean check = true;
String file = FILE; String file = TEST_FILE;
String usage = "Usage: TestArrayFile [-count N] [-nocreate] [-nocheck] file"; String usage = "Usage: TestArrayFile [-count N] [-nocreate] [-nocheck] file";
if (args.length == 0) { if (args.length == 0) {
@ -160,4 +209,11 @@ public class TestArrayFile extends TestCase {
fs.close(); fs.close();
} }
} }
private static final Progressable defaultProgressable = new Progressable() {
@Override
public void progress() {
}
};
} }
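
For context on the API under test: an ArrayFile is a dense, index-keyed MapFile variant, written by appending values in order and read back by position. A self-contained sketch assuming a local filesystem and an illustrative /tmp path; the constructors and calls mirror the ones exercised above.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.io.ArrayFile;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.SequenceFile.CompressionType;
import org.apache.hadoop.util.Progressable;

public class ArrayFileExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.getLocal(conf);
    String file = "/tmp/example.array";   // illustrative location

    Progressable noop = new Progressable() {
      @Override public void progress() { }
    };

    // Values are implicitly keyed by their append order (0, 1, 2, ...).
    ArrayFile.Writer writer = new ArrayFile.Writer(conf, fs, file,
        LongWritable.class, CompressionType.RECORD, noop);
    for (long i = 0; i < 10; i++) {
      writer.append(new LongWritable(i * i));
    }
    writer.close();

    // Random access by index, mirroring reader.get(i, v) in the test above.
    ArrayFile.Reader reader = new ArrayFile.Reader(fs, file, conf);
    try {
      LongWritable value = new LongWritable();
      reader.get(7, value);               // value.get() == 49
      System.out.println("element 7 = " + value.get());
    } finally {
      reader.close();
    }
  }
}
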


@ -1,4 +1,4 @@
/** /*
* Licensed to the Apache Software Foundation (ASF) under one * Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file * or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information * distributed with this work for additional information
@ -20,6 +20,8 @@ package org.apache.hadoop.io;
import java.io.*; import java.io.*;
import org.junit.Assert;
import junit.framework.TestCase; import junit.framework.TestCase;
/** Unit tests for ArrayWritable */ /** Unit tests for ArrayWritable */
@ -61,4 +63,50 @@ public class TestArrayWritable extends TestCase {
assertEquals(destElements[i],elements[i]); assertEquals(destElements[i],elements[i]);
} }
} }
/**
* test {@link ArrayWritable} toArray() method
*/
public void testArrayWritableToArray() {
Text[] elements = {new Text("zero"), new Text("one"), new Text("two")};
TextArrayWritable arrayWritable = new TextArrayWritable();
arrayWritable.set(elements);
Object array = arrayWritable.toArray();
assertTrue("TestArrayWritable testArrayWritableToArray error!!! ", array instanceof Text[]);
Text[] destElements = (Text[]) array;
for (int i = 0; i < elements.length; i++) {
assertEquals(destElements[i], elements[i]);
}
}
/**
* test {@link ArrayWritable} constructor with null
*/
public void testNullArgument() {
try {
Class<? extends Writable> valueClass = null;
new ArrayWritable(valueClass);
fail("testNullArgument error !!!");
} catch (IllegalArgumentException exp) {
// expected: a null value class must be rejected
} catch (Exception e) {
fail("testNullArgument error !!!");
}
}
/**
* test {@link ArrayWritable} constructor with {@code String[]} as a parameter
*/
@SuppressWarnings("deprecation")
public void testArrayWritableStringConstructor() {
String[] original = { "test1", "test2", "test3" };
ArrayWritable arrayWritable = new ArrayWritable(original);
assertEquals("testArrayWritableStringConstructor class error!!!",
UTF8.class, arrayWritable.getValueClass());
Assert.assertArrayEquals("testArrayWritableStringConstructor toString error!!!",
original, arrayWritable.toStrings());
}
} }


@ -18,28 +18,53 @@
package org.apache.hadoop.io; package org.apache.hadoop.io;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.util.ArrayList; import java.util.ArrayList;
import java.util.Collections; import java.util.Collections;
import java.util.List; import java.util.List;
import junit.framework.TestCase;
import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.LocalFileSystem;
import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.SequenceFile.CompressionType;
import junit.framework.TestCase; import org.apache.hadoop.io.compress.CompressionCodec;
import org.apache.hadoop.io.compress.CompressionInputStream;
import org.apache.hadoop.io.compress.CompressionOutputStream;
import org.apache.hadoop.io.compress.Compressor;
import org.apache.hadoop.io.compress.Decompressor;
import org.apache.hadoop.util.Progressable;
import org.junit.Assert;
public class TestBloomMapFile extends TestCase { public class TestBloomMapFile extends TestCase {
private static Configuration conf = new Configuration(); private static Configuration conf = new Configuration();
private static final Path TEST_ROOT = new Path(
System.getProperty("test.build.data", "/tmp"),
TestMapFile.class.getSimpleName());
private static final Path TEST_DIR = new Path(TEST_ROOT, "testfile");
private static final Path TEST_FILE = new Path(TEST_ROOT, "testfile");
@Override
public void setUp() throws Exception {
LocalFileSystem fs = FileSystem.getLocal(conf);
if (fs.exists(TEST_ROOT) && !fs.delete(TEST_ROOT, true)) {
Assert.fail("Can't clean up test root dir");
}
fs.mkdirs(TEST_ROOT);
}
@SuppressWarnings("deprecation")
public void testMembershipTest() throws Exception { public void testMembershipTest() throws Exception {
// write the file // write the file
Path dirName = new Path(System.getProperty("test.build.data",".") +
getName() + ".bloommapfile");
FileSystem fs = FileSystem.getLocal(conf); FileSystem fs = FileSystem.getLocal(conf);
Path qualifiedDirName = fs.makeQualified(dirName); Path qualifiedDirName = fs.makeQualified(TEST_DIR);
conf.setInt("io.mapfile.bloom.size", 2048); conf.setInt("io.mapfile.bloom.size", 2048);
BloomMapFile.Writer writer = new BloomMapFile.Writer(conf, fs, BloomMapFile.Writer writer = new BloomMapFile.Writer(conf, fs,
qualifiedDirName.toString(), IntWritable.class, Text.class); qualifiedDirName.toString(), IntWritable.class, Text.class);
IntWritable key = new IntWritable(); IntWritable key = new IntWritable();
Text value = new Text(); Text value = new Text();
for (int i = 0; i < 2000; i += 2) { for (int i = 0; i < 2000; i += 2) {
@ -48,7 +73,7 @@ public class TestBloomMapFile extends TestCase {
writer.append(key, value); writer.append(key, value);
} }
writer.close(); writer.close();
BloomMapFile.Reader reader = new BloomMapFile.Reader(fs, BloomMapFile.Reader reader = new BloomMapFile.Reader(fs,
qualifiedDirName.toString(), conf); qualifiedDirName.toString(), conf);
// check false positives rate // check false positives rate
@ -58,9 +83,11 @@ public class TestBloomMapFile extends TestCase {
key.set(i); key.set(i);
boolean exists = reader.probablyHasKey(key); boolean exists = reader.probablyHasKey(key);
if (i % 2 == 0) { if (i % 2 == 0) {
if (!exists) falseNeg++; if (!exists)
falseNeg++;
} else { } else {
if (exists) falsePos++; if (exists)
falsePos++;
} }
} }
reader.close(); reader.close();
@ -71,13 +98,13 @@ public class TestBloomMapFile extends TestCase {
assertTrue(falsePos < 2); assertTrue(falsePos < 2);
} }
private void checkMembershipVaryingSizedKeys(String name, List<Text> keys) throws Exception { @SuppressWarnings("deprecation")
Path dirName = new Path(System.getProperty("test.build.data",".") + private void checkMembershipVaryingSizedKeys(String name, List<Text> keys)
name + ".bloommapfile"); throws Exception {
FileSystem fs = FileSystem.getLocal(conf); FileSystem fs = FileSystem.getLocal(conf);
Path qualifiedDirName = fs.makeQualified(dirName); Path qualifiedDirName = fs.makeQualified(TEST_DIR);
BloomMapFile.Writer writer = new BloomMapFile.Writer(conf, fs, BloomMapFile.Writer writer = new BloomMapFile.Writer(conf, fs,
qualifiedDirName.toString(), Text.class, NullWritable.class); qualifiedDirName.toString(), Text.class, NullWritable.class);
for (Text key : keys) { for (Text key : keys) {
writer.append(key, NullWritable.get()); writer.append(key, NullWritable.get());
} }
@ -88,7 +115,8 @@ public class TestBloomMapFile extends TestCase {
qualifiedDirName.toString(), conf); qualifiedDirName.toString(), conf);
Collections.reverse(keys); Collections.reverse(keys);
for (Text key : keys) { for (Text key : keys) {
assertTrue("False negative for existing key " + key, reader.probablyHasKey(key)); assertTrue("False negative for existing key " + key,
reader.probablyHasKey(key));
} }
reader.close(); reader.close();
fs.delete(qualifiedDirName, true); fs.delete(qualifiedDirName, true);
@ -108,4 +136,171 @@ public class TestBloomMapFile extends TestCase {
checkMembershipVaryingSizedKeys(getName(), list); checkMembershipVaryingSizedKeys(getName(), list);
} }
/**
* test {@code BloomMapFile.delete()} method
*/
public void testDeleteFile() {
try {
FileSystem fs = FileSystem.getLocal(conf);
BloomMapFile.Writer writer = new BloomMapFile.Writer(conf, TEST_FILE,
MapFile.Writer.keyClass(IntWritable.class),
MapFile.Writer.valueClass(Text.class));
assertNotNull("testDeleteFile error !!!", writer);
BloomMapFile.delete(fs, "." + TEST_FILE);
} catch (Exception ex) {
fail("unexpected exception in testDeleteFile !!!");
}
}
/**
* test {@link BloomMapFile.Reader} constructor with
* IOException
*/
public void testIOExceptionInWriterConstructor() {
Path dirNameSpy = org.mockito.Mockito.spy(TEST_FILE);
try {
BloomMapFile.Writer writer = new BloomMapFile.Writer(conf, TEST_FILE,
MapFile.Writer.keyClass(IntWritable.class),
MapFile.Writer.valueClass(Text.class));
writer.append(new IntWritable(1), new Text("123124142"));
writer.close();
org.mockito.Mockito.when(dirNameSpy.getFileSystem(conf)).thenThrow(
new IOException());
BloomMapFile.Reader reader = new BloomMapFile.Reader(dirNameSpy, conf,
MapFile.Reader.comparator(new WritableComparator(IntWritable.class)));
assertNull("testIOExceptionInWriterConstructor error !!!",
reader.getBloomFilter());
reader.close();
} catch (Exception ex) {
fail("unexpected exception in testIOExceptionInWriterConstructor !!!");
}
}
/**
* test {@link BloomMapFile.Reader.get()} method
*/
public void testGetBloomMapFile() {
int SIZE = 10;
try {
BloomMapFile.Writer writer = new BloomMapFile.Writer(conf, TEST_FILE,
MapFile.Writer.keyClass(IntWritable.class),
MapFile.Writer.valueClass(Text.class));
for (int i = 0; i < SIZE; i++) {
writer.append(new IntWritable(i), new Text());
}
writer.close();
BloomMapFile.Reader reader = new BloomMapFile.Reader(TEST_FILE, conf,
MapFile.Reader.comparator(new WritableComparator(IntWritable.class)));
for (int i = 0; i < SIZE; i++) {
assertNotNull("testGetBloomMapFile error !!!",
reader.get(new IntWritable(i), new Text()));
}
assertNull("testGetBloomMapFile error !!!",
reader.get(new IntWritable(SIZE + 5), new Text()));
reader.close();
} catch (Exception ex) {
fail("unexpected exception in testGetBloomMapFile !!!");
}
}
/**
* test {@code BloomMapFile.Writer} constructors
*/
@SuppressWarnings("deprecation")
public void testBloomMapFileConstructors() {
try {
FileSystem ts = FileSystem.get(conf);
String testFileName = TEST_FILE.toString();
BloomMapFile.Writer writer1 = new BloomMapFile.Writer(conf, ts,
testFileName, IntWritable.class, Text.class, CompressionType.BLOCK,
defaultCodec, defaultProgress);
assertNotNull("testBloomMapFileConstructors error !!!", writer1);
BloomMapFile.Writer writer2 = new BloomMapFile.Writer(conf, ts,
testFileName, IntWritable.class, Text.class, CompressionType.BLOCK,
defaultProgress);
assertNotNull("testBloomMapFileConstructors error !!!", writer2);
BloomMapFile.Writer writer3 = new BloomMapFile.Writer(conf, ts,
testFileName, IntWritable.class, Text.class, CompressionType.BLOCK);
assertNotNull("testBloomMapFileConstructors error !!!", writer3);
BloomMapFile.Writer writer4 = new BloomMapFile.Writer(conf, ts,
testFileName, IntWritable.class, Text.class, CompressionType.RECORD,
defaultCodec, defaultProgress);
assertNotNull("testBloomMapFileConstructors error !!!", writer4);
BloomMapFile.Writer writer5 = new BloomMapFile.Writer(conf, ts,
testFileName, IntWritable.class, Text.class, CompressionType.RECORD,
defaultProgress);
assertNotNull("testBloomMapFileConstructors error !!!", writer5);
BloomMapFile.Writer writer6 = new BloomMapFile.Writer(conf, ts,
testFileName, IntWritable.class, Text.class, CompressionType.RECORD);
assertNotNull("testBloomMapFileConstructors error !!!", writer6);
BloomMapFile.Writer writer7 = new BloomMapFile.Writer(conf, ts,
testFileName, WritableComparator.get(Text.class), Text.class);
assertNotNull("testBloomMapFileConstructors error !!!", writer7);
} catch (Exception ex) {
fail("testBloomMapFileConstructors error !!!");
}
}
static final Progressable defaultProgress = new Progressable() {
@Override
public void progress() {
}
};
static final CompressionCodec defaultCodec = new CompressionCodec() {
@Override
public String getDefaultExtension() {
return null;
}
@Override
public Class<? extends Decompressor> getDecompressorType() {
return null;
}
@Override
public Class<? extends Compressor> getCompressorType() {
return null;
}
@Override
public CompressionOutputStream createOutputStream(OutputStream out,
Compressor compressor) throws IOException {
return null;
}
@Override
public CompressionOutputStream createOutputStream(OutputStream out)
throws IOException {
return null;
}
@Override
public CompressionInputStream createInputStream(InputStream in,
Decompressor decompressor) throws IOException {
return null;
}
@Override
public CompressionInputStream createInputStream(InputStream in)
throws IOException {
return null;
}
@Override
public Decompressor createDecompressor() {
return null;
}
@Override
public Compressor createCompressor() {
return null;
}
};
} }
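
Several of the negative tests above use Mockito spies on FileSystem and Path to inject IOExceptions into otherwise healthy objects. The sketch below shows the same fault-injection pattern on a plain ArrayList so it can be read without the Hadoop types; the class name and message strings are illustrative.

import static org.mockito.Mockito.doThrow;
import static org.mockito.Mockito.spy;

import java.util.ArrayList;
import java.util.List;

public class SpyFaultInjectionExample {
  public static void main(String[] args) {
    List<String> real = new ArrayList<String>();
    real.add("payload");

    // A spy delegates to the real object except where explicitly stubbed.
    List<String> flaky = spy(real);

    // doThrow(...).when(spy).call() is the stubbing form that never invokes
    // the real method; the tests above use when(...).thenThrow(...), which
    // works there because the spied getters are side-effect free.
    doThrow(new RuntimeException("injected failure")).when(flaky).get(0);

    System.out.println(flaky.size());   // 1 -> real behaviour preserved
    try {
      flaky.get(0);                     // stubbed -> throws
    } catch (RuntimeException expected) {
      System.out.println("caught: " + expected.getMessage());
    }
  }
}
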


@ -50,4 +50,28 @@ public class TestBooleanWritable {
out.flush(); out.flush();
return out; return out;
} }
/**
* test {@link BooleanWritable} methods hashCode(), equals(), compareTo()
*/
@Test
public void testCommonMethods() {
assertTrue("testCommonMethods1 error !!!", newInstance(true).equals(newInstance(true)));
assertTrue("testCommonMethods2 error !!!", newInstance(false).equals(newInstance(false)));
assertFalse("testCommonMethods3 error !!!", newInstance(false).equals(newInstance(true)));
assertTrue("testCommonMethods4 error !!!", checkHashCode(newInstance(true), newInstance(true)));
assertFalse("testCommonMethods5 error !!! ", checkHashCode(newInstance(true), newInstance(false)));
assertTrue("testCommonMethods6 error !!!", newInstance(true).compareTo(newInstance(false)) > 0 );
assertTrue("testCommonMethods7 error !!!", newInstance(false).compareTo(newInstance(true)) < 0 );
assertTrue("testCommonMethods8 error !!!", newInstance(false).compareTo(newInstance(false)) == 0 );
assertEquals("testCommonMethods9 error !!!", "true", newInstance(true).toString());
}
private boolean checkHashCode(BooleanWritable f, BooleanWritable s) {
return f.hashCode() == s.hashCode();
}
private static BooleanWritable newInstance(boolean flag) {
return new BooleanWritable(flag);
}
} }


@ -133,5 +133,24 @@ public class TestBytesWritable {
assertTrue("buffer created with (array, len) has bad length", assertTrue("buffer created with (array, len) has bad length",
zeroBuf.getLength() == copyBuf.getLength()); zeroBuf.getLength() == copyBuf.getLength());
} }
/**
* test {@link ByteWritable}
* methods compareTo(), toString(), equals()
*/
@Test
public void testObjectCommonMethods() {
byte b = 0x9;
ByteWritable bw = new ByteWritable();
bw.set(b);
assertTrue("testSetByteWritable error", bw.get() == b);
assertTrue("testSetByteWritable error < 0", bw.compareTo(new ByteWritable((byte)0xA)) < 0);
assertTrue("testSetByteWritable error > 0", bw.compareTo(new ByteWritable((byte)0x8)) > 0);
assertTrue("testSetByteWritable error == 0", bw.compareTo(new ByteWritable((byte)0x9)) == 0);
assertTrue("testSetByteWritable equals error !!!", bw.equals(new ByteWritable((byte)0x9)));
assertTrue("testSetByteWritable equals error !!!", ! bw.equals(new ByteWritable((byte)0xA)));
assertTrue("testSetByteWritable equals error !!!", ! bw.equals(new IntWritable(1)));
assertEquals("testSetByteWritable error ", "9", bw.toString());
}
} }


@ -1,4 +1,4 @@
/** /*
* Licensed to the Apache Software Foundation (ASF) under one * Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file * or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information * distributed with this work for additional information
@ -20,6 +20,7 @@ package org.apache.hadoop.io;
import java.io.IOException; import java.io.IOException;
import java.util.EnumSet; import java.util.EnumSet;
import java.util.Iterator;
import java.lang.reflect.Type; import java.lang.reflect.Type;
import junit.framework.TestCase; import junit.framework.TestCase;
@ -32,8 +33,8 @@ public class TestEnumSetWritable extends TestCase {
} }
EnumSet<TestEnumSet> nonEmptyFlag = EnumSet.of(TestEnumSet.APPEND); EnumSet<TestEnumSet> nonEmptyFlag = EnumSet.of(TestEnumSet.APPEND);
EnumSetWritable<TestEnumSet> nonEmptyFlagWritable = new EnumSetWritable<TestEnumSet>( EnumSetWritable<TestEnumSet> nonEmptyFlagWritable =
nonEmptyFlag); new EnumSetWritable<TestEnumSet>(nonEmptyFlag);
@SuppressWarnings("unchecked") @SuppressWarnings("unchecked")
public void testSerializeAndDeserializeNonEmpty() throws IOException { public void testSerializeAndDeserializeNonEmpty() throws IOException {
@ -60,11 +61,12 @@ public class TestEnumSetWritable extends TestCase {
} }
assertTrue( assertTrue(
"Instantiate empty EnumSetWritable with no element type class providesd should throw exception.", "Instantiation of empty EnumSetWritable with no element type class "
+ "provided should throw exception.",
gotException); gotException);
EnumSetWritable<TestEnumSet> emptyFlagWritable = new EnumSetWritable<TestEnumSet>( EnumSetWritable<TestEnumSet> emptyFlagWritable =
emptyFlag, TestEnumSet.class); new EnumSetWritable<TestEnumSet>(emptyFlag, TestEnumSet.class);
DataOutputBuffer out = new DataOutputBuffer(); DataOutputBuffer out = new DataOutputBuffer();
ObjectWritable.writeObject(out, emptyFlagWritable, emptyFlagWritable ObjectWritable.writeObject(out, emptyFlagWritable, emptyFlagWritable
.getClass(), null); .getClass(), null);
@ -86,11 +88,12 @@ public class TestEnumSetWritable extends TestCase {
} }
assertTrue( assertTrue(
"Instantiate empty EnumSetWritable with no element type class providesd should throw exception.", "Instantiation of empty EnumSetWritable with no element type class "
+ "provided should throw exception",
gotException); gotException);
EnumSetWritable<TestEnumSet> nullFlagWritable = new EnumSetWritable<TestEnumSet>( EnumSetWritable<TestEnumSet> nullFlagWritable =
null, TestEnumSet.class); new EnumSetWritable<TestEnumSet>(null, TestEnumSet.class);
DataOutputBuffer out = new DataOutputBuffer(); DataOutputBuffer out = new DataOutputBuffer();
ObjectWritable.writeObject(out, nullFlagWritable, nullFlagWritable ObjectWritable.writeObject(out, nullFlagWritable, nullFlagWritable
@ -105,10 +108,54 @@ public class TestEnumSetWritable extends TestCase {
public EnumSetWritable<TestEnumSet> testField; public EnumSetWritable<TestEnumSet> testField;
public void testAvroReflect() throws Exception { public void testAvroReflect() throws Exception {
String schema = "{\"type\":\"array\",\"items\":{\"type\":\"enum\",\"name\":\"TestEnumSet\",\"namespace\":\"org.apache.hadoop.io.TestEnumSetWritable$\",\"symbols\":[\"CREATE\",\"OVERWRITE\",\"APPEND\"]},\"java-class\":\"org.apache.hadoop.io.EnumSetWritable\"}"; String schema = "{\"type\":\"array\",\"items\":{\"type\":\"enum\","
+ "\"name\":\"TestEnumSet\","
+ "\"namespace\":\"org.apache.hadoop.io.TestEnumSetWritable$\","
+ "\"symbols\":[\"CREATE\",\"OVERWRITE\",\"APPEND\"]},"
+ "\"java-class\":\"org.apache.hadoop.io.EnumSetWritable\"}";
Type type = Type type =
TestEnumSetWritable.class.getField("testField").getGenericType(); TestEnumSetWritable.class.getField("testField").getGenericType();
AvroTestUtil.testReflect(nonEmptyFlagWritable, type, schema); AvroTestUtil.testReflect(nonEmptyFlagWritable, type, schema);
}
/**
* test {@link EnumSetWritable} equals() method
*/
public void testEnumSetWritableEquals() {
EnumSetWritable<TestEnumSet> eset1 = new EnumSetWritable<TestEnumSet>(
EnumSet.of(TestEnumSet.APPEND, TestEnumSet.CREATE), TestEnumSet.class);
EnumSetWritable<TestEnumSet> eset2 = new EnumSetWritable<TestEnumSet>(
EnumSet.of(TestEnumSet.APPEND, TestEnumSet.CREATE), TestEnumSet.class);
assertTrue("testEnumSetWritableEquals error !!!", eset1.equals(eset2));
assertFalse("testEnumSetWritableEquals error !!!",
eset1.equals(new EnumSetWritable<TestEnumSet>(EnumSet.of(
TestEnumSet.APPEND, TestEnumSet.CREATE, TestEnumSet.OVERWRITE),
TestEnumSet.class)));
assertTrue("testEnumSetWritableEquals getElementType error !!!", eset1
.getElementType().equals(TestEnumSet.class));
} }
/**
* test {@code EnumSetWritable.write(DataOutputBuffer out)}
* and iteration by TestEnumSet through iterator().
*/
public void testEnumSetWritableWriteRead() throws Exception {
EnumSetWritable<TestEnumSet> srcSet = new EnumSetWritable<TestEnumSet>(
EnumSet.of(TestEnumSet.APPEND, TestEnumSet.CREATE), TestEnumSet.class);
DataOutputBuffer out = new DataOutputBuffer();
srcSet.write(out);
EnumSetWritable<TestEnumSet> dstSet = new EnumSetWritable<TestEnumSet>();
DataInputBuffer in = new DataInputBuffer();
in.reset(out.getData(), out.getLength());
dstSet.readFields(in);
EnumSet<TestEnumSet> result = dstSet.get();
Iterator<TestEnumSet> dstIter = result.iterator();
Iterator<TestEnumSet> srcIter = srcSet.iterator();
while (dstIter.hasNext() && srcIter.hasNext()) {
assertEquals("testEnumSetWritableWriteRead error !!!", dstIter.next(),
srcIter.next());
}
}
} }


@ -17,29 +17,592 @@
*/ */
package org.apache.hadoop.io; package org.apache.hadoop.io;
import java.io.File;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.LocalFileSystem;
import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.SequenceFile.CompressionType;
import org.apache.hadoop.io.compress.CompressionCodec;
import org.apache.hadoop.io.compress.CompressionInputStream;
import org.apache.hadoop.io.compress.CompressionOutputStream;
import org.apache.hadoop.io.compress.Compressor;
import org.apache.hadoop.io.compress.Decompressor;
import org.apache.hadoop.util.Progressable;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
import junit.framework.TestCase; import static org.junit.Assert.*;
public class TestMapFile extends TestCase { import static org.mockito.Mockito.*;
public class TestMapFile {
private static final Path TEST_DIR = new Path(
System.getProperty("test.build.data", "/tmp"),
TestMapFile.class.getSimpleName());
private static Configuration conf = new Configuration(); private static Configuration conf = new Configuration();
@Before
public void setup() throws Exception {
LocalFileSystem fs = FileSystem.getLocal(conf);
if (fs.exists(TEST_DIR) && !fs.delete(TEST_DIR, true)) {
Assert.fail("Can't clean up test root dir");
}
fs.mkdirs(TEST_DIR);
}
private static final Progressable defaultProgressable = new Progressable() {
@Override
public void progress() {
}
};
private static final CompressionCodec defaultCodec = new CompressionCodec() {
@Override
public CompressionOutputStream createOutputStream(OutputStream out)
throws IOException {
return null;
}
@Override
public CompressionOutputStream createOutputStream(OutputStream out,
Compressor compressor) throws IOException {
return null;
}
@Override
public Class<? extends Compressor> getCompressorType() {
return null;
}
@Override
public Compressor createCompressor() {
return null;
}
@Override
public CompressionInputStream createInputStream(InputStream in)
throws IOException {
return null;
}
@Override
public CompressionInputStream createInputStream(InputStream in,
Decompressor decompressor) throws IOException {
return null;
}
@Override
public Class<? extends Decompressor> getDecompressorType() {
return null;
}
@Override
public Decompressor createDecompressor() {
return null;
}
@Override
public String getDefaultExtension() {
return null;
}
};
private MapFile.Writer createWriter(String fileName,
Class<? extends WritableComparable<?>> keyClass,
Class<? extends Writable> valueClass) throws IOException {
Path dirName = new Path(TEST_DIR, fileName);
MapFile.Writer.setIndexInterval(conf, 4);
return new MapFile.Writer(conf, dirName, MapFile.Writer.keyClass(keyClass),
MapFile.Writer.valueClass(valueClass));
}
private MapFile.Reader createReader(String fileName,
Class<? extends WritableComparable<?>> keyClass) throws IOException {
Path dirName = new Path(TEST_DIR, fileName);
return new MapFile.Reader(dirName, conf,
MapFile.Reader.comparator(new WritableComparator(keyClass)));
}
/**
* test {@code MapFile.Reader.getClosest()} method
*
*/
@Test
public void testGetClosestOnCurrentApi() throws Exception {
final String TEST_PREFIX = "testGetClosestOnCurrentApi.mapfile";
MapFile.Writer writer = createWriter(TEST_PREFIX, Text.class, Text.class);
int FIRST_KEY = 1;
// Test keys: 1, 11, 21, ..., 91
for (int i = FIRST_KEY; i < 100; i += 10) {
Text t = new Text(Integer.toString(i));
writer.append(t, t);
}
writer.close();
MapFile.Reader reader = createReader(TEST_PREFIX, Text.class);
Text key = new Text("55");
Text value = new Text();
// Test get closest with step forward
Text closest = (Text) reader.getClosest(key, value);
assertEquals(new Text("61"), closest);
// Test get closest with step back
closest = (Text) reader.getClosest(key, value, true);
assertEquals(new Text("51"), closest);
// Test get closest when we pass explicit key
final Text explicitKey = new Text("21");
closest = (Text) reader.getClosest(explicitKey, value);
assertEquals(new Text("21"), explicitKey);
// Test what happens at boundaries. Assert if searching a key that is
// less than first key in the mapfile, that the first key is returned.
key = new Text("00");
closest = (Text) reader.getClosest(key, value);
assertEquals(FIRST_KEY, Integer.parseInt(closest.toString()));
// Assert that null is returned if key is > last entry in mapfile.
key = new Text("92");
closest = (Text) reader.getClosest(key, value);
assertNull("Not null key in testGetClosestWithNewCode", closest);
// If we were looking for the key before, we should get the last key
closest = (Text) reader.getClosest(key, value, true);
assertEquals(new Text("91"), closest);
}
/**
* test {@code MapFile.Reader.midKey() } method
*/
@Test
public void testMidKeyOnCurrentApi() throws Exception {
// Write a mapfile of simple data: keys are
final String TEST_PREFIX = "testMidKeyOnCurrentApi.mapfile";
MapFile.Writer writer = createWriter(TEST_PREFIX, IntWritable.class,
IntWritable.class);
// 0,1,....9
int SIZE = 10;
for (int i = 0; i < SIZE; i++)
writer.append(new IntWritable(i), new IntWritable(i));
writer.close();
MapFile.Reader reader = createReader(TEST_PREFIX, IntWritable.class);
assertEquals(new IntWritable((SIZE - 1) / 2), reader.midKey());
}
/**
* test {@code MapFile.Writer.rename()} method
*/
@Test
public void testRename() {
final String NEW_FILE_NAME = "test-new.mapfile";
final String OLD_FILE_NAME = "test-old.mapfile";
try {
FileSystem fs = FileSystem.getLocal(conf);
MapFile.Writer writer = createWriter(OLD_FILE_NAME, IntWritable.class,
IntWritable.class);
writer.close();
MapFile.rename(fs, new Path(TEST_DIR, OLD_FILE_NAME).toString(),
new Path(TEST_DIR, NEW_FILE_NAME).toString());
MapFile.delete(fs, new Path(TEST_DIR, NEW_FILE_NAME).toString());
} catch (IOException ex) {
fail("testRename error " + ex);
}
}
/**
* test {@code MapFile.rename()}
* method with throwing {@code IOException}
*/
@Test
public void testRenameWithException() {
final String ERROR_MESSAGE = "Can't rename file";
final String NEW_FILE_NAME = "test-new.mapfile";
final String OLD_FILE_NAME = "test-old.mapfile";
try {
FileSystem fs = FileSystem.getLocal(conf);
FileSystem spyFs = spy(fs);
MapFile.Writer writer = createWriter(OLD_FILE_NAME, IntWritable.class,
IntWritable.class);
writer.close();
Path oldDir = new Path(TEST_DIR, OLD_FILE_NAME);
Path newDir = new Path(TEST_DIR, NEW_FILE_NAME);
when(spyFs.rename(oldDir, newDir)).thenThrow(
new IOException(ERROR_MESSAGE));
MapFile.rename(spyFs, oldDir.toString(), newDir.toString());
fail("testRenameWithException no exception error !!!");
} catch (IOException ex) {
assertEquals("testRenameWithException invalid IOExceptionMessage !!!",
ex.getMessage(), ERROR_MESSAGE);
}
}
@Test
public void testRenameWithFalse() {
final String ERROR_MESSAGE = "Could not rename";
final String NEW_FILE_NAME = "test-new.mapfile";
final String OLD_FILE_NAME = "test-old.mapfile";
try {
FileSystem fs = FileSystem.getLocal(conf);
FileSystem spyFs = spy(fs);
MapFile.Writer writer = createWriter(OLD_FILE_NAME, IntWritable.class,
IntWritable.class);
writer.close();
Path oldDir = new Path(TEST_DIR, OLD_FILE_NAME);
Path newDir = new Path(TEST_DIR, NEW_FILE_NAME);
when(spyFs.rename(oldDir, newDir)).thenReturn(false);
MapFile.rename(spyFs, oldDir.toString(), newDir.toString());
fail("testRenameWithException no exception error !!!");
} catch (IOException ex) {
assertTrue("testRenameWithFalse invalid IOExceptionMessage error !!!", ex
.getMessage().startsWith(ERROR_MESSAGE));
}
}
/**
* test throwing {@code IOException} in {@code MapFile.Writer} constructor
*/
@Test
public void testWriteWithFailDirCreation() {
String ERROR_MESSAGE = "Mkdirs failed to create directory";
Path dirName = new Path(TEST_DIR, "fail.mapfile");
MapFile.Writer writer = null;
try {
FileSystem fs = FileSystem.getLocal(conf);
FileSystem spyFs = spy(fs);
Path pathSpy = spy(dirName);
when(pathSpy.getFileSystem(conf)).thenReturn(spyFs);
when(spyFs.mkdirs(dirName)).thenReturn(false);
writer = new MapFile.Writer(conf, pathSpy,
MapFile.Writer.keyClass(IntWritable.class),
MapFile.Writer.valueClass(Text.class));
fail("testWriteWithFailDirCreation error !!!");
} catch (IOException ex) {
assertTrue("testWriteWithFailDirCreation ex error !!!", ex.getMessage()
.startsWith(ERROR_MESSAGE));
} finally {
if (writer != null)
try {
writer.close();
} catch (IOException e) {
}
}
}
/**
* test {@code MapFile.Reader.finalKey()} method
*/
@Test
public void testOnFinalKey() {
final String TEST_METHOD_KEY = "testOnFinalKey.mapfile";
int SIZE = 10;
try {
MapFile.Writer writer = createWriter(TEST_METHOD_KEY, IntWritable.class,
IntWritable.class);
for (int i = 0; i < SIZE; i++)
writer.append(new IntWritable(i), new IntWritable(i));
writer.close();
MapFile.Reader reader = createReader(TEST_METHOD_KEY, IntWritable.class);
IntWritable expectedKey = new IntWritable(0);
reader.finalKey(expectedKey);
assertEquals("testOnFinalKey not same !!!", expectedKey, new IntWritable(
9));
} catch (IOException ex) {
fail("testOnFinalKey error !!!");
}
}
/**
* test {@code MapFile.Writer} constructor with key, value
* and validate it with {@code keyClass(), valueClass()} methods
*/
@Test
public void testKeyValueClasses() {
Class<? extends WritableComparable<?>> keyClass = IntWritable.class;
Class<?> valueClass = Text.class;
try {
createWriter("testKeyValueClasses.mapfile", IntWritable.class, Text.class);
assertNotNull("writer key class null error !!!",
MapFile.Writer.keyClass(keyClass));
assertNotNull("writer value class null error !!!",
MapFile.Writer.valueClass(valueClass));
} catch (IOException ex) {
fail(ex.getMessage());
}
}
/**
* test {@code MapFile.Reader.getClosest() } with wrong class key
*/
@Test
public void testReaderGetClosest() throws Exception {
final String TEST_METHOD_KEY = "testReaderWithWrongKeyClass.mapfile";
try {
MapFile.Writer writer = createWriter(TEST_METHOD_KEY, IntWritable.class,
Text.class);
for (int i = 0; i < 10; i++)
writer.append(new IntWritable(i), new Text("value" + i));
writer.close();
MapFile.Reader reader = createReader(TEST_METHOD_KEY, Text.class);
reader.getClosest(new Text("2"), new Text(""));
fail("no expected exception in testReaderGetClosest !!!");
} catch (IOException ex) {
/* Should be thrown to pass the test */
}
}
/**
* test {@code MapFile.Writer.append()} with a value of the wrong class
*/
@Test
public void testReaderWithWrongValueClass() {
final String TEST_METHOD_KEY = "testReaderWithWrongValueClass.mapfile";
try {
MapFile.Writer writer = createWriter(TEST_METHOD_KEY, IntWritable.class,
Text.class);
writer.append(new IntWritable(0), new IntWritable(0));
fail("no expected exception in testReaderWithWrongValueClass !!!");
} catch (IOException ex) {
/* Should be thrown to pass the test */
}
}
/**
* test {@code MapFile.Reader.next(key, value)} for iteration.
*/
@Test
public void testReaderKeyIteration() {
final String TEST_METHOD_KEY = "testReaderKeyIteration.mapfile";
int SIZE = 10;
int ITERATIONS = 5;
try {
MapFile.Writer writer = createWriter(TEST_METHOD_KEY, IntWritable.class,
Text.class);
int start = 0;
for (int i = 0; i < SIZE; i++)
writer.append(new IntWritable(i), new Text("Value:" + i));
writer.close();
MapFile.Reader reader = createReader(TEST_METHOD_KEY, IntWritable.class);
// test iteration
Writable startValue = new Text("Value:" + start);
int i = 0;
while (i++ < ITERATIONS) {
IntWritable key = new IntWritable(start);
Writable value = startValue;
while (reader.next(key, value)) {
assertNotNull(key);
assertNotNull(value);
}
reader.reset();
}
assertTrue("reader seek error !!!",
reader.seek(new IntWritable(SIZE / 2)));
assertFalse("reader seek error !!!",
reader.seek(new IntWritable(SIZE * 2)));
} catch (IOException ex) {
fail("reader seek error !!!");
}
}
/**
* test {@code MapFile.Writer.testFix} method
*/
@Test
public void testFix() {
final String INDEX_LESS_MAP_FILE = "testFix.mapfile";
int PAIR_SIZE = 20;
try {
FileSystem fs = FileSystem.getLocal(conf);
Path dir = new Path(TEST_DIR, INDEX_LESS_MAP_FILE);
MapFile.Writer writer = createWriter(INDEX_LESS_MAP_FILE,
IntWritable.class, Text.class);
for (int i = 0; i < PAIR_SIZE; i++)
writer.append(new IntWritable(0), new Text("value"));
writer.close();
File indexFile = new File(".", "." + INDEX_LESS_MAP_FILE + "/index");
boolean isDeleted = false;
if (indexFile.exists())
isDeleted = indexFile.delete();
if (isDeleted)
assertTrue("testFix error !!!",
MapFile.fix(fs, dir, IntWritable.class, Text.class, true, conf) == PAIR_SIZE);
} catch (Exception ex) {
fail("testFix error !!!");
}
}
/**
* test all available constructor for {@code MapFile.Writer}
*/
@Test
@SuppressWarnings("deprecation")
public void testDeprecatedConstructors() {
String path = new Path(TEST_DIR, "writes.mapfile").toString();
try {
FileSystem fs = FileSystem.getLocal(conf);
MapFile.Writer writer = new MapFile.Writer(conf, fs, path,
IntWritable.class, Text.class, CompressionType.RECORD);
assertNotNull(writer);
writer = new MapFile.Writer(conf, fs, path, IntWritable.class,
Text.class, CompressionType.RECORD, defaultProgressable);
assertNotNull(writer);
writer = new MapFile.Writer(conf, fs, path, IntWritable.class,
Text.class, CompressionType.RECORD, defaultCodec, defaultProgressable);
assertNotNull(writer);
writer = new MapFile.Writer(conf, fs, path,
WritableComparator.get(Text.class), Text.class);
assertNotNull(writer);
writer = new MapFile.Writer(conf, fs, path,
WritableComparator.get(Text.class), Text.class,
SequenceFile.CompressionType.RECORD);
assertNotNull(writer);
writer = new MapFile.Writer(conf, fs, path,
WritableComparator.get(Text.class), Text.class,
CompressionType.RECORD, defaultProgressable);
assertNotNull(writer);
writer.close();
MapFile.Reader reader = new MapFile.Reader(fs, path,
WritableComparator.get(IntWritable.class), conf);
assertNotNull(reader);
assertNotNull("reader key is null !!!", reader.getKeyClass());
assertNotNull("reader value in null", reader.getValueClass());
} catch (IOException e) {
fail(e.getMessage());
}
}
/**
* test {@code MapFile.Writer} constructor
* with IllegalArgumentException
*
*/
@Test
public void testKeyLessWriterCreation() {
MapFile.Writer writer = null;
try {
writer = new MapFile.Writer(conf, TEST_DIR);
fail("fail in testKeyLessWriterCreation !!!");
} catch (IllegalArgumentException ex) {
} catch (Exception e) {
fail("fail in testKeyLessWriterCreation. Other ex !!!");
} finally {
if (writer != null)
try {
writer.close();
} catch (IOException e) {
}
}
}
/**
* test {@code MapFile.Writer} constructor with IOException
*/
@Test
public void testPathExplosionWriterCreation() {
Path path = new Path(TEST_DIR, "testPathExplosionWriterCreation.mapfile");
String TEST_ERROR_MESSAGE = "Mkdirs failed to create directory "
+ path.getName();
MapFile.Writer writer = null;
try {
FileSystem fsSpy = spy(FileSystem.get(conf));
Path pathSpy = spy(path);
when(fsSpy.mkdirs(path)).thenThrow(new IOException(TEST_ERROR_MESSAGE));
when(pathSpy.getFileSystem(conf)).thenReturn(fsSpy);
writer = new MapFile.Writer(conf, pathSpy,
MapFile.Writer.keyClass(IntWritable.class),
MapFile.Writer.valueClass(IntWritable.class));
fail("fail in testPathExplosionWriterCreation !!!");
} catch (IOException ex) {
assertEquals("testPathExplosionWriterCreation ex message error !!!",
ex.getMessage(), TEST_ERROR_MESSAGE);
} catch (Exception e) {
fail("fail in testPathExplosionWriterCreation. Other ex !!!");
} finally {
if (writer != null)
try {
writer.close();
} catch (IOException e) {
}
}
}
/**
* test {@code MapFile.Writer.append} method with desc order
*/
@Test
public void testDescOrderWithThrowExceptionWriterAppend() {
try {
MapFile.Writer writer = createWriter(".mapfile", IntWritable.class,
Text.class);
writer.append(new IntWritable(2), new Text("value: " + 1));
writer.append(new IntWritable(2), new Text("value: " + 2));
writer.append(new IntWritable(2), new Text("value: " + 4));
writer.append(new IntWritable(1), new Text("value: " + 3));
fail("testDescOrderWithThrowExceptionWriterAppend: expected IOException was not thrown !!!");
} catch (IOException ex) {
} catch (Exception e) {
fail("testDescOrderWithThrowExceptionWriterAppend other ex throw !!!");
}
}
@Test
public void testMainMethodMapFile() {
String path = new Path(TEST_DIR, "mainMethodMapFile.mapfile").toString();
String inFile = "mainMethodMapFile.mapfile";
String outFile = "mainMethodMapFile.mapfile";
String[] args = { path, outFile };
try {
MapFile.Writer writer = createWriter(inFile, IntWritable.class,
Text.class);
writer.append(new IntWritable(1), new Text("test_text1"));
writer.append(new IntWritable(2), new Text("test_text2"));
writer.close();
MapFile.main(args);
} catch (Exception ex) {
fail("testMainMethodMapFile error !!!");
}
}
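
As background for the remaining tests in this class, here is a compact sketch of the MapFile write/read cycle they exercise: keys must arrive in sorted order, and lookups can be exact (get) or nearest-neighbour (getClosest). The path and values below are illustrative; the constructors and getClosest() semantics are the ones the tests rely on.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.MapFile;
import org.apache.hadoop.io.Text;

public class MapFileExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    Path dir = new Path("/tmp/example.mapfile");   // illustrative location

    // Keys must be appended in sorted order; out-of-order keys are rejected
    // with an IOException (see the desc-order test above).
    MapFile.Writer writer = new MapFile.Writer(conf, dir,
        MapFile.Writer.keyClass(IntWritable.class),
        MapFile.Writer.valueClass(Text.class));
    for (int i = 0; i < 100; i += 10) {
      writer.append(new IntWritable(i), new Text("value-" + i));
    }
    writer.close();

    MapFile.Reader reader = new MapFile.Reader(dir, conf);
    try {
      Text value = new Text();
      // Exact lookup ...
      reader.get(new IntWritable(30), value);            // "value-30"
      // ... and nearest-neighbour lookup: 35 has no entry, so getClosest
      // returns the next key at or after it (40), or 30 when before=true.
      IntWritable after = (IntWritable) reader.getClosest(
          new IntWritable(35), value);
      IntWritable before = (IntWritable) reader.getClosest(
          new IntWritable(35), value, true);
      System.out.println(after + " / " + before);        // 40 / 30
    } finally {
      reader.close();
    }
  }
}
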
/** /**
* Test getClosest feature. * Test getClosest feature.
*
* @throws Exception * @throws Exception
*/ */
@Test
@SuppressWarnings("deprecation")
public void testGetClosest() throws Exception { public void testGetClosest() throws Exception {
// Write a mapfile of simple data: keys are // Write a mapfile of simple data: keys are
Path dirName = new Path(System.getProperty("test.build.data",".") + Path dirName = new Path(TEST_DIR, "testGetClosest.mapfile");
getName() + ".mapfile");
FileSystem fs = FileSystem.getLocal(conf); FileSystem fs = FileSystem.getLocal(conf);
Path qualifiedDirName = fs.makeQualified(dirName); Path qualifiedDirName = fs.makeQualified(dirName);
// Make an index entry for every third insertion. // Make an index entry for every third insertion.
MapFile.Writer.setIndexInterval(conf, 3); MapFile.Writer.setIndexInterval(conf, 3);
MapFile.Writer writer = new MapFile.Writer(conf, fs, MapFile.Writer writer = new MapFile.Writer(conf, fs,
qualifiedDirName.toString(), Text.class, Text.class); qualifiedDirName.toString(), Text.class, Text.class);
// Assert that the index interval is 1 // Assert that the index interval is 1
assertEquals(3, writer.getIndexInterval()); assertEquals(3, writer.getIndexInterval());
// Add entries up to 100 in intervals of ten. // Add entries up to 100 in intervals of ten.
@ -51,74 +614,84 @@ public class TestMapFile extends TestCase {
} }
writer.close(); writer.close();
// Now do getClosest on created mapfile. // Now do getClosest on created mapfile.
MapFile.Reader reader = new MapFile.Reader(fs, qualifiedDirName.toString(), MapFile.Reader reader = new MapFile.Reader(qualifiedDirName, conf);
conf); try {
Text key = new Text("55"); Text key = new Text("55");
Text value = new Text(); Text value = new Text();
Text closest = (Text)reader.getClosest(key, value); Text closest = (Text) reader.getClosest(key, value);
// Assert that closest after 55 is 60 // Assert that closest after 55 is 60
assertEquals(new Text("60"), closest); assertEquals(new Text("60"), closest);
// Get closest that falls before the passed key: 50 // Get closest that falls before the passed key: 50
closest = (Text)reader.getClosest(key, value, true); closest = (Text) reader.getClosest(key, value, true);
assertEquals(new Text("50"), closest); assertEquals(new Text("50"), closest);
// Test get closest when we pass explicit key // Test get closest when we pass explicit key
final Text TWENTY = new Text("20"); final Text TWENTY = new Text("20");
closest = (Text)reader.getClosest(TWENTY, value); closest = (Text) reader.getClosest(TWENTY, value);
assertEquals(TWENTY, closest); assertEquals(TWENTY, closest);
closest = (Text)reader.getClosest(TWENTY, value, true); closest = (Text) reader.getClosest(TWENTY, value, true);
assertEquals(TWENTY, closest); assertEquals(TWENTY, closest);
// Test what happens at boundaries. Assert if searching a key that is // Test what happens at boundaries. Assert if searching a key that is
// less than first key in the mapfile, that the first key is returned. // less than first key in the mapfile, that the first key is returned.
key = new Text("00"); key = new Text("00");
closest = (Text)reader.getClosest(key, value); closest = (Text) reader.getClosest(key, value);
assertEquals(FIRST_KEY, Integer.parseInt(closest.toString())); assertEquals(FIRST_KEY, Integer.parseInt(closest.toString()));
// If we're looking for the first key before, and we pass in a key before // If we're looking for the first key before, and we pass in a key before
// the first key in the file, we should get null // the first key in the file, we should get null
closest = (Text)reader.getClosest(key, value, true); closest = (Text) reader.getClosest(key, value, true);
assertNull(closest); assertNull(closest);
// Assert that null is returned if key is > last entry in mapfile. // Assert that null is returned if key is > last entry in mapfile.
key = new Text("99"); key = new Text("99");
closest = (Text)reader.getClosest(key, value); closest = (Text) reader.getClosest(key, value);
assertNull(closest); assertNull(closest);
// If we were looking for the key before, we should get the last key // If we were looking for the key before, we should get the last key
closest = (Text)reader.getClosest(key, value, true); closest = (Text) reader.getClosest(key, value, true);
assertEquals(new Text("90"), closest); assertEquals(new Text("90"), closest);
} finally {
reader.close();
}
} }
@Test
@SuppressWarnings("deprecation")
public void testMidKey() throws Exception { public void testMidKey() throws Exception {
// Write a mapfile of simple data: keys are // Write a mapfile of simple data: keys are
Path dirName = new Path(System.getProperty("test.build.data",".") + Path dirName = new Path(TEST_DIR, "testMidKey.mapfile");
getName() + ".mapfile");
FileSystem fs = FileSystem.getLocal(conf); FileSystem fs = FileSystem.getLocal(conf);
Path qualifiedDirName = fs.makeQualified(dirName); Path qualifiedDirName = fs.makeQualified(dirName);
MapFile.Writer writer = new MapFile.Writer(conf, fs, MapFile.Writer writer = new MapFile.Writer(conf, fs,
qualifiedDirName.toString(), IntWritable.class, IntWritable.class); qualifiedDirName.toString(), IntWritable.class, IntWritable.class);
writer.append(new IntWritable(1), new IntWritable(1)); writer.append(new IntWritable(1), new IntWritable(1));
writer.close(); writer.close();
// Now do getClosest on created mapfile. // Now do getClosest on created mapfile.
MapFile.Reader reader = new MapFile.Reader(fs, qualifiedDirName.toString(), MapFile.Reader reader = new MapFile.Reader(qualifiedDirName, conf);
conf); try {
assertEquals(new IntWritable(1), reader.midKey()); assertEquals(new IntWritable(1), reader.midKey());
} finally {
reader.close();
}
} }
@Test
@SuppressWarnings("deprecation")
public void testMidKeyEmpty() throws Exception { public void testMidKeyEmpty() throws Exception {
// Write a mapfile of simple data: keys are // Write a mapfile of simple data: keys are
Path dirName = new Path(System.getProperty("test.build.data",".") + Path dirName = new Path(TEST_DIR, "testMidKeyEmpty.mapfile");
getName() + ".mapfile");
FileSystem fs = FileSystem.getLocal(conf); FileSystem fs = FileSystem.getLocal(conf);
Path qualifiedDirName = fs.makeQualified(dirName); Path qualifiedDirName = fs.makeQualified(dirName);
MapFile.Writer writer = new MapFile.Writer(conf, fs, MapFile.Writer writer = new MapFile.Writer(conf, fs,
qualifiedDirName.toString(), IntWritable.class, IntWritable.class); qualifiedDirName.toString(), IntWritable.class, IntWritable.class);
writer.close(); writer.close();
// Now do getClosest on created mapfile. // Now do getClosest on created mapfile.
MapFile.Reader reader = new MapFile.Reader(fs, qualifiedDirName.toString(), MapFile.Reader reader = new MapFile.Reader(qualifiedDirName, conf);
conf); try {
assertEquals(null, reader.midKey()); assertEquals(null, reader.midKey());
} finally {
reader.close();
}
} }
} }

View File

@ -20,6 +20,8 @@ package org.apache.hadoop.io;
import java.io.*; import java.io.*;
import java.util.*; import java.util.*;
import java.util.concurrent.atomic.AtomicReference;
import junit.framework.TestCase; import junit.framework.TestCase;
import org.apache.commons.logging.*; import org.apache.commons.logging.*;
@ -51,6 +53,39 @@ public class TestSetFile extends TestCase {
fs.close(); fs.close();
} }
} }
/**
* test {@code SetFile.Reader} methods
* next(), get() in combination
*/
public void testSetFileAccessMethods() {
try {
FileSystem fs = FileSystem.getLocal(conf);
int size = 10;
writeData(fs, size);
SetFile.Reader reader = createReader(fs);
assertTrue("testSetFileWithConstruction1 error !!!", reader.next(new IntWritable(0)));
// don't know why reader.get(i) returns i+1
assertEquals("testSetFileWithConstruction2 error !!!", new IntWritable(size/2 + 1), reader.get(new IntWritable(size/2)));
assertNull("testSetFileWithConstruction3 error !!!", reader.get(new IntWritable(size*2)));
} catch (Exception ex) {
fail("testSetFileWithConstruction error !!!");
}
}
private SetFile.Reader createReader(FileSystem fs) throws IOException {
return new SetFile.Reader(fs, FILE,
WritableComparator.get(IntWritable.class), conf);
}
@SuppressWarnings("deprecation")
private void writeData(FileSystem fs, int elementSize) throws IOException {
MapFile.delete(fs, FILE);
SetFile.Writer writer = new SetFile.Writer(fs, FILE, IntWritable.class);
for (int i = 0; i < elementSize; i++)
writer.append(new IntWritable(i));
writer.close();
}
private static RandomDatum[] generate(int count) { private static RandomDatum[] generate(int count) {
LOG.info("generating " + count + " records in memory"); LOG.info("generating " + count + " records in memory");

View File

@ -19,11 +19,12 @@
package org.apache.hadoop.io; package org.apache.hadoop.io;
import junit.framework.TestCase; import junit.framework.TestCase;
import java.io.IOException; import java.io.IOException;
import java.nio.BufferUnderflowException;
import java.nio.ByteBuffer; import java.nio.ByteBuffer;
import java.nio.charset.CharacterCodingException; import java.nio.charset.CharacterCodingException;
import java.util.Random; import java.util.Random;
import com.google.common.primitives.Bytes;
/** Unit tests for LargeUTF8. */ /** Unit tests for LargeUTF8. */
public class TestText extends TestCase { public class TestText extends TestCase {
@ -321,7 +322,81 @@ public class TestText extends TestCase {
(new Text("foo"), (new Text("foo"),
"{\"type\":\"string\",\"java-class\":\"org.apache.hadoop.io.Text\"}"); "{\"type\":\"string\",\"java-class\":\"org.apache.hadoop.io.Text\"}");
} }
/**
* test {@code Text.charAt} behaviour, including out-of-range indexes
*/
public void testCharAt() {
String line = "adsawseeeeegqewgasddga";
Text text = new Text(line);
for (int i = 0; i < line.length(); i++) {
assertTrue("testCharAt error1 !!!", text.charAt(i) == line.charAt(i));
}
assertEquals("testCharAt error2 !!!", -1, text.charAt(-1));
assertEquals("testCharAt error3 !!!", -1, text.charAt(100));
}
/**
* test {@code Text} readFields/write operations
*/
public void testReadWriteOperations() {
String line = "adsawseeeeegqewgasddga";
byte[] inputBytes = line.getBytes();
inputBytes = Bytes.concat(new byte[] {(byte)22}, inputBytes);
DataInputBuffer in = new DataInputBuffer();
DataOutputBuffer out = new DataOutputBuffer();
Text text = new Text(line);
try {
in.reset(inputBytes, inputBytes.length);
text.readFields(in);
} catch(Exception ex) {
fail("testReadFields error !!!");
}
try {
text.write(out);
} catch(IOException ex) {
} catch(Exception ex) {
fail("testReadWriteOperations error !!!");
}
}
/**
* test that {@code Text.bytesToCodePoint(bytes)} consumes the whole buffer
* without raising {@code BufferUnderflowException}
*/
public void testBytesToCodePoint() {
try {
ByteBuffer bytes = ByteBuffer.wrap(new byte[] {-2, 45, 23, 12, 76, 89});
Text.bytesToCodePoint(bytes);
assertTrue("testBytesToCodePoint error !!!", bytes.position() == 6 );
} catch (BufferUnderflowException ex) {
fail("testBytesToCodePoint unexp exception");
} catch (Exception e) {
fail("testBytesToCodePoint unexp exception");
}
}
public void testbytesToCodePointWithInvalidUTF() {
try {
Text.bytesToCodePoint(ByteBuffer.wrap(new byte[] {-2}));
fail("testbytesToCodePointWithInvalidUTF error unexp exception !!!");
} catch (BufferUnderflowException ex) {
} catch(Exception e) {
fail("testbytesToCodePointWithInvalidUTF error unexp exception !!!");
}
}
public void testUtf8Length() {
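// UTF-8 encodes code points U+0000..U+007F in one byte and U+0080..U+07FF in two,
// which is what the expected lengths below reflect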
assertEquals("testUtf8Length1 error !!!", 1, Text.utf8Length(new String(new char[]{(char)1})));
assertEquals("testUtf8Length127 error !!!", 1, Text.utf8Length(new String(new char[]{(char)127})));
assertEquals("testUtf8Length128 error !!!", 2, Text.utf8Length(new String(new char[]{(char)128})));
assertEquals("testUtf8Length193 error !!!", 2, Text.utf8Length(new String(new char[]{(char)193})));
assertEquals("testUtf8Length225 error !!!", 2, Text.utf8Length(new String(new char[]{(char)225})));
assertEquals("testUtf8Length254 error !!!", 2, Text.utf8Length(new String(new char[]{(char)254})));
}
public static void main(String[] args) throws Exception public static void main(String[] args) throws Exception
{ {
TestText test = new TestText("main"); TestText test = new TestText("main");

View File

@ -0,0 +1,342 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.io.compress.snappy;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
import java.io.BufferedInputStream;
import java.io.BufferedOutputStream;
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import java.lang.reflect.Array;
import java.util.Random;
import org.apache.hadoop.io.DataInputBuffer;
import org.apache.hadoop.io.DataOutputBuffer;
import org.apache.hadoop.io.compress.BlockCompressorStream;
import org.apache.hadoop.io.compress.BlockDecompressorStream;
import org.apache.hadoop.io.compress.CompressionInputStream;
import org.apache.hadoop.io.compress.CompressionOutputStream;
import org.apache.hadoop.io.compress.SnappyCodec;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
import static org.junit.Assume.*;
public class TestSnappyCompressorDecompressor {
@Before
public void before() {
assumeTrue(SnappyCodec.isNativeCodeLoaded());
}
@Test
public void testSnappyCompressorSetInputNullPointerException() {
try {
SnappyCompressor compressor = new SnappyCompressor();
compressor.setInput(null, 0, 10);
fail("testSnappyCompressorSetInputNullPointerException error !!!");
} catch (NullPointerException ex) {
// expected
} catch (Exception ex) {
fail("testSnappyCompressorSetInputNullPointerException ex error !!!");
}
}
@Test
public void testSnappyDecompressorSetInputNullPointerException() {
try {
SnappyDecompressor decompressor = new SnappyDecompressor();
decompressor.setInput(null, 0, 10);
fail("testSnappyDecompressorSetInputNullPointerException error !!!");
} catch (NullPointerException ex) {
// expected
} catch (Exception e) {
fail("testSnappyDecompressorSetInputNullPointerException ex error !!!");
}
}
@Test
public void testSnappyCompressorSetInputAIOBException() {
try {
SnappyCompressor compressor = new SnappyCompressor();
compressor.setInput(new byte[] {}, -5, 10);
fail("testSnappyCompressorSetInputAIOBException error !!!");
} catch (ArrayIndexOutOfBoundsException ex) {
// expected
} catch (Exception ex) {
fail("testSnappyCompressorSetInputAIOBException ex error !!!");
}
}
@Test
public void testSnappyDecompressorSetInputAIOUBException() {
try {
SnappyDecompressor decompressor = new SnappyDecompressor();
decompressor.setInput(new byte[] {}, -5, 10);
fail("testSnappyDecompressorSetInputAIOUBException error !!!");
} catch (ArrayIndexOutOfBoundsException ex) {
// expected
} catch (Exception e) {
fail("testSnappyDecompressorSetInputAIOUBException ex error !!!");
}
}
@Test
public void testSnappyCompressorCompressNullPointerException() {
try {
SnappyCompressor compressor = new SnappyCompressor();
byte[] bytes = BytesGenerator.get(1024 * 6);
compressor.setInput(bytes, 0, bytes.length);
compressor.compress(null, 0, 0);
fail("testSnappyCompressorCompressNullPointerException error !!!");
} catch (NullPointerException ex) {
// expected
} catch (Exception e) {
fail("testSnappyCompressorCompressNullPointerException ex error !!!");
}
}
@Test
public void testSnappyDecompressorCompressNullPointerException() {
try {
SnappyDecompressor decompressor = new SnappyDecompressor();
byte[] bytes = BytesGenerator.get(1024 * 6);
decompressor.setInput(bytes, 0, bytes.length);
decompressor.decompress(null, 0, 0);
fail("testSnappyDecompressorCompressNullPointerException error !!!");
} catch (NullPointerException ex) {
// expected
} catch (Exception e) {
fail("testSnappyDecompressorCompressNullPointerException ex error !!!");
}
}
@Test
public void testSnappyCompressorCompressAIOBException() {
try {
SnappyCompressor compressor = new SnappyCompressor();
byte[] bytes = BytesGenerator.get(1024 * 6);
compressor.setInput(bytes, 0, bytes.length);
compressor.compress(new byte[] {}, 0, -1);
fail("testSnappyCompressorCompressAIOBException error !!!");
} catch (ArrayIndexOutOfBoundsException ex) {
// expected
} catch (Exception e) {
fail("testSnappyCompressorCompressAIOBException ex error !!!");
}
}
@Test
public void testSnappyDecompressorCompressAIOBException() {
try {
SnappyDecompressor decompressor = new SnappyDecompressor();
byte[] bytes = BytesGenerator.get(1024 * 6);
decompressor.setInput(bytes, 0, bytes.length);
decompressor.decompress(new byte[] {}, 0, -1);
fail("testSnappyDecompressorCompressAIOBException error !!!");
} catch (ArrayIndexOutOfBoundsException ex) {
// expected
} catch (Exception e) {
fail("testSnappyDecompressorCompressAIOBException ex error !!!");
}
}
@Test
public void testSnappyCompressDecompress() {
int BYTE_SIZE = 1024 * 54;
byte[] bytes = BytesGenerator.get(BYTE_SIZE);
SnappyCompressor compressor = new SnappyCompressor();
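// as asserted below, setInput() alone advances getBytesRead(), while
// getBytesWritten() stays at 0 until compress() actually produces output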
try {
compressor.setInput(bytes, 0, bytes.length);
assertTrue("SnappyCompressDecompress getBytesRead error !!!",
compressor.getBytesRead() > 0);
assertTrue(
"SnappyCompressDecompress getBytesWritten before compress error !!!",
compressor.getBytesWritten() == 0);
byte[] compressed = new byte[BYTE_SIZE];
int cSize = compressor.compress(compressed, 0, compressed.length);
assertTrue(
"SnappyCompressDecompress getBytesWritten after compress error !!!",
compressor.getBytesWritten() > 0);
SnappyDecompressor decompressor = new SnappyDecompressor(BYTE_SIZE);
// feed the decompressor only the cSize bytes that were actually compressed
decompressor.setInput(compressed, 0, cSize);
byte[] decompressed = new byte[BYTE_SIZE];
decompressor.decompress(decompressed, 0, decompressed.length);
assertTrue("testSnappyCompressDecompress finished error !!!",
decompressor.finished());
Assert.assertArrayEquals(bytes, decompressed);
compressor.reset();
decompressor.reset();
assertTrue("decompressor getRemaining error !!!",
decompressor.getRemaining() == 0);
} catch (Exception e) {
fail("testSnappyCompressDecompress ex error!!!");
}
}
@Test
public void testCompressorDecompressorEmptyStreamLogic() {
ByteArrayInputStream bytesIn = null;
ByteArrayOutputStream bytesOut = null;
byte[] buf = null;
BlockDecompressorStream blockDecompressorStream = null;
try {
// compress empty stream
bytesOut = new ByteArrayOutputStream();
BlockCompressorStream blockCompressorStream = new BlockCompressorStream(
bytesOut, new SnappyCompressor(), 1024, 0);
// close without write
blockCompressorStream.close();
// check compressed output
buf = bytesOut.toByteArray();
assertEquals("empty stream compressed output size != 4", 4, buf.length);
// use compressed output as input for decompression
bytesIn = new ByteArrayInputStream(buf);
// create decompression stream
blockDecompressorStream = new BlockDecompressorStream(bytesIn,
new SnappyDecompressor(), 1024);
// no byte is available because stream was closed
assertEquals("return value is not -1", -1, blockDecompressorStream.read());
} catch (Exception e) {
fail("testCompressorDecompressorEmptyStreamLogic ex error !!!"
+ e.getMessage());
} finally {
if (blockDecompressorStream != null)
try {
bytesIn.close();
bytesOut.close();
blockDecompressorStream.close();
} catch (IOException e) {
}
}
}
@Test
public void testSnappyBlockCompression() {
int BYTE_SIZE = 1024 * 50;
int BLOCK_SIZE = 512;
ByteArrayOutputStream out = new ByteArrayOutputStream();
byte[] block = new byte[BLOCK_SIZE];
byte[] bytes = BytesGenerator.get(BYTE_SIZE);
try {
// Use the default buffer size of 512 and a compression overhead of
// (1% of bufferSize + 12 bytes) = 18 bytes, following the zlib convention.
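// Compressor contract exercised by the loop below: feed a chunk with setInput(),
// call finish(), drain with compress() until finished() returns true, then reset()
// before the next chunk.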
SnappyCompressor compressor = new SnappyCompressor();
int off = 0;
int len = BYTE_SIZE;
int maxSize = BLOCK_SIZE - 18;
if (BYTE_SIZE > maxSize) {
do {
int bufLen = Math.min(len, maxSize);
compressor.setInput(bytes, off, bufLen);
compressor.finish();
while (!compressor.finished()) {
compressor.compress(block, 0, block.length);
out.write(block);
}
compressor.reset();
off += bufLen;
len -= bufLen;
} while (len > 0);
}
assertTrue("testSnappyBlockCompression error !!!",
out.toByteArray().length > 0);
} catch (Exception ex) {
fail("testSnappyBlockCompression ex error !!!");
}
}
@Test
public void testSnappyCompressorDecopressorLogicWithCompressionStreams() {
int BYTE_SIZE = 1024 * 100;
byte[] bytes = BytesGenerator.get(BYTE_SIZE);
int bufferSize = 262144;
int compressionOverhead = (bufferSize / 6) + 32;
DataOutputStream deflateOut = null;
DataInputStream inflateIn = null;
try {
DataOutputBuffer compressedDataBuffer = new DataOutputBuffer();
CompressionOutputStream deflateFilter = new BlockCompressorStream(
compressedDataBuffer, new SnappyCompressor(bufferSize), bufferSize,
compressionOverhead);
deflateOut = new DataOutputStream(new BufferedOutputStream(deflateFilter));
deflateOut.write(bytes, 0, bytes.length);
deflateOut.flush();
deflateFilter.finish();
DataInputBuffer deCompressedDataBuffer = new DataInputBuffer();
deCompressedDataBuffer.reset(compressedDataBuffer.getData(), 0,
compressedDataBuffer.getLength());
CompressionInputStream inflateFilter = new BlockDecompressorStream(
deCompressedDataBuffer, new SnappyDecompressor(bufferSize),
bufferSize);
inflateIn = new DataInputStream(new BufferedInputStream(inflateFilter));
byte[] result = new byte[BYTE_SIZE];
inflateIn.read(result);
Assert.assertArrayEquals(
"original array not equals compress/decompressed array", result,
bytes);
} catch (IOException e) {
fail("testSnappyCompressorDecopressorLogicWithCompressionStreams ex error !!!");
} finally {
try {
if (deflateOut != null)
deflateOut.close();
if (inflateIn != null)
inflateIn.close();
} catch (Exception e) {
}
}
}
static final class BytesGenerator {
private BytesGenerator() {
}
private static final byte[] CACHE = new byte[] { 0x0, 0x1, 0x2, 0x3, 0x4,
0x5, 0x6, 0x7, 0x8, 0x9, 0xA, 0xB, 0xC, 0xD, 0xE, 0xF };
private static final Random rnd = new Random(12345l);
public static byte[] get(int size) {
byte[] array = (byte[]) Array.newInstance(byte.class, size);
for (int i = 0; i < size; i++)
array[i] = CACHE[rnd.nextInt(CACHE.length - 1)];
return array;
}
}
}

View File

@ -16,11 +16,21 @@
*/ */
package org.apache.hadoop.security; package org.apache.hadoop.security;
import static org.junit.Assert.*; import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.ipc.TestSaslRPC;
import org.apache.hadoop.metrics2.MetricsRecordBuilder;
import org.apache.hadoop.security.UserGroupInformation.AuthenticationMethod;
import org.apache.hadoop.security.authentication.util.KerberosName;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.security.token.TokenIdentifier;
import org.apache.hadoop.util.Shell;
import org.junit.*; import org.junit.*;
import static org.mockito.Mockito.*; import javax.security.auth.Subject;
import javax.security.auth.login.AppConfigurationEntry;
import javax.security.auth.login.LoginContext;
import java.io.BufferedReader; import java.io.BufferedReader;
import java.io.IOException; import java.io.IOException;
import java.io.InputStreamReader; import java.io.InputStreamReader;
@ -30,21 +40,13 @@ import java.util.Collection;
import java.util.LinkedHashSet; import java.util.LinkedHashSet;
import java.util.Set; import java.util.Set;
import javax.security.auth.Subject;
import javax.security.auth.login.AppConfigurationEntry;
import javax.security.auth.login.LoginContext;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.metrics2.MetricsRecordBuilder;
import org.apache.hadoop.security.UserGroupInformation.AuthenticationMethod;
import org.apache.hadoop.security.authentication.util.KerberosName;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.security.token.TokenIdentifier;
import static org.apache.hadoop.test.MetricsAsserts.*;
import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTH_TO_LOCAL; import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTH_TO_LOCAL;
import org.apache.hadoop.util.Shell; import static org.apache.hadoop.ipc.TestSaslRPC.*;
import static org.apache.hadoop.security.token.delegation.TestDelegationToken.TestDelegationTokenIdentifier;
import static org.apache.hadoop.test.MetricsAsserts.*;
import static org.junit.Assert.*;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
public class TestUserGroupInformation { public class TestUserGroupInformation {
final private static String USER_NAME = "user1@HADOOP.APACHE.ORG"; final private static String USER_NAME = "user1@HADOOP.APACHE.ORG";
@ -786,4 +788,29 @@ public class TestUserGroupInformation {
UserGroupInformation.setLoginUser(ugi); UserGroupInformation.setLoginUser(ugi);
assertEquals(ugi, UserGroupInformation.getLoginUser()); assertEquals(ugi, UserGroupInformation.getLoginUser());
} }
/**
* In some scenarios, such as HA, delegation tokens are associated with a
* logical name. The tokens are cloned and are associated with the
* physical address of the server where the service is provided.
* This test ensures cloned delegation tokens are only used locally
* and are not returned by {@link UserGroupInformation#getCredentials()}
*/
@Test
public void testPrivateTokenExclusion() throws Exception {
UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
TestTokenIdentifier tokenId = new TestTokenIdentifier();
Token<TestTokenIdentifier> token = new Token<TestTokenIdentifier>(
tokenId.getBytes(), "password".getBytes(),
tokenId.getKind(), null);
ugi.addToken(new Text("regular-token"), token);
// Now add cloned private token
ugi.addToken(new Text("private-token"), new Token.PrivateToken<TestTokenIdentifier>(token));
ugi.addToken(new Text("private-token1"), new Token.PrivateToken<TestTokenIdentifier>(token));
// Ensure only non-private tokens are returned
Collection<Token<? extends TokenIdentifier>> tokens = ugi.getCredentials().getAllTokens();
assertEquals(1, tokens.size());
}
} }

View File

@ -0,0 +1,533 @@
package org.apache.hadoop.util.bloom;
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
import java.io.IOException;
import java.util.AbstractCollection;
import java.util.Collection;
import java.util.Iterator;
import java.util.Random;
import org.junit.Assert;
import org.apache.hadoop.io.DataInputBuffer;
import org.apache.hadoop.io.DataOutputBuffer;
import org.apache.hadoop.util.hash.Hash;
import org.apache.log4j.Logger;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableSet;
public class BloomFilterCommonTester<T extends Filter> {
private static final double LN2 = Math.log(2);
private static final double LN2_SQUARED = LN2 * LN2;
private final int hashType;
private final int numInsertions;
private final ImmutableList.Builder<T> builder = ImmutableList.builder();
private ImmutableSet<BloomFilterTestStrategy> filterTestStrateges;
private final PreAssertionHelper preAssertionHelper;
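// Standard Bloom filter sizing: m = -n * ln(p) / (ln 2)^2 bits for n expected
// insertions at false-positive probability p; e.g. n = 1000, p = 0.03 gives
// roughly 7298 bits.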
static int optimalNumOfBits(int n, double p) {
return (int) (-n * Math.log(p) / LN2_SQUARED);
}
public static <T extends Filter> BloomFilterCommonTester<T> of(int hashId,
int numInsertions) {
return new BloomFilterCommonTester<T>(hashId, numInsertions);
}
public BloomFilterCommonTester<T> withFilterInstance(T filter) {
builder.add(filter);
return this;
}
private BloomFilterCommonTester(int hashId, int numInsertions) {
this.hashType = hashId;
this.numInsertions = numInsertions;
this.preAssertionHelper = new PreAssertionHelper() {
@Override
public ImmutableSet<Integer> falsePositives(int hashId) {
switch (hashId) {
case Hash.JENKINS_HASH: {
// known false positives for odd and even keys under 1000
return ImmutableSet.of(99, 963);
}
case Hash.MURMUR_HASH: {
// known false positives for odd and even keys under 1000
return ImmutableSet.of(769, 772, 810, 874);
}
default: {
// fail fast with unknown hash error !!!
Assert.assertFalse("unknown hash error", true);
return ImmutableSet.of();
}
}
}
};
}
public BloomFilterCommonTester<T> withTestCases(
ImmutableSet<BloomFilterTestStrategy> filterTestStrateges) {
this.filterTestStrateges = ImmutableSet.copyOf(filterTestStrateges);
return this;
}
@SuppressWarnings("unchecked")
public void test() {
final ImmutableList<T> filtersList = builder.build();
final ImmutableSet<Integer> falsePositives = preAssertionHelper
.falsePositives(hashType);
for (T filter : filtersList) {
for (BloomFilterTestStrategy strategy : filterTestStrateges) {
strategy.getStrategy().assertWhat(filter, numInsertions, hashType, falsePositives);
// create fresh instance for next test iteration
filter = (T) getSymmetricFilter(filter.getClass(), numInsertions, hashType);
}
}
}
interface FilterTesterStrategy {
final Logger logger = Logger.getLogger(FilterTesterStrategy.class);
void assertWhat(Filter filter, int numInsertions, int hashId,
ImmutableSet<Integer> falsePositives);
}
private static Filter getSymmetricFilter(Class<?> filterClass,
int numInsertions, int hashType) {
int bitSetSize = optimalNumOfBits(numInsertions, 0.03);
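// with p = 0.03 the optimal hash count is k = log2(1/p) ~ 5.06, hence 5 below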
int hashFunctionNumber = 5;
if (filterClass == BloomFilter.class) {
return new BloomFilter(bitSetSize, hashFunctionNumber, hashType);
} else if (filterClass == CountingBloomFilter.class) {
return new CountingBloomFilter(bitSetSize, hashFunctionNumber, hashType);
} else if (filterClass == RetouchedBloomFilter.class) {
return new RetouchedBloomFilter(bitSetSize, hashFunctionNumber, hashType);
} else if (filterClass == DynamicBloomFilter.class) {
return new DynamicBloomFilter(bitSetSize, hashFunctionNumber, hashType, 3);
} else {
//fail fast
assertFalse("unexpected filterClass", true);
return null;
}
}
public enum BloomFilterTestStrategy {
ADD_KEYS_STRATEGY(new FilterTesterStrategy() {
private final ImmutableList<Key> keys = ImmutableList.of(new Key(
new byte[] { 49, 48, 48 }), new Key(new byte[] { 50, 48, 48 }));
@Override
public void assertWhat(Filter filter, int numInsertions, int hashId,
ImmutableSet<Integer> falsePositives) {
filter.add(keys);
assertTrue(" might contain key error ",
filter.membershipTest(new Key("100".getBytes())));
assertTrue(" might contain key error ",
filter.membershipTest(new Key("200".getBytes())));
filter.add(keys.toArray(new Key[] {}));
assertTrue(" might contain key error ",
filter.membershipTest(new Key("100".getBytes())));
assertTrue(" might contain key error ",
filter.membershipTest(new Key("200".getBytes())));
filter.add(new AbstractCollection<Key>() {
@Override
public Iterator<Key> iterator() {
return keys.iterator();
}
@Override
public int size() {
return keys.size();
}
});
assertTrue(" might contain key error ",
filter.membershipTest(new Key("100".getBytes())));
assertTrue(" might contain key error ",
filter.membershipTest(new Key("200".getBytes())));
}
}),
KEY_TEST_STRATEGY(new FilterTesterStrategy() {
private void checkOnKeyMethods() {
String line = "werabsdbe";
Key key = new Key(line.getBytes());
assertTrue("default key weight error ", key.getWeight() == 1d);
key.set(line.getBytes(), 2d);
assertTrue(" setted key weight error ", key.getWeight() == 2d);
Key sKey = new Key(line.getBytes(), 2d);
assertTrue("equals error", key.equals(sKey));
assertTrue("hashcode error", key.hashCode() == sKey.hashCode());
sKey = new Key(line.concat("a").getBytes(), 2d);
assertFalse("equals error", key.equals(sKey));
assertFalse("hashcode error", key.hashCode() == sKey.hashCode());
sKey = new Key(line.getBytes(), 3d);
assertFalse("equals error", key.equals(sKey));
assertFalse("hashcode error", key.hashCode() == sKey.hashCode());
key.incrementWeight();
assertTrue("weight error", key.getWeight() == 3d);
key.incrementWeight(2d);
assertTrue("weight error", key.getWeight() == 5d);
}
private void checkOnReadWrite() {
String line = "qryqeb354645rghdfvbaq23312fg";
DataOutputBuffer out = new DataOutputBuffer();
DataInputBuffer in = new DataInputBuffer();
Key originKey = new Key(line.getBytes(), 100d);
try {
originKey.write(out);
in.reset(out.getData(), out.getData().length);
Key restoredKey = new Key(new byte[] { 0 });
assertFalse("checkOnReadWrite equals error", restoredKey.equals(originKey));
restoredKey.readFields(in);
assertTrue("checkOnReadWrite equals error", restoredKey.equals(originKey));
out.reset();
} catch (Exception ioe) {
Assert.fail("checkOnReadWrite ex error");
}
}
private void checkSetOnIAE() {
Key key = new Key();
try {
key.set(null, 0);
} catch (IllegalArgumentException ex) {
// expected
} catch (Exception e) {
Assert.fail("checkSetOnIAE ex error");
}
}
@Override
public void assertWhat(Filter filter, int numInsertions, int hashId,
ImmutableSet<Integer> falsePositives) {
checkOnKeyMethods();
checkOnReadWrite();
checkSetOnIAE();
}
}),
EXCEPTIONS_CHECK_STRATEGY(new FilterTesterStrategy() {
@Override
public void assertWhat(Filter filter, int numInsertions, int hashId,
ImmutableSet<Integer> falsePositives) {
checkAddOnNPE(filter);
checkTestMembershipOnNPE(filter);
checkAndOnIAE(filter);
}
private void checkAndOnIAE(Filter filter) {
Filter tfilter = null;
try {
Collection<Key> keys = null;
filter.add(keys);
} catch (IllegalArgumentException ex) {
//
} catch (Exception e) {
Assert.fail("" + e);
}
try {
Key[] keys = null;
filter.add(keys);
} catch (IllegalArgumentException ex) {
//
} catch (Exception e) {
Assert.fail("" + e);
}
try {
ImmutableList<Key> keys = null;
filter.add(keys);
} catch (IllegalArgumentException ex) {
//
} catch (Exception e) {
Assert.fail("" + e);
}
try {
filter.and(tfilter);
} catch (IllegalArgumentException ex) {
// expected
} catch (Exception e) {
Assert.fail("" + e);
}
try {
filter.or(tfilter);
} catch (IllegalArgumentException ex) {
// expected
} catch (Exception e) {
Assert.fail("" + e);
}
try {
filter.xor(tfilter);
} catch (IllegalArgumentException ex) {
// expected
} catch (UnsupportedOperationException unex) {
//
} catch (Exception e) {
Assert.fail("" + e);
}
}
private void checkTestMembershipOnNPE(Filter filter) {
try {
Key nullKey = null;
filter.membershipTest(nullKey);
} catch (NullPointerException ex) {
// expected
} catch (Exception e) {
Assert.fail("" + e);
}
}
private void checkAddOnNPE(Filter filter) {
try {
Key nullKey = null;
filter.add(nullKey);
} catch (NullPointerException ex) {
// expected
} catch (Exception e) {
Assert.fail("" + e);
}
}
}),
ODD_EVEN_ABSENT_STRATEGY(new FilterTesterStrategy() {
@Override
public void assertWhat(Filter filter, int numInsertions, int hashId,
ImmutableSet<Integer> falsePositives) {
// add all even keys
for (int i = 0; i < numInsertions; i += 2) {
filter.add(new Key(Integer.toString(i).getBytes()));
}
// check that every even key is present
for (int i = 0; i < numInsertions; i += 2) {
Assert.assertTrue(" filter might contains " + i,
filter.membershipTest(new Key(Integer.toString(i).getBytes())));
}
// check that odd keys are absent, skipping the known false positives
for (int i = 1; i < numInsertions; i += 2) {
if (!falsePositives.contains(i)) {
assertFalse(" filter should not contain " + i,
filter.membershipTest(new Key(Integer.toString(i).getBytes())));
}
}
}
}),
WRITE_READ_STRATEGY(new FilterTesterStrategy() {
private int slotSize = 10;
@Override
public void assertWhat(Filter filter, int numInsertions, int hashId,
ImmutableSet<Integer> falsePositives) {
final Random rnd = new Random();
final DataOutputBuffer out = new DataOutputBuffer();
final DataInputBuffer in = new DataInputBuffer();
try {
Filter tempFilter = getSymmetricFilter(filter.getClass(),
numInsertions, hashId);
ImmutableList.Builder<Integer> blist = ImmutableList.builder();
for (int i = 0; i < slotSize; i++) {
blist.add(rnd.nextInt(numInsertions * 2));
}
ImmutableList<Integer> list = blist.build();
// mark bits for later check
for (Integer slot : list) {
filter.add(new Key(String.valueOf(slot).getBytes()));
}
filter.write(out);
in.reset(out.getData(), out.getLength());
tempFilter.readFields(in);
for (Integer slot : list) {
assertTrue("read/write mask check filter error on " + slot,
filter.membershipTest(new Key(String.valueOf(slot).getBytes())));
}
} catch (IOException ex) {
Assert.fail("error ex !!!" + ex);
}
}
}),
FILTER_XOR_STRATEGY(new FilterTesterStrategy() {
@Override
public void assertWhat(Filter filter, int numInsertions, int hashId,
ImmutableSet<Integer> falsePositives) {
Filter symmetricFilter = getSymmetricFilter(filter.getClass(),
numInsertions, hashId);
try {
// 0 xor 0 -> 0
filter.xor(symmetricFilter);
// verify that no key is reported as present
for (int i = 0; i < numInsertions; i++) {
Assert.assertFalse(" filter might contains " + i,
filter.membershipTest(new Key(Integer.toString(i).getBytes())));
}
// add all even keys
for (int i = 0; i < numInsertions; i += 2) {
filter.add(new Key(Integer.toString(i).getBytes()));
}
// add the same even keys to the symmetric filter
for (int i = 0; i < numInsertions; i += 2) {
symmetricFilter.add(new Key(Integer.toString(i).getBytes()));
}
filter.xor(symmetricFilter);
// 1 xor 1 -> 0
// verify that all keys are now absent
for (int i = 0; i < numInsertions; i++) {
Assert.assertFalse(" filter might not contains " + i,
filter.membershipTest(new Key(Integer.toString(i).getBytes())));
}
} catch (UnsupportedOperationException ex) {
// not all Filter implementations support xor()
return;
}
}
}),
FILTER_AND_STRATEGY(new FilterTesterStrategy() {
@Override
public void assertWhat(Filter filter, int numInsertions, int hashId,
ImmutableSet<Integer> falsePositives) {
int startIntersection = numInsertions - (numInsertions - 100);
int endIntersection = numInsertions - 100;
Filter partialFilter = getSymmetricFilter(filter.getClass(),
numInsertions, hashId);
for (int i = 0; i < numInsertions; i++) {
String digit = Integer.toString(i);
filter.add(new Key(digit.getBytes()));
if (i >= startIntersection && i <= endIntersection) {
partialFilter.add(new Key(digit.getBytes()));
}
}
// compute the logical AND of the two filters
filter.and(partialFilter);
for (int i = 0; i < numInsertions; i++) {
if (i >= startIntersection && i <= endIntersection) {
Assert.assertTrue(" filter might contains " + i,
filter.membershipTest(new Key(Integer.toString(i).getBytes())));
}
}
}
}),
FILTER_OR_STRATEGY(new FilterTesterStrategy() {
@Override
public void assertWhat(Filter filter, int numInsertions, int hashId,
ImmutableSet<Integer> falsePositives) {
Filter evenFilter = getSymmetricFilter(filter.getClass(),
numInsertions, hashId);
// add all even
for (int i = 0; i < numInsertions; i += 2) {
evenFilter.add(new Key(Integer.toString(i).getBytes()));
}
// add all odd
for (int i = 1; i < numInsertions; i += 2) {
filter.add(new Key(Integer.toString(i).getBytes()));
}
// union odd with even
filter.or(evenFilter);
// check that every key is present after the union
for (int i = 0; i < numInsertions; i++) {
Assert.assertTrue(" filter might contains " + i,
filter.membershipTest(new Key(Integer.toString(i).getBytes())));
}
}
});
private final FilterTesterStrategy testerStrategy;
BloomFilterTestStrategy(FilterTesterStrategy testerStrategy) {
this.testerStrategy = testerStrategy;
}
public FilterTesterStrategy getStrategy() {
return testerStrategy;
}
}
interface PreAssertionHelper {
public ImmutableSet<Integer> falsePositives(int hashId);
}
}

View File

@ -0,0 +1,240 @@
package org.apache.hadoop.util.bloom;
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertTrue;
import java.util.AbstractCollection;
import java.util.Iterator;
import org.apache.hadoop.util.bloom.BloomFilterCommonTester.BloomFilterTestStrategy;
import org.apache.hadoop.util.hash.Hash;
import org.junit.Assert;
import org.junit.Test;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.ImmutableSet;
public class TestBloomFilters {
int numInsertions = 1000;
int bitSize = BloomFilterCommonTester.optimalNumOfBits(numInsertions, 0.03);
int hashFunctionNumber = 5;
private static final ImmutableMap<Integer, ? extends AbstractCollection<Key>> FALSE_POSITIVE_UNDER_1000 = ImmutableMap
.of(Hash.JENKINS_HASH, new AbstractCollection<Key>() {
final ImmutableList<Key> falsePositive = ImmutableList.<Key> of(
new Key("99".getBytes()), new Key("963".getBytes()));
@Override
public Iterator<Key> iterator() {
return falsePositive.iterator();
}
@Override
public int size() {
return falsePositive.size();
}
}, Hash.MURMUR_HASH, new AbstractCollection<Key>() {
final ImmutableList<Key> falsePositive = ImmutableList.<Key> of(
new Key("769".getBytes()), new Key("772".getBytes()),
new Key("810".getBytes()), new Key("874".getBytes()));
@Override
public Iterator<Key> iterator() {
return falsePositive.iterator();
}
@Override
public int size() {
return falsePositive.size();
}
});
private enum Digits {
ODD(1), EVEN(0);
int start;
Digits(int start) {
this.start = start;
}
int getStart() {
return start;
}
}
@Test
public void testDynamicBloomFilter() {
int hashId = Hash.JENKINS_HASH;
Filter filter = new DynamicBloomFilter(bitSize, hashFunctionNumber,
Hash.JENKINS_HASH, 3);
BloomFilterCommonTester.of(hashId, numInsertions)
.withFilterInstance(filter)
.withTestCases(ImmutableSet.of(BloomFilterTestStrategy.KEY_TEST_STRATEGY,
BloomFilterTestStrategy.ADD_KEYS_STRATEGY,
BloomFilterTestStrategy.EXCEPTIONS_CHECK_STRATEGY,
BloomFilterTestStrategy.WRITE_READ_STRATEGY,
BloomFilterTestStrategy.ODD_EVEN_ABSENT_STRATEGY))
.test();
assertNotNull("testDynamicBloomFilter error ", filter.toString());
}
@Test
public void testCountingBloomFilter() {
int hashId = Hash.JENKINS_HASH;
CountingBloomFilter filter = new CountingBloomFilter(bitSize,
hashFunctionNumber, hashId);
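// a counting Bloom filter keeps a counter per position, so a key added twice below
// needs two delete() calls before membershipTest() stops reporting it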
Key key = new Key(new byte[] { 48, 48 });
filter.add(key);
assertTrue("CountingBloomFilter.membership error ",
filter.membershipTest(key));
assertTrue("CountingBloomFilter.approximateCount error",
filter.approximateCount(key) == 1);
filter.add(key);
assertTrue("CountingBloomFilter.approximateCount error",
filter.approximateCount(key) == 2);
filter.delete(key);
assertTrue("CountingBloomFilter.membership error ",
filter.membershipTest(key));
filter.delete(key);
assertFalse("CountingBloomFilter.membership error ",
filter.membershipTest(key));
assertTrue("CountingBloomFilter.approximateCount error",
filter.approximateCount(key) == 0);
BloomFilterCommonTester.of(hashId, numInsertions)
.withFilterInstance(filter)
.withTestCases(ImmutableSet.of(BloomFilterTestStrategy.KEY_TEST_STRATEGY,
BloomFilterTestStrategy.ADD_KEYS_STRATEGY,
BloomFilterTestStrategy.EXCEPTIONS_CHECK_STRATEGY,
BloomFilterTestStrategy.ODD_EVEN_ABSENT_STRATEGY,
BloomFilterTestStrategy.WRITE_READ_STRATEGY,
BloomFilterTestStrategy.FILTER_OR_STRATEGY,
BloomFilterTestStrategy.FILTER_XOR_STRATEGY)).test();
}
@Test
public void testRetouchedBloomFilterSpecific() {
int numInsertions = 1000;
int hashFunctionNumber = 5;
ImmutableSet<Integer> hashes = ImmutableSet.of(Hash.MURMUR_HASH,
Hash.JENKINS_HASH);
for (Integer hashId : hashes) {
RetouchedBloomFilter filter = new RetouchedBloomFilter(bitSize,
hashFunctionNumber, hashId);
checkOnAbsentFalsePositive(hashId, numInsertions, filter, Digits.ODD,
RemoveScheme.MAXIMUM_FP);
filter.and(new RetouchedBloomFilter(bitSize, hashFunctionNumber, hashId));
checkOnAbsentFalsePositive(hashId, numInsertions, filter, Digits.EVEN,
RemoveScheme.MAXIMUM_FP);
filter.and(new RetouchedBloomFilter(bitSize, hashFunctionNumber, hashId));
checkOnAbsentFalsePositive(hashId, numInsertions, filter, Digits.ODD,
RemoveScheme.MINIMUM_FN);
filter.and(new RetouchedBloomFilter(bitSize, hashFunctionNumber, hashId));
checkOnAbsentFalsePositive(hashId, numInsertions, filter, Digits.EVEN,
RemoveScheme.MINIMUM_FN);
filter.and(new RetouchedBloomFilter(bitSize, hashFunctionNumber, hashId));
checkOnAbsentFalsePositive(hashId, numInsertions, filter, Digits.ODD,
RemoveScheme.RATIO);
filter.and(new RetouchedBloomFilter(bitSize, hashFunctionNumber, hashId));
checkOnAbsentFalsePositive(hashId, numInsertions, filter, Digits.EVEN,
RemoveScheme.RATIO);
filter.and(new RetouchedBloomFilter(bitSize, hashFunctionNumber, hashId));
}
}
private void checkOnAbsentFalsePositive(int hashId, int numInsertions,
final RetouchedBloomFilter filter, Digits digits, short removeSchema) {
AbstractCollection<Key> falsePositives = FALSE_POSITIVE_UNDER_1000
.get(hashId);
if (falsePositives == null)
Assert.fail(String.format("false positives for hash %d not found",
hashId));
filter.addFalsePositive(falsePositives);
for (int i = digits.getStart(); i < numInsertions; i += 2) {
filter.add(new Key(Integer.toString(i).getBytes()));
}
for (Key key : falsePositives) {
filter.selectiveClearing(key, removeSchema);
}
for (int i = 1 - digits.getStart(); i < numInsertions; i += 2) {
assertFalse(" testRetouchedBloomFilterAddFalsePositive error " + i,
filter.membershipTest(new Key(Integer.toString(i).getBytes())));
}
}
@Test
public void testFiltersWithJenkinsHash() {
int hashId = Hash.JENKINS_HASH;
BloomFilterCommonTester.of(hashId, numInsertions)
.withFilterInstance(new BloomFilter(bitSize, hashFunctionNumber, hashId))
.withFilterInstance(new RetouchedBloomFilter(bitSize, hashFunctionNumber, hashId))
.withTestCases(ImmutableSet.of(BloomFilterTestStrategy.KEY_TEST_STRATEGY,
BloomFilterTestStrategy.ADD_KEYS_STRATEGY,
BloomFilterTestStrategy.EXCEPTIONS_CHECK_STRATEGY,
BloomFilterTestStrategy.ODD_EVEN_ABSENT_STRATEGY,
BloomFilterTestStrategy.WRITE_READ_STRATEGY,
BloomFilterTestStrategy.FILTER_OR_STRATEGY,
BloomFilterTestStrategy.FILTER_AND_STRATEGY,
BloomFilterTestStrategy.FILTER_XOR_STRATEGY)).test();
}
@Test
public void testFiltersWithMurmurHash() {
int hashId = Hash.MURMUR_HASH;
BloomFilterCommonTester.of(hashId, numInsertions)
.withFilterInstance(new BloomFilter(bitSize, hashFunctionNumber, hashId))
.withFilterInstance(new RetouchedBloomFilter(bitSize, hashFunctionNumber, hashId))
.withTestCases(ImmutableSet.of(BloomFilterTestStrategy.KEY_TEST_STRATEGY,
BloomFilterTestStrategy.ADD_KEYS_STRATEGY,
BloomFilterTestStrategy.EXCEPTIONS_CHECK_STRATEGY,
BloomFilterTestStrategy.ODD_EVEN_ABSENT_STRATEGY,
BloomFilterTestStrategy.WRITE_READ_STRATEGY,
BloomFilterTestStrategy.FILTER_OR_STRATEGY,
BloomFilterTestStrategy.FILTER_AND_STRATEGY,
BloomFilterTestStrategy.FILTER_XOR_STRATEGY)).test();
}
}

View File

@ -0,0 +1,89 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.util.hash;
import static org.junit.Assert.*;
import org.apache.hadoop.conf.Configuration;
import org.junit.Test;
public class TestHash {
static final String LINE = "34563@45kjkksdf/ljfdb9d8fbusd*89uggjsk<dfgjsdfh@sddc2q3esc";
@Test
public void testHash() {
int iterations = 30;
assertTrue("testHash jenkins error !!!",
Hash.JENKINS_HASH == Hash.parseHashType("jenkins"));
assertTrue("testHash murmur error !!!",
Hash.MURMUR_HASH == Hash.parseHashType("murmur"));
assertTrue("testHash undefined",
Hash.INVALID_HASH == Hash.parseHashType("undefined"));
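// Hash.getInstance(conf) reads the "hadoop.util.hash.type" property; the assertions
// below assume "murmur" is the default when the property is not set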
Configuration cfg = new Configuration();
cfg.set("hadoop.util.hash.type", "murmur");
assertTrue("testHash", MurmurHash.getInstance() == Hash.getInstance(cfg));
cfg = new Configuration();
cfg.set("hadoop.util.hash.type", "jenkins");
assertTrue("testHash jenkins configuration error !!!",
JenkinsHash.getInstance() == Hash.getInstance(cfg));
cfg = new Configuration();
assertTrue("testHash undefine configuration error !!!",
MurmurHash.getInstance() == Hash.getInstance(cfg));
assertTrue("testHash error jenkin getInstance !!!",
JenkinsHash.getInstance() == Hash.getInstance(Hash.JENKINS_HASH));
assertTrue("testHash error murmur getInstance !!!",
MurmurHash.getInstance() == Hash.getInstance(Hash.MURMUR_HASH));
assertNull("testHash error invalid getInstance !!!",
Hash.getInstance(Hash.INVALID_HASH));
int murmurHash = Hash.getInstance(Hash.MURMUR_HASH).hash(LINE.getBytes());
for (int i = 0; i < iterations; i++) {
assertTrue("multiple evaluation murmur hash error !!!",
murmurHash == Hash.getInstance(Hash.MURMUR_HASH)
.hash(LINE.getBytes()));
}
murmurHash = Hash.getInstance(Hash.MURMUR_HASH).hash(LINE.getBytes(), 67);
for (int i = 0; i < iterations; i++) {
assertTrue(
"multiple evaluation murmur hash error !!!",
murmurHash == Hash.getInstance(Hash.MURMUR_HASH).hash(
LINE.getBytes(), 67));
}
int jenkinsHash = Hash.getInstance(Hash.JENKINS_HASH).hash(LINE.getBytes());
for (int i = 0; i < iterations; i++) {
assertTrue(
"multiple evaluation jenkins hash error !!!",
jenkinsHash == Hash.getInstance(Hash.JENKINS_HASH).hash(
LINE.getBytes()));
}
jenkinsHash = Hash.getInstance(Hash.JENKINS_HASH).hash(LINE.getBytes(), 67);
for (int i = 0; i < iterations; i++) {
assertTrue(
"multiple evaluation jenkins hash error !!!",
jenkinsHash == Hash.getInstance(Hash.JENKINS_HASH).hash(
LINE.getBytes(), 67));
}
}
}

View File

@ -0,0 +1,4 @@
%2F dir 1380270822000+511+root+wheel 0 0 dir1
%2Fdir1 dir 1380270441000+493+jdere+wheel 0 0 1.txt 2.txt
%2Fdir1%2F1.txt file part-0 0 0 1380270439000+420+jdere+wheel
%2Fdir1%2F2.txt file part-0 0 0 1380270441000+420+jdere+wheel

View File

@ -0,0 +1,2 @@
3
0 1210114968 0 232

View File

@ -22,13 +22,8 @@ import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mount.MountdBase; import org.apache.hadoop.mount.MountdBase;
import org.apache.hadoop.oncrpc.RpcProgram; import org.apache.hadoop.oncrpc.RpcProgram;
import org.apache.hadoop.oncrpc.RpcUtil;
import org.apache.hadoop.oncrpc.SimpleTcpServer; import org.apache.hadoop.oncrpc.SimpleTcpServer;
import org.apache.hadoop.oncrpc.SimpleTcpServerHandler;
import org.apache.hadoop.portmap.PortmapMapping; import org.apache.hadoop.portmap.PortmapMapping;
import org.jboss.netty.channel.ChannelPipeline;
import org.jboss.netty.channel.ChannelPipelineFactory;
import org.jboss.netty.channel.Channels;
/** /**
* Nfs server. Supports NFS v3 using {@link RpcProgram}. * Nfs server. Supports NFS v3 using {@link RpcProgram}.
@ -72,19 +67,7 @@ public abstract class Nfs3Base {
private void startTCPServer() { private void startTCPServer() {
SimpleTcpServer tcpServer = new SimpleTcpServer(nfsPort, SimpleTcpServer tcpServer = new SimpleTcpServer(nfsPort,
rpcProgram, 0) { rpcProgram, 0);
@Override
public ChannelPipelineFactory getPipelineFactory() {
return new ChannelPipelineFactory() {
@Override
public ChannelPipeline getPipeline() {
return Channels.pipeline(
RpcUtil.constructRpcFrameDecoder(),
new SimpleTcpServerHandler(rpcProgram));
}
};
}
};
tcpServer.run(); tcpServer.run();
} }
} }

View File

@ -97,6 +97,6 @@ public interface Nfs3Interface {
InetAddress client); InetAddress client);
/** COMMIT: Commit cached data on a server to stable storage */ /** COMMIT: Commit cached data on a server to stable storage */
public NFS3Response commit(XDR xdr, SecurityHandler securityHandler, public NFS3Response commit(XDR xdr, Channel channel, int xid,
InetAddress client); SecurityHandler securityHandler, InetAddress client);
} }

View File

@ -28,8 +28,8 @@ import org.apache.hadoop.oncrpc.XDR;
* WRITE3 Request * WRITE3 Request
*/ */
public class WRITE3Request extends RequestWithHandle { public class WRITE3Request extends RequestWithHandle {
private final long offset; private long offset;
private final int count; private int count;
private final WriteStableHow stableHow; private final WriteStableHow stableHow;
private final ByteBuffer data; private final ByteBuffer data;
@ -54,10 +54,18 @@ public class WRITE3Request extends RequestWithHandle {
return this.offset; return this.offset;
} }
public void setOffset(long offset) {
this.offset = offset;
}
public int getCount() { public int getCount() {
return this.count; return this.count;
} }
public void setCount(int count) {
this.count = count;
}
public WriteStableHow getStableHow() { public WriteStableHow getStableHow() {
return this.stableHow; return this.stableHow;
} }

View File

@ -26,6 +26,8 @@ import org.apache.hadoop.nfs.nfs3.Nfs3Status;
import org.apache.hadoop.oncrpc.XDR; import org.apache.hadoop.oncrpc.XDR;
import org.apache.hadoop.oncrpc.security.Verifier; import org.apache.hadoop.oncrpc.security.Verifier;
import com.google.common.annotations.VisibleForTesting;
/** /**
* READDIR3 Response * READDIR3 Response
*/ */
@ -49,7 +51,8 @@ public class READDIR3Response extends NFS3Response {
return fileId; return fileId;
} }
String getName() { @VisibleForTesting
public String getName() {
return name; return name;
} }
@ -66,6 +69,11 @@ public class READDIR3Response extends NFS3Response {
this.entries = Collections.unmodifiableList(Arrays.asList(entries)); this.entries = Collections.unmodifiableList(Arrays.asList(entries));
this.eof = eof; this.eof = eof;
} }
@VisibleForTesting
public List<Entry3> getEntries() {
return this.entries;
}
} }
public READDIR3Response(int status) { public READDIR3Response(int status) {

View File

@ -27,6 +27,8 @@ import org.apache.hadoop.nfs.nfs3.Nfs3Status;
import org.apache.hadoop.oncrpc.XDR; import org.apache.hadoop.oncrpc.XDR;
import org.apache.hadoop.oncrpc.security.Verifier; import org.apache.hadoop.oncrpc.security.Verifier;
import com.google.common.annotations.VisibleForTesting;
/** /**
* READDIRPLUS3 Response * READDIRPLUS3 Response
*/ */
@ -51,6 +53,11 @@ public class READDIRPLUS3Response extends NFS3Response {
this.objFileHandle = objFileHandle; this.objFileHandle = objFileHandle;
} }
@VisibleForTesting
public String getName() {
return name;
}
void seralize(XDR xdr) { void seralize(XDR xdr) {
xdr.writeLongAsHyper(fileId); xdr.writeLongAsHyper(fileId);
xdr.writeString(name); xdr.writeString(name);
@ -71,7 +78,8 @@ public class READDIRPLUS3Response extends NFS3Response {
this.eof = eof; this.eof = eof;
} }
List<EntryPlus3> getEntries() { @VisibleForTesting
public List<EntryPlus3> getEntries() {
return entries; return entries;
} }
@ -80,6 +88,11 @@ public class READDIRPLUS3Response extends NFS3Response {
} }
} }
@VisibleForTesting
public DirListPlus3 getDirListPlus() {
return dirListPlus;
}
public READDIRPLUS3Response(int status) { public READDIRPLUS3Response(int status) {
this(status, null, 0, null); this(status, null, 0, null);
} }

View File

@ -44,7 +44,7 @@ import com.google.common.annotations.VisibleForTesting;
public class RpcCallCache { public class RpcCallCache {
public static class CacheEntry { public static class CacheEntry {
private XDR response; // null if no response has been sent private RpcResponse response; // null if no response has been sent
public CacheEntry() { public CacheEntry() {
response = null; response = null;
@ -58,11 +58,11 @@ public class RpcCallCache {
return response != null; return response != null;
} }
public XDR getResponse() { public RpcResponse getResponse() {
return response; return response;
} }
public void setResponse(XDR response) { public void setResponse(RpcResponse response) {
this.response = response; this.response = response;
} }
} }
@ -128,13 +128,13 @@ public class RpcCallCache {
} }
/** Mark a request as completed and add corresponding response to the cache */ /** Mark a request as completed and add corresponding response to the cache */
public void callCompleted(InetAddress clientId, int xid, XDR response) { public void callCompleted(InetAddress clientId, int xid, RpcResponse response) {
ClientRequest req = new ClientRequest(clientId, xid); ClientRequest req = new ClientRequest(clientId, xid);
CacheEntry e; CacheEntry e;
synchronized(map) { synchronized(map) {
e = map.get(req); e = map.get(req);
} }
e.setResponse(response); e.response = response;
} }
/** /**

View File

@ -0,0 +1,60 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.oncrpc;
import java.net.SocketAddress;
import org.jboss.netty.buffer.ChannelBuffer;
import org.jboss.netty.channel.Channel;
import org.jboss.netty.channel.ChannelHandlerContext;
/**
* RpcInfo records all contextual information of an RPC message. It contains
* the RPC header, the parameters, and the information of the remote peer.
*/
public final class RpcInfo {
private final RpcMessage header;
private final ChannelBuffer data;
private final Channel channel;
private final SocketAddress remoteAddress;
public RpcInfo(RpcMessage header, ChannelBuffer data,
ChannelHandlerContext channelContext, Channel channel,
SocketAddress remoteAddress) {
this.header = header;
this.data = data;
this.channel = channel;
this.remoteAddress = remoteAddress;
}
public RpcMessage header() {
return header;
}
public ChannelBuffer data() {
return data;
}
public Channel channel() {
return channel;
}
public SocketAddress remoteAddress() {
return remoteAddress;
}
}
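A minimal sketch (names illustrative) of how a downstream handler consumes an RpcInfo, following the pattern the portmap and mountd programs adopt later in this patch:

    RpcCall call = (RpcCall) info.header();               // decoded RPC call header
    byte[] data = new byte[info.data().readableBytes()];
    info.data().readBytes(data);                          // copy out the procedure arguments
    XDR in = new XDR(data);                               // ready for procedure-specific decoding
    SocketAddress peer = info.remoteAddress();            // later handed to the RpcResponse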

View File

@ -18,22 +18,24 @@
package org.apache.hadoop.oncrpc; package org.apache.hadoop.oncrpc;
import java.io.IOException; import java.io.IOException;
import java.net.InetAddress;
import org.apache.commons.logging.Log; import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory; import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.oncrpc.RpcAcceptedReply.AcceptState; import org.apache.hadoop.oncrpc.RpcAcceptedReply.AcceptState;
import org.apache.hadoop.oncrpc.RpcCallCache.CacheEntry; import org.apache.hadoop.oncrpc.security.Verifier;
import org.apache.hadoop.oncrpc.security.VerifierNone;
import org.apache.hadoop.portmap.PortmapMapping; import org.apache.hadoop.portmap.PortmapMapping;
import org.apache.hadoop.portmap.PortmapRequest; import org.apache.hadoop.portmap.PortmapRequest;
import org.jboss.netty.channel.Channel; import org.jboss.netty.buffer.ChannelBuffer;
import org.jboss.netty.buffer.ChannelBuffers;
import org.jboss.netty.channel.ChannelHandlerContext;
import org.jboss.netty.channel.MessageEvent;
import org.jboss.netty.channel.SimpleChannelUpstreamHandler;
/** /**
* Class for writing RPC server programs based on RFC 1050. Extend this class * Class for writing RPC server programs based on RFC 1050. Extend this class
* and implement {@link #handleInternal} to handle the requests received. * and implement {@link #handleInternal} to handle the requests received.
*/ */
public abstract class RpcProgram { public abstract class RpcProgram extends SimpleChannelUpstreamHandler {
private static final Log LOG = LogFactory.getLog(RpcProgram.class); private static final Log LOG = LogFactory.getLog(RpcProgram.class);
public static final int RPCB_PORT = 111; public static final int RPCB_PORT = 111;
private final String program; private final String program;
@ -42,7 +44,6 @@ public abstract class RpcProgram {
private final int progNumber; private final int progNumber;
private final int lowProgVersion; private final int lowProgVersion;
private final int highProgVersion; private final int highProgVersion;
private final RpcCallCache rpcCallCache;
/** /**
* Constructor * Constructor
@ -53,19 +54,15 @@ public abstract class RpcProgram {
* @param progNumber program number as defined in RFC 1050 * @param progNumber program number as defined in RFC 1050
* @param lowProgVersion lowest version of the specification supported * @param lowProgVersion lowest version of the specification supported
* @param highProgVersion highest version of the specification supported * @param highProgVersion highest version of the specification supported
* @param cacheSize size of cache to handle duplicate requests. Size <= 0
* indicates no cache.
*/ */
protected RpcProgram(String program, String host, int port, int progNumber, protected RpcProgram(String program, String host, int port, int progNumber,
int lowProgVersion, int highProgVersion, int cacheSize) { int lowProgVersion, int highProgVersion) {
this.program = program; this.program = program;
this.host = host; this.host = host;
this.port = port; this.port = port;
this.progNumber = progNumber; this.progNumber = progNumber;
this.lowProgVersion = lowProgVersion; this.lowProgVersion = lowProgVersion;
this.highProgVersion = highProgVersion; this.highProgVersion = highProgVersion;
this.rpcCallCache = cacheSize > 0 ? new RpcCallCache(program, cacheSize)
: null;
} }
/** /**
@ -103,92 +100,50 @@ public abstract class RpcProgram {
} }
} }
/** @Override
* Handle an RPC request. public void messageReceived(ChannelHandlerContext ctx, MessageEvent e)
* @param rpcCall RPC call that is received throws Exception {
* @param in xdr with cursor at reading the remaining bytes of a method call RpcInfo info = (RpcInfo) e.getMessage();
* @param out xdr output corresponding to Rpc reply RpcCall call = (RpcCall) info.header();
* @param client making the Rpc request if (LOG.isTraceEnabled()) {
* @param channel connection over which Rpc request is received LOG.trace(program + " procedure #" + call.getProcedure());
* @return response xdr response
*/
protected abstract XDR handleInternal(RpcCall rpcCall, XDR in, XDR out,
InetAddress client, Channel channel);
public XDR handle(XDR xdr, InetAddress client, Channel channel) {
XDR out = new XDR();
RpcCall rpcCall = RpcCall.read(xdr);
if (LOG.isDebugEnabled()) {
LOG.debug(program + " procedure #" + rpcCall.getProcedure());
} }
if (!checkProgram(rpcCall.getProgram())) { if (this.progNumber != call.getProgram()) {
return programMismatch(out, rpcCall); LOG.warn("Invalid RPC call program " + call.getProgram());
RpcAcceptedReply reply = RpcAcceptedReply.getInstance(call.getXid(),
AcceptState.PROG_UNAVAIL, Verifier.VERIFIER_NONE);
XDR out = new XDR();
reply.write(out);
ChannelBuffer b = ChannelBuffers.wrappedBuffer(out.asReadOnlyWrap()
.buffer());
RpcResponse rsp = new RpcResponse(b, info.remoteAddress());
RpcUtil.sendRpcResponse(ctx, rsp);
return;
} }
if (!checkProgramVersion(rpcCall.getVersion())) { int ver = call.getVersion();
return programVersionMismatch(out, rpcCall); if (ver < lowProgVersion || ver > highProgVersion) {
LOG.warn("Invalid RPC call version " + ver);
RpcAcceptedReply reply = RpcAcceptedReply.getInstance(call.getXid(),
AcceptState.PROG_MISMATCH, Verifier.VERIFIER_NONE);
XDR out = new XDR();
reply.write(out);
out.writeInt(lowProgVersion);
out.writeInt(highProgVersion);
ChannelBuffer b = ChannelBuffers.wrappedBuffer(out.asReadOnlyWrap()
.buffer());
RpcResponse rsp = new RpcResponse(b, info.remoteAddress());
RpcUtil.sendRpcResponse(ctx, rsp);
return;
} }
// Check for duplicate requests in the cache for non-idempotent requests handleInternal(ctx, info);
boolean idempotent = rpcCallCache != null && !isIdempotent(rpcCall);
if (idempotent) {
CacheEntry entry = rpcCallCache.checkOrAddToCache(client, rpcCall.getXid());
if (entry != null) { // in cache
if (entry.isCompleted()) {
LOG.info("Sending the cached reply to retransmitted request "
+ rpcCall.getXid());
return entry.getResponse();
} else { // else request is in progress
LOG.info("Retransmitted request, transaction still in progress "
+ rpcCall.getXid());
// TODO: ignore the request?
}
}
}
XDR response = handleInternal(rpcCall, xdr, out, client, channel);
if (response.size() == 0) {
if (LOG.isDebugEnabled()) {
LOG.debug("No sync response, expect an async response for request XID="
+ rpcCall.getXid());
}
}
// Add the request to the cache
if (idempotent) {
rpcCallCache.callCompleted(client, rpcCall.getXid(), response);
}
return response;
}
private XDR programMismatch(XDR out, RpcCall call) {
LOG.warn("Invalid RPC call program " + call.getProgram());
RpcAcceptedReply reply = RpcAcceptedReply.getInstance(call.getXid(),
AcceptState.PROG_UNAVAIL, new VerifierNone());
reply.write(out);
return out;
}
private XDR programVersionMismatch(XDR out, RpcCall call) {
LOG.warn("Invalid RPC call version " + call.getVersion());
RpcAcceptedReply reply = RpcAcceptedReply.getInstance(call.getXid(),
AcceptState.PROG_MISMATCH, new VerifierNone());
reply.write(out);
out.writeInt(lowProgVersion);
out.writeInt(highProgVersion);
return out;
}
private boolean checkProgram(int progNumber) {
return this.progNumber == progNumber;
}
/** Return true if a the program version in rpcCall is supported */
private boolean checkProgramVersion(int programVersion) {
return programVersion >= lowProgVersion
&& programVersion <= highProgVersion;
} }
protected abstract void handleInternal(ChannelHandlerContext ctx, RpcInfo info);
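A compressed sketch of the reply path a handleInternal implementation now follows, assembled only from calls that appear elsewhere in this patch; a concrete program would substitute its own procedure dispatch before the accepted reply:

    RpcCall call = (RpcCall) info.header();
    XDR out = new XDR();
    RpcAcceptedReply.getAcceptInstance(call.getXid(), new VerifierNone()).write(out);
    ChannelBuffer b = ChannelBuffers.wrappedBuffer(out.asReadOnlyWrap().buffer());
    RpcUtil.sendRpcResponse(ctx, new RpcResponse(b, info.remoteAddress()));   // async reply, method returns void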
@Override @Override
public String toString() { public String toString() {

View File

@ -0,0 +1,45 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.oncrpc;
import java.net.SocketAddress;
import org.jboss.netty.buffer.ChannelBuffer;
/**
* RpcResponse encapsulates a response to a RPC request. It contains the data
* that is going to cross the wire, as well as the information of the remote
* peer.
*/
public class RpcResponse {
private final ChannelBuffer data;
private final SocketAddress remoteAddress;
public RpcResponse(ChannelBuffer data, SocketAddress remoteAddress) {
this.data = data;
this.remoteAddress = remoteAddress;
}
public ChannelBuffer data() {
return data;
}
public SocketAddress remoteAddress() {
return remoteAddress;
}
}

View File

@ -17,17 +17,23 @@
*/ */
package org.apache.hadoop.oncrpc; package org.apache.hadoop.oncrpc;
import java.nio.ByteBuffer;
import org.apache.commons.logging.Log; import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory; import org.apache.commons.logging.LogFactory;
import org.jboss.netty.buffer.ChannelBuffer; import org.jboss.netty.buffer.ChannelBuffer;
import org.jboss.netty.buffer.ChannelBuffers; import org.jboss.netty.buffer.ChannelBuffers;
import org.jboss.netty.channel.Channel; import org.jboss.netty.channel.Channel;
import org.jboss.netty.channel.ChannelHandlerContext; import org.jboss.netty.channel.ChannelHandlerContext;
import org.jboss.netty.channel.Channels;
import org.jboss.netty.channel.MessageEvent;
import org.jboss.netty.channel.SimpleChannelUpstreamHandler;
import org.jboss.netty.handler.codec.frame.FrameDecoder; import org.jboss.netty.handler.codec.frame.FrameDecoder;
public class RpcUtil { public final class RpcUtil {
/** /**
* The XID in RPC call. It is used for starting with new seed after each reboot. * The XID in RPC call. It is used for starting with new seed after each
* reboot.
*/ */
private static int xid = (int) (System.currentTimeMillis() / 1000) << 12; private static int xid = (int) (System.currentTimeMillis() / 1000) << 12;
@ -35,10 +41,27 @@ public class RpcUtil {
return xid = ++xid + caller.hashCode(); return xid = ++xid + caller.hashCode();
} }
public static void sendRpcResponse(ChannelHandlerContext ctx,
RpcResponse response) {
Channels.fireMessageReceived(ctx, response);
}
public static FrameDecoder constructRpcFrameDecoder() { public static FrameDecoder constructRpcFrameDecoder() {
return new RpcFrameDecoder(); return new RpcFrameDecoder();
} }
public static final SimpleChannelUpstreamHandler STAGE_RPC_MESSAGE_PARSER = new RpcMessageParserStage();
public static final SimpleChannelUpstreamHandler STAGE_RPC_TCP_RESPONSE = new RpcTcpResponseStage();
public static final SimpleChannelUpstreamHandler STAGE_RPC_UDP_RESPONSE = new RpcUdpResponseStage();
/**
* An RPC client can separate a RPC message into several frames (i.e.,
* fragments) when transferring it across the wire. RpcFrameDecoder
* reconstructs a full RPC message from these fragments.
*
* RpcFrameDecoder is a stateful pipeline stage. It has to be constructed for
* each RPC client.
*/
static class RpcFrameDecoder extends FrameDecoder { static class RpcFrameDecoder extends FrameDecoder {
public static final Log LOG = LogFactory.getLog(RpcFrameDecoder.class); public static final Log LOG = LogFactory.getLog(RpcFrameDecoder.class);
private ChannelBuffer currentFrame; private ChannelBuffer currentFrame;
@ -78,4 +101,68 @@ public class RpcUtil {
} }
} }
} }
/**
* RpcMessageParserStage parses the network bytes and encapsulates the RPC
* request into a RpcInfo instance.
*/
static final class RpcMessageParserStage extends SimpleChannelUpstreamHandler {
private static final Log LOG = LogFactory
.getLog(RpcMessageParserStage.class);
@Override
public void messageReceived(ChannelHandlerContext ctx, MessageEvent e)
throws Exception {
ChannelBuffer buf = (ChannelBuffer) e.getMessage();
ByteBuffer b = buf.toByteBuffer().asReadOnlyBuffer();
XDR in = new XDR(b, XDR.State.READING);
RpcInfo info = null;
try {
RpcCall callHeader = RpcCall.read(in);
ChannelBuffer dataBuffer = ChannelBuffers.wrappedBuffer(in.buffer()
.slice());
info = new RpcInfo(callHeader, dataBuffer, ctx, e.getChannel(),
e.getRemoteAddress());
} catch (Exception exc) {
LOG.info("Malfromed RPC request from " + e.getRemoteAddress());
}
if (info != null) {
Channels.fireMessageReceived(ctx, info);
}
}
}
/**
* RpcTcpResponseStage sends an RpcResponse across the wire with the
* appropriate fragment header.
*/
private static class RpcTcpResponseStage extends SimpleChannelUpstreamHandler {
@Override
public void messageReceived(ChannelHandlerContext ctx, MessageEvent e)
throws Exception {
RpcResponse r = (RpcResponse) e.getMessage();
byte[] fragmentHeader = XDR.recordMark(r.data().readableBytes(), true);
ChannelBuffer header = ChannelBuffers.wrappedBuffer(fragmentHeader);
ChannelBuffer d = ChannelBuffers.wrappedBuffer(header, r.data());
e.getChannel().write(d);
}
}
/**
* RpcUdpResponseStage sends an RpcResponse as a UDP packet, which does not
* require a fragment header.
*/
private static final class RpcUdpResponseStage extends
SimpleChannelUpstreamHandler {
@Override
public void messageReceived(ChannelHandlerContext ctx, MessageEvent e)
throws Exception {
RpcResponse r = (RpcResponse) e.getMessage();
e.getChannel().write(r.data(), r.remoteAddress());
}
}
} }
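Taken together, the frame decoder and the three singleton stages compose the new server pipelines; the TCP and UDP servers below wire them as follows (a sketch of the ordering only):

    // TCP: stream transport needs record-mark framing on both sides
    Channels.pipeline(RpcUtil.constructRpcFrameDecoder(),   // reassemble fragments, one instance per client
                      RpcUtil.STAGE_RPC_MESSAGE_PARSER,     // bytes -> RpcInfo
                      rpcProgram,                           // RpcInfo -> RpcResponse
                      RpcUtil.STAGE_RPC_TCP_RESPONSE);      // prepend fragment header and write

    // UDP: datagrams arrive whole, so no frame decoder and no fragment header on the way out
    Channels.pipeline(RpcUtil.STAGE_RPC_MESSAGE_PARSER,
                      rpcProgram,
                      RpcUtil.STAGE_RPC_UDP_RESPONSE);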

View File

@ -27,6 +27,7 @@ import org.jboss.netty.channel.ChannelFactory;
import org.jboss.netty.channel.ChannelPipeline; import org.jboss.netty.channel.ChannelPipeline;
import org.jboss.netty.channel.ChannelPipelineFactory; import org.jboss.netty.channel.ChannelPipelineFactory;
import org.jboss.netty.channel.Channels; import org.jboss.netty.channel.Channels;
import org.jboss.netty.channel.SimpleChannelUpstreamHandler;
import org.jboss.netty.channel.socket.nio.NioServerSocketChannelFactory; import org.jboss.netty.channel.socket.nio.NioServerSocketChannelFactory;
/** /**
@ -35,8 +36,7 @@ import org.jboss.netty.channel.socket.nio.NioServerSocketChannelFactory;
public class SimpleTcpServer { public class SimpleTcpServer {
public static final Log LOG = LogFactory.getLog(SimpleTcpServer.class); public static final Log LOG = LogFactory.getLog(SimpleTcpServer.class);
protected final int port; protected final int port;
protected final ChannelPipelineFactory pipelineFactory; protected final SimpleChannelUpstreamHandler rpcProgram;
protected final RpcProgram rpcProgram;
/** The maximum number of I/O worker threads */ /** The maximum number of I/O worker threads */
protected final int workerCount; protected final int workerCount;
@ -50,18 +50,6 @@ public class SimpleTcpServer {
this.port = port; this.port = port;
this.rpcProgram = program; this.rpcProgram = program;
this.workerCount = workercount; this.workerCount = workercount;
this.pipelineFactory = getPipelineFactory();
}
public ChannelPipelineFactory getPipelineFactory() {
return new ChannelPipelineFactory() {
@Override
public ChannelPipeline getPipeline() {
return Channels.pipeline(
RpcUtil.constructRpcFrameDecoder(),
new SimpleTcpServerHandler(rpcProgram));
}
};
} }
public void run() { public void run() {
@ -78,7 +66,15 @@ public class SimpleTcpServer {
} }
ServerBootstrap bootstrap = new ServerBootstrap(factory); ServerBootstrap bootstrap = new ServerBootstrap(factory);
bootstrap.setPipelineFactory(pipelineFactory); bootstrap.setPipelineFactory(new ChannelPipelineFactory() {
@Override
public ChannelPipeline getPipeline() throws Exception {
return Channels.pipeline(RpcUtil.constructRpcFrameDecoder(),
RpcUtil.STAGE_RPC_MESSAGE_PARSER, rpcProgram,
RpcUtil.STAGE_RPC_TCP_RESPONSE);
}
});
bootstrap.setOption("child.tcpNoDelay", true); bootstrap.setOption("child.tcpNoDelay", true);
bootstrap.setOption("child.keepAlive", true); bootstrap.setOption("child.keepAlive", true);

View File

@ -1,63 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.oncrpc;
import java.net.InetAddress;
import java.net.InetSocketAddress;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.jboss.netty.buffer.ChannelBuffer;
import org.jboss.netty.channel.Channel;
import org.jboss.netty.channel.ChannelHandlerContext;
import org.jboss.netty.channel.ExceptionEvent;
import org.jboss.netty.channel.MessageEvent;
import org.jboss.netty.channel.SimpleChannelHandler;
/**
* Handler used by {@link SimpleTcpServer}.
*/
public class SimpleTcpServerHandler extends SimpleChannelHandler {
public static final Log LOG = LogFactory.getLog(SimpleTcpServerHandler.class);
protected final RpcProgram rpcProgram;
public SimpleTcpServerHandler(RpcProgram rpcProgram) {
this.rpcProgram = rpcProgram;
}
@Override
public void messageReceived(ChannelHandlerContext ctx, MessageEvent e) {
ChannelBuffer buf = (ChannelBuffer) e.getMessage();
XDR request = new XDR(buf.toByteBuffer().asReadOnlyBuffer(), XDR.State.READING);
InetAddress remoteInetAddr = ((InetSocketAddress) ctx.getChannel()
.getRemoteAddress()).getAddress();
Channel outChannel = e.getChannel();
XDR response = rpcProgram.handle(request, remoteInetAddr, outChannel);
if (response.size() > 0) {
outChannel.write(XDR.writeMessageTcp(response, true));
}
}
@Override
public void exceptionCaught(ChannelHandlerContext ctx, ExceptionEvent e) {
LOG.warn("Encountered ", e.getCause());
e.getChannel().close();
}
}

View File

@ -23,9 +23,8 @@ import java.util.concurrent.Executors;
import org.apache.commons.logging.Log; import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory; import org.apache.commons.logging.LogFactory;
import org.jboss.netty.bootstrap.ConnectionlessBootstrap; import org.jboss.netty.bootstrap.ConnectionlessBootstrap;
import org.jboss.netty.channel.ChannelPipeline;
import org.jboss.netty.channel.ChannelPipelineFactory;
import org.jboss.netty.channel.Channels; import org.jboss.netty.channel.Channels;
import org.jboss.netty.channel.SimpleChannelUpstreamHandler;
import org.jboss.netty.channel.socket.DatagramChannelFactory; import org.jboss.netty.channel.socket.DatagramChannelFactory;
import org.jboss.netty.channel.socket.nio.NioDatagramChannelFactory; import org.jboss.netty.channel.socket.nio.NioDatagramChannelFactory;
@ -38,20 +37,13 @@ public class SimpleUdpServer {
private final int RECEIVE_BUFFER_SIZE = 65536; private final int RECEIVE_BUFFER_SIZE = 65536;
protected final int port; protected final int port;
protected final ChannelPipelineFactory pipelineFactory; protected final SimpleChannelUpstreamHandler rpcProgram;
protected final RpcProgram rpcProgram;
protected final int workerCount; protected final int workerCount;
public SimpleUdpServer(int port, RpcProgram program, int workerCount) { public SimpleUdpServer(int port, SimpleChannelUpstreamHandler program, int workerCount) {
this.port = port; this.port = port;
this.rpcProgram = program; this.rpcProgram = program;
this.workerCount = workerCount; this.workerCount = workerCount;
this.pipelineFactory = new ChannelPipelineFactory() {
@Override
public ChannelPipeline getPipeline() {
return Channels.pipeline(new SimpleUdpServerHandler(rpcProgram));
}
};
} }
public void run() { public void run() {
@ -60,8 +52,9 @@ public class SimpleUdpServer {
Executors.newCachedThreadPool(), workerCount); Executors.newCachedThreadPool(), workerCount);
ConnectionlessBootstrap b = new ConnectionlessBootstrap(f); ConnectionlessBootstrap b = new ConnectionlessBootstrap(f);
ChannelPipeline p = b.getPipeline(); b.setPipeline(Channels.pipeline(
p.addLast("handler", new SimpleUdpServerHandler(rpcProgram)); RpcUtil.STAGE_RPC_MESSAGE_PARSER, rpcProgram,
RpcUtil.STAGE_RPC_UDP_RESPONSE));
b.setOption("broadcast", "false"); b.setOption("broadcast", "false");
b.setOption("sendBufferSize", SEND_BUFFER_SIZE); b.setOption("sendBufferSize", SEND_BUFFER_SIZE);

View File

@ -1,61 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.oncrpc;
import java.net.InetAddress;
import java.net.InetSocketAddress;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.jboss.netty.buffer.ChannelBuffer;
import org.jboss.netty.channel.ChannelHandlerContext;
import org.jboss.netty.channel.ExceptionEvent;
import org.jboss.netty.channel.MessageEvent;
import org.jboss.netty.channel.SimpleChannelHandler;
/**
* Handler used by {@link SimpleUdpServer}.
*/
public class SimpleUdpServerHandler extends SimpleChannelHandler {
public static final Log LOG = LogFactory.getLog(SimpleUdpServerHandler.class);
private final RpcProgram rpcProgram;
public SimpleUdpServerHandler(RpcProgram rpcProgram) {
this.rpcProgram = rpcProgram;
}
@Override
public void messageReceived(ChannelHandlerContext ctx, MessageEvent e) {
ChannelBuffer buf = (ChannelBuffer) e.getMessage();
XDR request = new XDR(buf.toByteBuffer().asReadOnlyBuffer(), XDR.State.READING);
InetAddress remoteInetAddr = ((InetSocketAddress) e.getRemoteAddress())
.getAddress();
XDR response = rpcProgram.handle(request, remoteInetAddr, null);
e.getChannel().write(XDR.writeMessageUdp(response.asReadOnlyWrap()),
e.getRemoteAddress());
}
@Override
public void exceptionCaught(ChannelHandlerContext ctx, ExceptionEvent e) {
LOG.warn("Encountered ", e.getCause());
e.getChannel().close();
}
}

View File

@ -93,6 +93,10 @@ public final class XDR {
return n; return n;
} }
public ByteBuffer buffer() {
return buf.duplicate();
}
public int size() { public int size() {
// TODO: This overloading intends to be compatible with the semantics of // TODO: This overloading intends to be compatible with the semantics of
// the previous version of the class. This function should be separated into // the previous version of the class. This function should be separated into
@ -219,7 +223,7 @@ public final class XDR {
return xdr.buf.remaining() >= len; return xdr.buf.remaining() >= len;
} }
private static byte[] recordMark(int size, boolean last) { static byte[] recordMark(int size, boolean last) {
byte[] b = new byte[SIZEOF_INT]; byte[] b = new byte[SIZEOF_INT];
ByteBuffer buf = ByteBuffer.wrap(b); ByteBuffer buf = ByteBuffer.wrap(b);
buf.putInt(!last ? size : size | 0x80000000); buf.putInt(!last ? size : size | 0x80000000);
@ -259,9 +263,8 @@ public final class XDR {
@VisibleForTesting @VisibleForTesting
public byte[] getBytes() { public byte[] getBytes() {
ByteBuffer d = buf.duplicate(); ByteBuffer d = asReadOnlyWrap().buffer();
byte[] b = new byte[d.position()]; byte[] b = new byte[d.remaining()];
d.flip();
d.get(b); d.get(b);
return b; return b;
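As context for the recordMark() visibility change above: the record mark is the standard ONC RPC fragment header, a 4-byte big-endian length whose high bit flags the final fragment. With illustrative sizes:

    XDR.recordMark(10, false)   // encodes 0x0000000A: 10-byte fragment, more fragments follow
    XDR.recordMark(10, true)    // encodes 0x8000000A: 10-byte fragment, last one (10 | 0x80000000)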

View File

@ -18,16 +18,17 @@
package org.apache.hadoop.oncrpc.security; package org.apache.hadoop.oncrpc.security;
import org.apache.hadoop.oncrpc.XDR; import org.apache.hadoop.oncrpc.XDR;
import org.apache.hadoop.oncrpc.security.RpcAuthInfo.AuthFlavor;
/** /**
* Base class for verifier. Currently our authentication only supports 3 types * Base class for verifier. Currently our authentication only supports 3 types
* of auth flavors: {@link AuthFlavor#AUTH_NONE}, {@link AuthFlavor#AUTH_SYS}, * of auth flavors: {@link RpcAuthInfo.AuthFlavor#AUTH_NONE}, {@link RpcAuthInfo.AuthFlavor#AUTH_SYS},
* and {@link AuthFlavor#RPCSEC_GSS}. Thus for verifier we only need to handle * and {@link RpcAuthInfo.AuthFlavor#RPCSEC_GSS}. Thus for verifier we only need to handle
* AUTH_NONE and RPCSEC_GSS * AUTH_NONE and RPCSEC_GSS
*/ */
public abstract class Verifier extends RpcAuthInfo { public abstract class Verifier extends RpcAuthInfo {
public static final Verifier VERIFIER_NONE = new VerifierNone();
protected Verifier(AuthFlavor flavor) { protected Verifier(AuthFlavor flavor) {
super(flavor); super(flavor);
} }
@ -61,6 +62,4 @@ public abstract class Verifier extends RpcAuthInfo {
} }
verifier.write(xdr); verifier.write(xdr);
} }
} }

View File

@ -17,7 +17,6 @@
*/ */
package org.apache.hadoop.portmap; package org.apache.hadoop.portmap;
import java.net.InetAddress;
import java.util.HashMap; import java.util.HashMap;
import java.util.Map.Entry; import java.util.Map.Entry;
import java.util.Set; import java.util.Set;
@ -26,10 +25,15 @@ import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory; import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.oncrpc.RpcAcceptedReply; import org.apache.hadoop.oncrpc.RpcAcceptedReply;
import org.apache.hadoop.oncrpc.RpcCall; import org.apache.hadoop.oncrpc.RpcCall;
import org.apache.hadoop.oncrpc.RpcInfo;
import org.apache.hadoop.oncrpc.RpcProgram; import org.apache.hadoop.oncrpc.RpcProgram;
import org.apache.hadoop.oncrpc.RpcResponse;
import org.apache.hadoop.oncrpc.RpcUtil;
import org.apache.hadoop.oncrpc.XDR; import org.apache.hadoop.oncrpc.XDR;
import org.apache.hadoop.oncrpc.security.VerifierNone; import org.apache.hadoop.oncrpc.security.VerifierNone;
import org.jboss.netty.channel.Channel; import org.jboss.netty.buffer.ChannelBuffer;
import org.jboss.netty.buffer.ChannelBuffers;
import org.jboss.netty.channel.ChannelHandlerContext;
/** /**
* An rpcbind request handler. * An rpcbind request handler.
@ -44,7 +48,7 @@ public class RpcProgramPortmap extends RpcProgram implements PortmapInterface {
private final HashMap<String, PortmapMapping> map; private final HashMap<String, PortmapMapping> map;
public RpcProgramPortmap() { public RpcProgramPortmap() {
super("portmap", "localhost", RPCB_PORT, PROGRAM, VERSION, VERSION, 0); super("portmap", "localhost", RPCB_PORT, PROGRAM, VERSION, VERSION);
map = new HashMap<String, PortmapMapping>(256); map = new HashMap<String, PortmapMapping>(256);
} }
@ -130,10 +134,15 @@ public class RpcProgramPortmap extends RpcProgram implements PortmapInterface {
} }
@Override @Override
public XDR handleInternal(RpcCall rpcCall, XDR in, XDR out, public void handleInternal(ChannelHandlerContext ctx, RpcInfo info) {
InetAddress client, Channel channel) { RpcCall rpcCall = (RpcCall) info.header();
final Procedure portmapProc = Procedure.fromValue(rpcCall.getProcedure()); final Procedure portmapProc = Procedure.fromValue(rpcCall.getProcedure());
int xid = rpcCall.getXid(); int xid = rpcCall.getXid();
byte[] data = new byte[info.data().readableBytes()];
info.data().readBytes(data);
XDR in = new XDR(data);
XDR out = new XDR();
if (portmapProc == Procedure.PMAPPROC_NULL) { if (portmapProc == Procedure.PMAPPROC_NULL) {
out = nullOp(xid, in, out); out = nullOp(xid, in, out);
} else if (portmapProc == Procedure.PMAPPROC_SET) { } else if (portmapProc == Procedure.PMAPPROC_SET) {
@ -148,11 +157,14 @@ public class RpcProgramPortmap extends RpcProgram implements PortmapInterface {
out = getport(xid, in, out); out = getport(xid, in, out);
} else { } else {
LOG.info("PortmapHandler unknown rpc procedure=" + portmapProc); LOG.info("PortmapHandler unknown rpc procedure=" + portmapProc);
RpcAcceptedReply.getInstance(xid, RpcAcceptedReply reply = RpcAcceptedReply.getInstance(xid,
RpcAcceptedReply.AcceptState.PROC_UNAVAIL, new VerifierNone()).write( RpcAcceptedReply.AcceptState.PROC_UNAVAIL, new VerifierNone());
out); reply.write(out);
} }
return out;
ChannelBuffer buf = ChannelBuffers.wrappedBuffer(out.asReadOnlyWrap().buffer());
RpcResponse rsp = new RpcResponse(buf, info.remoteAddress());
RpcUtil.sendRpcResponse(ctx, rsp);
} }
@Override @Override

View File

@ -22,7 +22,6 @@ import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue; import static org.junit.Assert.assertTrue;
import java.net.InetAddress;
import java.nio.ByteBuffer; import java.nio.ByteBuffer;
import org.apache.hadoop.oncrpc.RpcUtil.RpcFrameDecoder; import org.apache.hadoop.oncrpc.RpcUtil.RpcFrameDecoder;
@ -30,6 +29,7 @@ import org.apache.hadoop.oncrpc.security.CredentialsNone;
import org.apache.hadoop.oncrpc.security.VerifierNone; import org.apache.hadoop.oncrpc.security.VerifierNone;
import org.jboss.netty.buffer.ByteBufferBackedChannelBuffer; import org.jboss.netty.buffer.ByteBufferBackedChannelBuffer;
import org.jboss.netty.buffer.ChannelBuffer; import org.jboss.netty.buffer.ChannelBuffer;
import org.jboss.netty.buffer.ChannelBuffers;
import org.jboss.netty.channel.Channel; import org.jboss.netty.channel.Channel;
import org.jboss.netty.channel.ChannelHandlerContext; import org.jboss.netty.channel.ChannelHandlerContext;
import org.junit.Test; import org.junit.Test;
@ -38,7 +38,7 @@ import org.mockito.Mockito;
public class TestFrameDecoder { public class TestFrameDecoder {
private static int port = 12345; // some random server port private static int port = 12345; // some random server port
private static XDR result = null; private static int resultSize;
static void testRequest(XDR request) { static void testRequest(XDR request) {
SimpleTcpClient tcpClient = new SimpleTcpClient("localhost", port, request, SimpleTcpClient tcpClient = new SimpleTcpClient("localhost", port, request,
@ -49,18 +49,20 @@ public class TestFrameDecoder {
static class TestRpcProgram extends RpcProgram { static class TestRpcProgram extends RpcProgram {
protected TestRpcProgram(String program, String host, int port, protected TestRpcProgram(String program, String host, int port,
int progNumber, int lowProgVersion, int highProgVersion, int cacheSize) { int progNumber, int lowProgVersion, int highProgVersion) {
super(program, host, port, progNumber, lowProgVersion, highProgVersion, super(program, host, port, progNumber, lowProgVersion, highProgVersion);
cacheSize);
} }
@Override @Override
public XDR handleInternal(RpcCall rpcCall, XDR in, XDR out, protected void handleInternal(ChannelHandlerContext ctx, RpcInfo info) {
InetAddress client, Channel channel) { resultSize = info.data().readableBytes();
// Get the final complete request and return a void response. RpcAcceptedReply reply = RpcAcceptedReply.getAcceptInstance(1234,
result = in; new VerifierNone());
RpcAcceptedReply.getAcceptInstance(1234, new VerifierNone()).write(out); XDR out = new XDR();
return out; reply.write(out);
ChannelBuffer b = ChannelBuffers.wrappedBuffer(out.asReadOnlyWrap().buffer());
RpcResponse rsp = new RpcResponse(b, info.remoteAddress());
RpcUtil.sendRpcResponse(ctx, rsp);
} }
@Override @Override
@ -147,21 +149,22 @@ public class TestFrameDecoder {
public void testFrames() { public void testFrames() {
RpcProgram program = new TestFrameDecoder.TestRpcProgram("TestRpcProgram", RpcProgram program = new TestFrameDecoder.TestRpcProgram("TestRpcProgram",
"localhost", port, 100000, 1, 2, 100); "localhost", port, 100000, 1, 2);
SimpleTcpServer tcpServer = new SimpleTcpServer(port, program, 1); SimpleTcpServer tcpServer = new SimpleTcpServer(port, program, 1);
tcpServer.run(); tcpServer.run();
XDR xdrOut = createGetportMount(); XDR xdrOut = createGetportMount();
int headerSize = xdrOut.size();
int bufsize = 2 * 1024 * 1024; int bufsize = 2 * 1024 * 1024;
byte[] buffer = new byte[bufsize]; byte[] buffer = new byte[bufsize];
xdrOut.writeFixedOpaque(buffer); xdrOut.writeFixedOpaque(buffer);
int requestSize = xdrOut.size(); int requestSize = xdrOut.size() - headerSize;
// Send the request to the server // Send the request to the server
testRequest(xdrOut); testRequest(xdrOut);
// Verify the server got the request with right size // Verify the server got the request with right size
assertTrue(requestSize == result.size()); assertEquals(requestSize, resultSize);
} }
static void createPortmapXDRheader(XDR xdr_out, int procedure) { static void createPortmapXDRheader(XDR xdr_out, int procedure) {
@ -173,10 +176,6 @@ public class TestFrameDecoder {
static XDR createGetportMount() { static XDR createGetportMount() {
XDR xdr_out = new XDR(); XDR xdr_out = new XDR();
createPortmapXDRheader(xdr_out, 3); createPortmapXDRheader(xdr_out, 3);
xdr_out.writeInt(0); // AUTH_NULL
xdr_out.writeInt(0); // cred len
xdr_out.writeInt(0); // verifier AUTH_NULL
xdr_out.writeInt(0); // verf len
return xdr_out; return xdr_out;
} }
/* /*

View File

@ -32,6 +32,8 @@ import org.apache.hadoop.oncrpc.RpcCallCache.CacheEntry;
import org.apache.hadoop.oncrpc.RpcCallCache.ClientRequest; import org.apache.hadoop.oncrpc.RpcCallCache.ClientRequest;
import org.junit.Test; import org.junit.Test;
import static org.mockito.Mockito.*;
/** /**
* Unit tests for {@link RpcCallCache} * Unit tests for {@link RpcCallCache}
*/ */
@ -67,7 +69,7 @@ public class TestRpcCallCache {
validateInprogressCacheEntry(e); validateInprogressCacheEntry(e);
// Set call as completed // Set call as completed
XDR response = new XDR(); RpcResponse response = mock(RpcResponse.class);
cache.callCompleted(clientIp, xid, response); cache.callCompleted(clientIp, xid, response);
e = cache.checkOrAddToCache(clientIp, xid); e = cache.checkOrAddToCache(clientIp, xid);
validateCompletedCacheEntry(e, response); validateCompletedCacheEntry(e, response);
@ -79,7 +81,7 @@ public class TestRpcCallCache {
assertNull(c.getResponse()); assertNull(c.getResponse());
} }
private void validateCompletedCacheEntry(CacheEntry c, XDR response) { private void validateCompletedCacheEntry(CacheEntry c, RpcResponse response) {
assertFalse(c.isInProgress()); assertFalse(c.isInProgress());
assertTrue(c.isCompleted()); assertTrue(c.isCompleted());
assertEquals(response, c.getResponse()); assertEquals(response, c.getResponse());
@ -93,7 +95,7 @@ public class TestRpcCallCache {
assertFalse(c.isCompleted()); assertFalse(c.isCompleted());
assertNull(c.getResponse()); assertNull(c.getResponse());
XDR response = new XDR(); RpcResponse response = mock(RpcResponse.class);
c.setResponse(response); c.setResponse(response);
validateCompletedCacheEntry(c, response); validateCompletedCacheEntry(c, response);
} }

View File

@ -418,7 +418,11 @@ public class Server {
Properties props = new Properties(); Properties props = new Properties();
try { try {
InputStream is = getResource(DEFAULT_LOG4J_PROPERTIES); InputStream is = getResource(DEFAULT_LOG4J_PROPERTIES);
props.load(is); try {
props.load(is);
} finally {
is.close();
}
} catch (IOException ex) { } catch (IOException ex) {
throw new ServerException(ServerException.ERROR.S03, DEFAULT_LOG4J_PROPERTIES, ex.getMessage(), ex); throw new ServerException(ServerException.ERROR.S03, DEFAULT_LOG4J_PROPERTIES, ex.getMessage(), ex);
} }

View File

@ -19,6 +19,7 @@ package org.apache.hadoop.hdfs.nfs.mount;
import java.io.IOException; import java.io.IOException;
import java.net.InetAddress; import java.net.InetAddress;
import java.net.InetSocketAddress;
import java.util.ArrayList; import java.util.ArrayList;
import java.util.Collections; import java.util.Collections;
import java.util.List; import java.util.List;
@ -38,10 +39,15 @@ import org.apache.hadoop.nfs.nfs3.FileHandle;
import org.apache.hadoop.nfs.nfs3.Nfs3Status; import org.apache.hadoop.nfs.nfs3.Nfs3Status;
import org.apache.hadoop.oncrpc.RpcAcceptedReply; import org.apache.hadoop.oncrpc.RpcAcceptedReply;
import org.apache.hadoop.oncrpc.RpcCall; import org.apache.hadoop.oncrpc.RpcCall;
import org.apache.hadoop.oncrpc.RpcInfo;
import org.apache.hadoop.oncrpc.RpcProgram; import org.apache.hadoop.oncrpc.RpcProgram;
import org.apache.hadoop.oncrpc.RpcResponse;
import org.apache.hadoop.oncrpc.RpcUtil;
import org.apache.hadoop.oncrpc.XDR; import org.apache.hadoop.oncrpc.XDR;
import org.apache.hadoop.oncrpc.security.VerifierNone; import org.apache.hadoop.oncrpc.security.VerifierNone;
import org.jboss.netty.channel.Channel; import org.jboss.netty.buffer.ChannelBuffer;
import org.jboss.netty.buffer.ChannelBuffers;
import org.jboss.netty.channel.ChannelHandlerContext;
/** /**
* RPC program corresponding to mountd daemon. See {@link Mountd}. * RPC program corresponding to mountd daemon. See {@link Mountd}.
@ -77,7 +83,7 @@ public class RpcProgramMountd extends RpcProgram implements MountInterface {
throws IOException { throws IOException {
// Note that RPC cache is not enabled // Note that RPC cache is not enabled
super("mountd", "localhost", config.getInt("nfs3.mountd.port", PORT), super("mountd", "localhost", config.getInt("nfs3.mountd.port", PORT),
PROGRAM, VERSION_1, VERSION_3, 0); PROGRAM, VERSION_1, VERSION_3);
this.hostsMatcher = NfsExports.getInstance(config); this.hostsMatcher = NfsExports.getInstance(config);
this.mounts = Collections.synchronizedList(new ArrayList<MountEntry>()); this.mounts = Collections.synchronizedList(new ArrayList<MountEntry>());
@ -173,10 +179,16 @@ public class RpcProgramMountd extends RpcProgram implements MountInterface {
} }
@Override @Override
public XDR handleInternal(RpcCall rpcCall, XDR xdr, XDR out, public void handleInternal(ChannelHandlerContext ctx, RpcInfo info) {
InetAddress client, Channel channel) { RpcCall rpcCall = (RpcCall) info.header();
final MNTPROC mntproc = MNTPROC.fromValue(rpcCall.getProcedure()); final MNTPROC mntproc = MNTPROC.fromValue(rpcCall.getProcedure());
int xid = rpcCall.getXid(); int xid = rpcCall.getXid();
byte[] data = new byte[info.data().readableBytes()];
info.data().readBytes(data);
XDR xdr = new XDR(data);
XDR out = new XDR();
InetAddress client = ((InetSocketAddress) info.remoteAddress()).getAddress();
if (mntproc == MNTPROC.NULL) { if (mntproc == MNTPROC.NULL) {
out = nullOp(out, xid, client); out = nullOp(out, xid, client);
} else if (mntproc == MNTPROC.MNT) { } else if (mntproc == MNTPROC.MNT) {
@ -198,7 +210,9 @@ public class RpcProgramMountd extends RpcProgram implements MountInterface {
RpcAcceptedReply.AcceptState.PROC_UNAVAIL, new VerifierNone()).write( RpcAcceptedReply.AcceptState.PROC_UNAVAIL, new VerifierNone()).write(
out); out);
} }
return out; ChannelBuffer buf = ChannelBuffers.wrappedBuffer(out.asReadOnlyWrap().buffer());
RpcResponse rsp = new RpcResponse(buf, info.remoteAddress());
RpcUtil.sendRpcResponse(ctx, rsp);
} }
@Override @Override

View File

@ -115,6 +115,14 @@ public class Nfs3Utils {
ChannelBuffer outBuf = XDR.writeMessageTcp(out, true); ChannelBuffer outBuf = XDR.writeMessageTcp(out, true);
channel.write(outBuf); channel.write(outBuf);
} }
public static void writeChannelCommit(Channel channel, XDR out, int xid) {
if (RpcProgramNfs3.LOG.isDebugEnabled()) {
RpcProgramNfs3.LOG.debug("Commit done:" + xid);
}
ChannelBuffer outBuf = XDR.writeMessageTcp(out, true);
channel.write(outBuf);
}
private static boolean isSet(int access, int bits) { private static boolean isSet(int access, int bits) {
return (access & bits) == bits; return (access & bits) == bits;

View File

@ -22,6 +22,7 @@ import java.io.FileNotFoundException;
import java.io.FileOutputStream; import java.io.FileOutputStream;
import java.io.IOException; import java.io.IOException;
import java.io.RandomAccessFile; import java.io.RandomAccessFile;
import java.nio.ByteBuffer;
import java.nio.channels.ClosedChannelException; import java.nio.channels.ClosedChannelException;
import java.security.InvalidParameterException; import java.security.InvalidParameterException;
import java.util.EnumSet; import java.util.EnumSet;
@ -47,6 +48,7 @@ import org.apache.hadoop.nfs.nfs3.Nfs3Constant.WriteStableHow;
import org.apache.hadoop.nfs.nfs3.Nfs3FileAttributes; import org.apache.hadoop.nfs.nfs3.Nfs3FileAttributes;
import org.apache.hadoop.nfs.nfs3.Nfs3Status; import org.apache.hadoop.nfs.nfs3.Nfs3Status;
import org.apache.hadoop.nfs.nfs3.request.WRITE3Request; import org.apache.hadoop.nfs.nfs3.request.WRITE3Request;
import org.apache.hadoop.nfs.nfs3.response.COMMIT3Response;
import org.apache.hadoop.nfs.nfs3.response.WRITE3Response; import org.apache.hadoop.nfs.nfs3.response.WRITE3Response;
import org.apache.hadoop.nfs.nfs3.response.WccAttr; import org.apache.hadoop.nfs.nfs3.response.WccAttr;
import org.apache.hadoop.nfs.nfs3.response.WccData; import org.apache.hadoop.nfs.nfs3.response.WccData;
@ -55,6 +57,7 @@ import org.apache.hadoop.oncrpc.security.VerifierNone;
import org.apache.hadoop.util.Daemon; import org.apache.hadoop.util.Daemon;
import org.jboss.netty.channel.Channel; import org.jboss.netty.channel.Channel;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Preconditions; import com.google.common.base.Preconditions;
/** /**
@ -67,12 +70,18 @@ class OpenFileCtx {
// Pending writes water mark for dump, 1MB // Pending writes water mark for dump, 1MB
private static long DUMP_WRITE_WATER_MARK = 1024 * 1024; private static long DUMP_WRITE_WATER_MARK = 1024 * 1024;
public final static int COMMIT_FINISHED = 0; static enum COMMIT_STATUS {
public final static int COMMIT_WAIT = 1; COMMIT_FINISHED,
public final static int COMMIT_INACTIVE_CTX = 2; COMMIT_WAIT,
public final static int COMMIT_INACTIVE_WITH_PENDING_WRITE = 3; COMMIT_INACTIVE_CTX,
public final static int COMMIT_ERROR = 4; COMMIT_INACTIVE_WITH_PENDING_WRITE,
COMMIT_ERROR,
COMMIT_DO_SYNC;
}
private final DFSClient client;
private final IdUserGroup iug;
// The stream status. False means the stream is closed. // The stream status. False means the stream is closed.
private volatile boolean activeState; private volatile boolean activeState;
// The stream write-back status. True means one thread is doing write back. // The stream write-back status. True means one thread is doing write back.
@ -85,11 +94,58 @@ class OpenFileCtx {
private AtomicLong nextOffset; private AtomicLong nextOffset;
private final HdfsDataOutputStream fos; private final HdfsDataOutputStream fos;
// TODO: make it mutable and update it after each writing back to HDFS // It's updated after each sync to HDFS
private final Nfs3FileAttributes latestAttr; private Nfs3FileAttributes latestAttr;
private final ConcurrentNavigableMap<OffsetRange, WriteCtx> pendingWrites; private final ConcurrentNavigableMap<OffsetRange, WriteCtx> pendingWrites;
private final ConcurrentNavigableMap<Long, CommitCtx> pendingCommits;
static class CommitCtx {
private final long offset;
private final Channel channel;
private final int xid;
private final Nfs3FileAttributes preOpAttr;
// Remember time for debug purpose
private final long startTime;
long getOffset() {
return offset;
}
Channel getChannel() {
return channel;
}
int getXid() {
return xid;
}
Nfs3FileAttributes getPreOpAttr() {
return preOpAttr;
}
long getStartTime() {
return startTime;
}
CommitCtx(long offset, Channel channel, int xid,
Nfs3FileAttributes preOpAttr) {
this.offset = offset;
this.channel = channel;
this.xid = xid;
this.preOpAttr = preOpAttr;
this.startTime = System.currentTimeMillis();
}
@Override
public String toString() {
return String.format("offset: %d xid: %d startTime: %d", offset, xid,
startTime);
}
}
// The last write, commit request or write-back event. Updating time to keep // The last write, commit request or write-back event. Updating time to keep
// output stream alive. // output stream alive.
private long lastAccessTime; private long lastAccessTime;
@ -128,7 +184,7 @@ class OpenFileCtx {
} }
OpenFileCtx(HdfsDataOutputStream fos, Nfs3FileAttributes latestAttr, OpenFileCtx(HdfsDataOutputStream fos, Nfs3FileAttributes latestAttr,
String dumpFilePath) { String dumpFilePath, DFSClient client, IdUserGroup iug) {
this.fos = fos; this.fos = fos;
this.latestAttr = latestAttr; this.latestAttr = latestAttr;
// We use the ReverseComparatorOnMin as the comparator of the map. In this // We use the ReverseComparatorOnMin as the comparator of the map. In this
@ -136,6 +192,9 @@ class OpenFileCtx {
// retrieve the last element to write back to HDFS. // retrieve the last element to write back to HDFS.
pendingWrites = new ConcurrentSkipListMap<OffsetRange, WriteCtx>( pendingWrites = new ConcurrentSkipListMap<OffsetRange, WriteCtx>(
OffsetRange.ReverseComparatorOnMin); OffsetRange.ReverseComparatorOnMin);
pendingCommits = new ConcurrentSkipListMap<Long, CommitCtx>();
updateLastAccessTime(); updateLastAccessTime();
activeState = true; activeState = true;
asyncStatus = false; asyncStatus = false;
@ -149,6 +208,8 @@ class OpenFileCtx {
nextOffset.set(latestAttr.getSize()); nextOffset.set(latestAttr.getSize());
assert(nextOffset.get() == this.fos.getPos()); assert(nextOffset.get() == this.fos.getPos());
dumpThread = null; dumpThread = null;
this.client = client;
this.iug = iug;
} }
public Nfs3FileAttributes getLatestAttr() { public Nfs3FileAttributes getLatestAttr() {
@ -360,6 +421,30 @@ class OpenFileCtx {
} }
} }
@VisibleForTesting
public static void alterWriteRequest(WRITE3Request request, long cachedOffset) {
long offset = request.getOffset();
int count = request.getCount();
long smallerCount = offset + count - cachedOffset;
if (LOG.isDebugEnabled()) {
LOG.debug(String.format("Got overwrite with appended data (%d-%d),"
+ " current offset %d," + " drop the overlapped section (%d-%d)"
+ " and append new data (%d-%d).", offset, (offset + count - 1),
cachedOffset, offset, (cachedOffset - 1), cachedOffset, (offset
+ count - 1)));
}
ByteBuffer data = request.getData();
Preconditions.checkState(data.position() == 0,
"The write request data has non-zero position");
data.position((int) (cachedOffset - offset));
Preconditions.checkState(data.limit() - data.position() == smallerCount,
"The write request buffer has wrong limit/position regarding count");
request.setOffset(cachedOffset);
request.setCount((int) smallerCount);
}
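A worked example with assumed numbers: suppose 8192 bytes are already queued (cachedOffset = 8192) and the client resends a write at offset 4096 with count 8192, i.e. bytes 4096-12287. alterWriteRequest then computes:

    long smallerCount = 4096 + 8192 - 8192;      // 4096 bytes of genuinely new data
    data.position((int) (8192 - 4096));          // skip the 4096-byte overlapped prefix
    request.setOffset(8192);                     // the append now starts at the cached offset
    request.setCount(4096);                      // only the non-overlapping tail is written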
/** /**
* Creates and adds a WriteCtx into the pendingWrites map. This is a * Creates and adds a WriteCtx into the pendingWrites map. This is a
* synchronized method to handle concurrent writes. * synchronized method to handle concurrent writes.
@ -372,12 +457,40 @@ class OpenFileCtx {
long offset = request.getOffset(); long offset = request.getOffset();
int count = request.getCount(); int count = request.getCount();
long cachedOffset = nextOffset.get(); long cachedOffset = nextOffset.get();
int originalCount = WriteCtx.INVALID_ORIGINAL_COUNT;
if (LOG.isDebugEnabled()) { if (LOG.isDebugEnabled()) {
LOG.debug("requesed offset=" + offset + " and current offset=" LOG.debug("requesed offset=" + offset + " and current offset="
+ cachedOffset); + cachedOffset);
} }
// Handle a special case first
if ((offset < cachedOffset) && (offset + count > cachedOffset)) {
// One Linux client behavior: after a file is closed and reopened to
// write, the client sometimes combines previously written data (could still
// be in kernel buffer) with newly appended data in one write. This is
// usually the first write after file reopened. In this
// case, we log the event and drop the overlapped section.
LOG.warn(String.format("Got overwrite with appended data (%d-%d),"
+ " current offset %d," + " drop the overlapped section (%d-%d)"
+ " and append new data (%d-%d).", offset, (offset + count - 1),
cachedOffset, offset, (cachedOffset - 1), cachedOffset, (offset
+ count - 1)));
if (!pendingWrites.isEmpty()) {
LOG.warn("There are other pending writes, fail this jumbo write");
return null;
}
LOG.warn("Modify this write to write only the appended data");
alterWriteRequest(request, cachedOffset);
// Update local variable
originalCount = count;
offset = request.getOffset();
count = request.getCount();
}
// Fail non-append call // Fail non-append call
if (offset < cachedOffset) { if (offset < cachedOffset) {
LOG.warn("(offset,count,nextOffset):" + "(" + offset + "," + count + "," LOG.warn("(offset,count,nextOffset):" + "(" + offset + "," + count + ","
@ -387,8 +500,9 @@ class OpenFileCtx {
DataState dataState = offset == cachedOffset ? WriteCtx.DataState.NO_DUMP DataState dataState = offset == cachedOffset ? WriteCtx.DataState.NO_DUMP
: WriteCtx.DataState.ALLOW_DUMP; : WriteCtx.DataState.ALLOW_DUMP;
WriteCtx writeCtx = new WriteCtx(request.getHandle(), WriteCtx writeCtx = new WriteCtx(request.getHandle(),
request.getOffset(), request.getCount(), request.getStableHow(), request.getOffset(), request.getCount(), originalCount,
request.getData().array(), channel, xid, false, dataState); request.getStableHow(), request.getData(), channel, xid, false,
dataState);
if (LOG.isDebugEnabled()) { if (LOG.isDebugEnabled()) {
LOG.debug("Add new write to the list with nextOffset " + cachedOffset LOG.debug("Add new write to the list with nextOffset " + cachedOffset
+ " and requesed offset=" + offset); + " and requesed offset=" + offset);
@ -419,8 +533,7 @@ class OpenFileCtx {
WRITE3Response response; WRITE3Response response;
long cachedOffset = nextOffset.get(); long cachedOffset = nextOffset.get();
if (offset + count > cachedOffset) { if (offset + count > cachedOffset) {
LOG.warn("Haven't noticed any partial overwrite for a sequential file" LOG.warn("Treat this jumbo write as a real random write, no support.");
+ " write requests. Treat it as a real random write, no support.");
response = new WRITE3Response(Nfs3Status.NFS3ERR_INVAL, wccData, 0, response = new WRITE3Response(Nfs3Status.NFS3ERR_INVAL, wccData, 0,
WriteStableHow.UNSTABLE, Nfs3Constant.WRITE_COMMIT_VERF); WriteStableHow.UNSTABLE, Nfs3Constant.WRITE_COMMIT_VERF);
} else { } else {
@ -491,19 +604,23 @@ class OpenFileCtx {
// of reordered writes and won't send more writes until it gets // of reordered writes and won't send more writes until it gets
// responses of the previous batch. So here send response immediately // responses of the previous batch. So here send response immediately
// for unstable non-sequential write // for unstable non-sequential write
if (request.getStableHow() == WriteStableHow.UNSTABLE) { if (stableHow != WriteStableHow.UNSTABLE) {
if (LOG.isDebugEnabled()) { LOG.info("Have to change stable write to unstable write:"
LOG.debug("UNSTABLE write request, send response for offset: " + request.getStableHow());
+ writeCtx.getOffset()); stableHow = WriteStableHow.UNSTABLE;
}
WccData fileWcc = new WccData(preOpAttr, latestAttr);
WRITE3Response response = new WRITE3Response(Nfs3Status.NFS3_OK,
fileWcc, count, stableHow, Nfs3Constant.WRITE_COMMIT_VERF);
Nfs3Utils
.writeChannel(channel, response.writeHeaderAndResponse(new XDR(),
xid, new VerifierNone()), xid);
writeCtx.setReplied(true);
} }
if (LOG.isDebugEnabled()) {
LOG.debug("UNSTABLE write request, send response for offset: "
+ writeCtx.getOffset());
}
WccData fileWcc = new WccData(preOpAttr, latestAttr);
WRITE3Response response = new WRITE3Response(Nfs3Status.NFS3_OK,
fileWcc, count, stableHow, Nfs3Constant.WRITE_COMMIT_VERF);
Nfs3Utils
.writeChannel(channel, response.writeHeaderAndResponse(new XDR(),
xid, new VerifierNone()), xid);
writeCtx.setReplied(true);
} }
} }
} }
@ -581,58 +698,92 @@ class OpenFileCtx {
return response; return response;
} }
public COMMIT_STATUS checkCommit(DFSClient dfsClient, long commitOffset,
Channel channel, int xid, Nfs3FileAttributes preOpAttr) {
// Keep stream active
updateLastAccessTime();
Preconditions.checkState(commitOffset >= 0);
COMMIT_STATUS ret = checkCommitInternal(commitOffset, channel, xid,
preOpAttr);
if (LOG.isDebugEnabled()) {
LOG.debug("Got commit status: " + ret.name());
}
// Do the sync outside the lock
if (ret == COMMIT_STATUS.COMMIT_DO_SYNC
|| ret == COMMIT_STATUS.COMMIT_FINISHED) {
try {
// Sync file data and length
fos.hsync(EnumSet.of(SyncFlag.UPDATE_LENGTH));
// Nothing to do for metadata since attr related change is pass-through
} catch (ClosedChannelException cce) {
if (pendingWrites.isEmpty()) {
ret = COMMIT_STATUS.COMMIT_FINISHED;
} else {
ret = COMMIT_STATUS.COMMIT_ERROR;
}
} catch (IOException e) {
LOG.error("Got stream error during data sync:" + e);
// Do nothing. Stream will be closed eventually by StreamMonitor.
// status = Nfs3Status.NFS3ERR_IO;
ret = COMMIT_STATUS.COMMIT_ERROR;
}
}
return ret;
}
/** /**
* return one commit status: COMMIT_FINISHED, COMMIT_WAIT, * return one commit status: COMMIT_FINISHED, COMMIT_WAIT,
* COMMIT_INACTIVE_CTX, COMMIT_ERROR * COMMIT_INACTIVE_CTX, COMMIT_INACTIVE_WITH_PENDING_WRITE, COMMIT_ERROR
*/ */
public int checkCommit(long commitOffset) { private synchronized COMMIT_STATUS checkCommitInternal(long commitOffset,
return activeState ? checkCommitInternal(commitOffset) Channel channel, int xid, Nfs3FileAttributes preOpAttr) {
: COMMIT_INACTIVE_CTX; if (!activeState) {
} if (pendingWrites.isEmpty()) {
return COMMIT_STATUS.COMMIT_INACTIVE_CTX;
private int checkCommitInternal(long commitOffset) { } else {
if (commitOffset == 0) { // TODO: return success if already committed
// Commit whole file return COMMIT_STATUS.COMMIT_INACTIVE_WITH_PENDING_WRITE;
commitOffset = nextOffset.get(); }
} }
long flushed = getFlushedOffset(); long flushed = getFlushedOffset();
if (LOG.isDebugEnabled()) { if (LOG.isDebugEnabled()) {
LOG.debug("getFlushedOffset=" + flushed + " commitOffset=" + commitOffset); LOG.debug("getFlushedOffset=" + flushed + " commitOffset=" + commitOffset);
} }
if (flushed < commitOffset) {
// Keep stream active
updateLastAccessTime();
return COMMIT_WAIT;
}
int ret = COMMIT_WAIT; if (commitOffset > 0) {
try { if (commitOffset > flushed) {
// Sync file data and length CommitCtx commitCtx = new CommitCtx(commitOffset, channel, xid,
fos.hsync(EnumSet.of(SyncFlag.UPDATE_LENGTH)); preOpAttr);
// Nothing to do for metadata since attr related change is pass-through pendingCommits.put(commitOffset, commitCtx);
ret = COMMIT_FINISHED; return COMMIT_STATUS.COMMIT_WAIT;
} catch (ClosedChannelException cce) {
ret = COMMIT_INACTIVE_CTX;
if (pendingWrites.isEmpty()) {
ret = COMMIT_INACTIVE_CTX;
} else { } else {
ret = COMMIT_INACTIVE_WITH_PENDING_WRITE; return COMMIT_STATUS.COMMIT_DO_SYNC;
} }
} catch (IOException e) {
LOG.error("Got stream error during data sync:" + e);
// Do nothing. Stream will be closed eventually by StreamMonitor.
ret = COMMIT_ERROR;
} }
// Keep stream active Entry<OffsetRange, WriteCtx> key = pendingWrites.firstEntry();
updateLastAccessTime();
return ret; // Commit whole file, commitOffset == 0
if (pendingWrites.isEmpty()) {
// Note that, there is no guarantee data is synced. TODO: We could still
// do a sync here though the output stream might be closed.
return COMMIT_STATUS.COMMIT_FINISHED;
} else {
// Insert commit
long maxOffset = key.getKey().getMax() - 1;
Preconditions.checkState(maxOffset > 0);
CommitCtx commitCtx = new CommitCtx(maxOffset, channel, xid, preOpAttr);
pendingCommits.put(maxOffset, commitCtx);
return COMMIT_STATUS.COMMIT_WAIT;
}
} }
private void addWrite(WriteCtx writeCtx) { private void addWrite(WriteCtx writeCtx) {
long offset = writeCtx.getOffset(); long offset = writeCtx.getOffset();
int count = writeCtx.getCount(); int count = writeCtx.getCount();
// For the offset range (min, max), min is inclusive, and max is exclusive
pendingWrites.put(new OffsetRange(offset, offset + count), writeCtx); pendingWrites.put(new OffsetRange(offset, offset + count), writeCtx);
} }
@ -671,8 +822,18 @@ class OpenFileCtx {
LOG.debug("The asyn write task has no pending writes, fileId: " LOG.debug("The asyn write task has no pending writes, fileId: "
+ latestAttr.getFileId()); + latestAttr.getFileId());
} }
// process pending commit again to handle this race: a commit is added
// to pendingCommits map just after the last doSingleWrite returns.
// There is no pending write and the commit should be handled by the
// last doSingleWrite. Due to the race, the commit is left alone and
// can't be processed until cleanup. Therefore, we should do another
// processCommits to fix the race issue.
processCommits(nextOffset.get()); // nextOffset has same value as
// flushedOffset
this.asyncStatus = false; this.asyncStatus = false;
} else { return null;
}
Entry<OffsetRange, WriteCtx> lastEntry = pendingWrites.lastEntry(); Entry<OffsetRange, WriteCtx> lastEntry = pendingWrites.lastEntry();
OffsetRange range = lastEntry.getKey(); OffsetRange range = lastEntry.getKey();
WriteCtx toWrite = lastEntry.getValue(); WriteCtx toWrite = lastEntry.getValue();
@ -687,6 +848,7 @@ class OpenFileCtx {
if (LOG.isDebugEnabled()) { if (LOG.isDebugEnabled()) {
LOG.debug("The next sequencial write has not arrived yet"); LOG.debug("The next sequencial write has not arrived yet");
} }
processCommits(nextOffset.get()); // handle race
this.asyncStatus = false; this.asyncStatus = false;
} else if (range.getMin() < offset && range.getMax() > offset) { } else if (range.getMin() < offset && range.getMax() > offset) {
// shouldn't happen since we do sync for overlapped concurrent writers // shouldn't happen since we do sync for overlapped concurrent writers
@ -694,6 +856,7 @@ class OpenFileCtx {
+ range.getMax() + "), nextOffset=" + offset + range.getMax() + "), nextOffset=" + offset
+ ". Silently drop it now"); + ". Silently drop it now");
pendingWrites.remove(range); pendingWrites.remove(range);
processCommits(nextOffset.get()); // handle race
} else { } else {
if (LOG.isDebugEnabled()) { if (LOG.isDebugEnabled()) {
LOG.debug("Remove write(" + range.getMin() + "-" + range.getMax() LOG.debug("Remove write(" + range.getMin() + "-" + range.getMax()
@ -708,7 +871,7 @@ class OpenFileCtx {
} }
return toWrite; return toWrite;
} }
}
return null; return null;
} }
@ -730,7 +893,7 @@ class OpenFileCtx {
if (!activeState && LOG.isDebugEnabled()) { if (!activeState && LOG.isDebugEnabled()) {
LOG.debug("The openFileCtx is not active anymore, fileId: " LOG.debug("The openFileCtx is not active anymore, fileId: "
+ +latestAttr.getFileId()); + latestAttr.getFileId());
} }
} finally { } finally {
// make sure we reset asyncStatus to false // make sure we reset asyncStatus to false
@ -738,6 +901,69 @@ class OpenFileCtx {
} }
} }
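The extra processCommits() calls above guard against a commit that is queued just after the writer thread has decided there is nothing left to do but before asyncStatus is cleared. The same re-check-after-going-idle pattern in isolation, as a small sketch (RecheckAfterIdle is a made-up class, not part of the NFS gateway):

import java.util.Queue;
import java.util.concurrent.ConcurrentLinkedQueue;
import java.util.concurrent.atomic.AtomicBoolean;

// Sketch only: after the drainer gives up the "busy" flag it re-checks the
// queue once, so an item enqueued in the gap is not stranded until cleanup.
class RecheckAfterIdle {
    private final Queue<Runnable> queue = new ConcurrentLinkedQueue<>();
    private final AtomicBoolean draining = new AtomicBoolean(false);

    void submit(Runnable task) {
        queue.add(task);
        maybeDrain();
    }

    private void maybeDrain() {
        if (!draining.compareAndSet(false, true)) {
            return;                         // someone else is draining
        }
        try {
            for (Runnable t; (t = queue.poll()) != null; ) {
                t.run();
            }
        } finally {
            draining.set(false);
            // The race: a task may have been added after the last poll()
            // but before the flag was cleared. Check once more.
            if (!queue.isEmpty()) {
                maybeDrain();
            }
        }
    }
}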
private void processCommits(long offset) {
Preconditions.checkState(offset > 0);
long flushedOffset = getFlushedOffset();
Entry<Long, CommitCtx> entry = pendingCommits.firstEntry();
if (entry == null || entry.getValue().offset > flushedOffset) {
return;
}
// Now do sync for the ready commits
int status = Nfs3Status.NFS3ERR_IO;
try {
// Sync file data and length
fos.hsync(EnumSet.of(SyncFlag.UPDATE_LENGTH));
status = Nfs3Status.NFS3_OK;
} catch (ClosedChannelException cce) {
if (!pendingWrites.isEmpty()) {
LOG.error("Can't sync for fileId: " + latestAttr.getFileId()
+ ". Channel closed with writes pending");
}
status = Nfs3Status.NFS3ERR_IO;
} catch (IOException e) {
LOG.error("Got stream error during data sync:" + e);
// Do nothing. Stream will be closed eventually by StreamMonitor.
status = Nfs3Status.NFS3ERR_IO;
}
// Update latestAttr
try {
latestAttr = Nfs3Utils.getFileAttr(client,
Nfs3Utils.getFileIdPath(latestAttr.getFileId()), iug);
} catch (IOException e) {
LOG.error("Can't get new file attr for fileId: " + latestAttr.getFileId());
status = Nfs3Status.NFS3ERR_IO;
}
if (latestAttr.getSize() != offset) {
LOG.error("After sync, the expect file size: " + offset
+ ", however actual file size is: " + latestAttr.getSize());
status = Nfs3Status.NFS3ERR_IO;
}
WccData wccData = new WccData(Nfs3Utils.getWccAttr(latestAttr), latestAttr);
// Send response for the ready commits
while (entry != null && entry.getValue().offset <= flushedOffset) {
pendingCommits.remove(entry.getKey());
CommitCtx commit = entry.getValue();
COMMIT3Response response = new COMMIT3Response(status, wccData,
Nfs3Constant.WRITE_COMMIT_VERF);
Nfs3Utils.writeChannelCommit(commit.getChannel(), response
.writeHeaderAndResponse(new XDR(), commit.getXid(),
new VerifierNone()), commit.getXid());
if (LOG.isDebugEnabled()) {
LOG.debug("FileId: " + latestAttr.getFileid() + " Service time:"
+ (System.currentTimeMillis() - commit.getStartTime())
+ "ms. Sent response for commit:" + commit);
}
entry = pendingCommits.firstEntry();
}
}
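processCommits() above walks the sorted pendingCommits map and answers every commit whose offset is already covered by the flushed data. A simplified sketch of just that drain loop follows; CommitQueue and the LongConsumer reply callback are hypothetical stand-ins, and the real method additionally refreshes the file attributes and builds COMMIT3 responses.

import java.util.Map;
import java.util.concurrent.ConcurrentNavigableMap;
import java.util.concurrent.ConcurrentSkipListMap;
import java.util.function.LongConsumer;

// Sketch only: release every queued commit whose offset has been flushed.
class CommitQueue {
    private final ConcurrentNavigableMap<Long, LongConsumer> pending =
        new ConcurrentSkipListMap<>();

    void add(long commitOffset, LongConsumer replyCallback) {
        pending.put(commitOffset, replyCallback);
    }

    // Called after a write/sync has advanced the flushed offset.
    void drain(long flushedOffset) {
        Map.Entry<Long, LongConsumer> entry = pending.firstEntry();
        while (entry != null && entry.getKey() <= flushedOffset) {
            pending.remove(entry.getKey());
            entry.getValue().accept(entry.getKey());   // send the COMMIT reply
            entry = pending.firstEntry();
        }
    }
}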
private void doSingleWrite(final WriteCtx writeCtx) { private void doSingleWrite(final WriteCtx writeCtx) {
Channel channel = writeCtx.getChannel(); Channel channel = writeCtx.getChannel();
int xid = writeCtx.getXid(); int xid = writeCtx.getXid();
@ -745,19 +971,7 @@ class OpenFileCtx {
long offset = writeCtx.getOffset(); long offset = writeCtx.getOffset();
int count = writeCtx.getCount(); int count = writeCtx.getCount();
WriteStableHow stableHow = writeCtx.getStableHow(); WriteStableHow stableHow = writeCtx.getStableHow();
byte[] data = null;
try {
data = writeCtx.getData();
} catch (Exception e1) {
LOG.error("Failed to get request data offset:" + offset + " count:"
+ count + " error:" + e1);
// Cleanup everything
cleanup();
return;
}
Preconditions.checkState(data.length == count);
FileHandle handle = writeCtx.getHandle(); FileHandle handle = writeCtx.getHandle();
if (LOG.isDebugEnabled()) { if (LOG.isDebugEnabled()) {
LOG.debug("do write, fileId: " + handle.getFileId() + " offset: " LOG.debug("do write, fileId: " + handle.getFileId() + " offset: "
@ -766,8 +980,8 @@ class OpenFileCtx {
try { try {
// The write is not protected by lock. asyncState is used to make sure // The write is not protected by lock. asyncState is used to make sure
// there is one thread doing write back at any time // there is one thread doing write back at any time
fos.write(data, 0, count); writeCtx.writeData(fos);
long flushedOffset = getFlushedOffset(); long flushedOffset = getFlushedOffset();
if (flushedOffset != (offset + count)) { if (flushedOffset != (offset + count)) {
@ -776,10 +990,6 @@ class OpenFileCtx {
+ (offset + count)); + (offset + count));
} }
if (LOG.isDebugEnabled()) {
LOG.debug("After writing " + handle.getFileId() + " at offset "
+ offset + ", update the memory count.");
}
// Reduce memory occupation size if request was allowed dumped // Reduce memory occupation size if request was allowed dumped
if (writeCtx.getDataState() == WriteCtx.DataState.ALLOW_DUMP) { if (writeCtx.getDataState() == WriteCtx.DataState.ALLOW_DUMP) {
@ -787,6 +997,11 @@ class OpenFileCtx {
if (writeCtx.getDataState() == WriteCtx.DataState.ALLOW_DUMP) { if (writeCtx.getDataState() == WriteCtx.DataState.ALLOW_DUMP) {
writeCtx.setDataState(WriteCtx.DataState.NO_DUMP); writeCtx.setDataState(WriteCtx.DataState.NO_DUMP);
updateNonSequentialWriteInMemory(-count); updateNonSequentialWriteInMemory(-count);
if (LOG.isDebugEnabled()) {
LOG.debug("After writing " + handle.getFileId() + " at offset "
+ offset + ", updated the memory count, new value:"
+ nonSequentialWriteInMemory.get());
}
} }
} }
} }
@ -794,14 +1009,23 @@ class OpenFileCtx {
if (!writeCtx.getReplied()) { if (!writeCtx.getReplied()) {
WccAttr preOpAttr = latestAttr.getWccAttr(); WccAttr preOpAttr = latestAttr.getWccAttr();
WccData fileWcc = new WccData(preOpAttr, latestAttr); WccData fileWcc = new WccData(preOpAttr, latestAttr);
if (writeCtx.getOriginalCount() != WriteCtx.INVALID_ORIGINAL_COUNT) {
LOG.warn("Return original count:" + writeCtx.getOriginalCount()
+ " instead of real data count:" + count);
count = writeCtx.getOriginalCount();
}
WRITE3Response response = new WRITE3Response(Nfs3Status.NFS3_OK, WRITE3Response response = new WRITE3Response(Nfs3Status.NFS3_OK,
fileWcc, count, stableHow, Nfs3Constant.WRITE_COMMIT_VERF); fileWcc, count, stableHow, Nfs3Constant.WRITE_COMMIT_VERF);
Nfs3Utils.writeChannel(channel, response.writeHeaderAndResponse( Nfs3Utils.writeChannel(channel, response.writeHeaderAndResponse(
new XDR(), xid, new VerifierNone()), xid); new XDR(), xid, new VerifierNone()), xid);
} }
// Handle the waiting commits without holding any lock
processCommits(writeCtx.getOffset() + writeCtx.getCount());
} catch (IOException e) { } catch (IOException e) {
LOG.error("Error writing to fileId " + handle.getFileId() + " at offset " LOG.error("Error writing to fileId " + handle.getFileId() + " at offset "
+ offset + " and length " + data.length, e); + offset + " and length " + count, e);
if (!writeCtx.getReplied()) { if (!writeCtx.getReplied()) {
WRITE3Response response = new WRITE3Response(Nfs3Status.NFS3ERR_IO); WRITE3Response response = new WRITE3Response(Nfs3Status.NFS3ERR_IO);
Nfs3Utils.writeChannel(channel, response.writeHeaderAndResponse( Nfs3Utils.writeChannel(channel, response.writeHeaderAndResponse(
@ -880,4 +1104,29 @@ class OpenFileCtx {
} }
} }
} }
@VisibleForTesting
ConcurrentNavigableMap<OffsetRange, WriteCtx> getPendingWritesForTest(){
return pendingWrites;
}
@VisibleForTesting
ConcurrentNavigableMap<Long, CommitCtx> getPendingCommitsForTest(){
return pendingCommits;
}
@VisibleForTesting
long getNextOffsetForTest() {
return nextOffset.get();
}
@VisibleForTesting
void setNextOffsetForTest(long newValue) {
nextOffset.set(newValue);
}
@VisibleForTesting
void setActiveStatusForTest(boolean activeState) {
this.activeState = activeState;
}
} }

View File

@ -21,6 +21,7 @@ import java.io.File;
import java.io.FileNotFoundException; import java.io.FileNotFoundException;
import java.io.IOException; import java.io.IOException;
import java.net.InetAddress; import java.net.InetAddress;
import java.net.InetSocketAddress;
import java.nio.ByteBuffer; import java.nio.ByteBuffer;
import java.util.EnumSet; import java.util.EnumSet;
@ -29,6 +30,7 @@ import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeysPublic; import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
import org.apache.hadoop.fs.CreateFlag; import org.apache.hadoop.fs.CreateFlag;
import org.apache.hadoop.fs.DirectoryListingStartAfterNotFoundException;
import org.apache.hadoop.fs.FSDataInputStream; import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem.Statistics; import org.apache.hadoop.fs.FileSystem.Statistics;
import org.apache.hadoop.fs.FileUtil; import org.apache.hadoop.fs.FileUtil;
@ -43,6 +45,7 @@ import org.apache.hadoop.hdfs.protocol.DirectoryListing;
import org.apache.hadoop.hdfs.protocol.HdfsConstants; import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.protocol.HdfsFileStatus; import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
import org.apache.hadoop.hdfs.server.namenode.NameNode; import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.apache.hadoop.ipc.RemoteException;
import org.apache.hadoop.nfs.AccessPrivilege; import org.apache.hadoop.nfs.AccessPrivilege;
import org.apache.hadoop.nfs.NfsExports; import org.apache.hadoop.nfs.NfsExports;
import org.apache.hadoop.nfs.NfsFileType; import org.apache.hadoop.nfs.NfsFileType;
@ -103,9 +106,13 @@ import org.apache.hadoop.nfs.nfs3.response.WccAttr;
import org.apache.hadoop.nfs.nfs3.response.WccData; import org.apache.hadoop.nfs.nfs3.response.WccData;
import org.apache.hadoop.oncrpc.RpcAcceptedReply; import org.apache.hadoop.oncrpc.RpcAcceptedReply;
import org.apache.hadoop.oncrpc.RpcCall; import org.apache.hadoop.oncrpc.RpcCall;
import org.apache.hadoop.oncrpc.RpcCallCache;
import org.apache.hadoop.oncrpc.RpcDeniedReply; import org.apache.hadoop.oncrpc.RpcDeniedReply;
import org.apache.hadoop.oncrpc.RpcInfo;
import org.apache.hadoop.oncrpc.RpcProgram; import org.apache.hadoop.oncrpc.RpcProgram;
import org.apache.hadoop.oncrpc.RpcReply; import org.apache.hadoop.oncrpc.RpcReply;
import org.apache.hadoop.oncrpc.RpcResponse;
import org.apache.hadoop.oncrpc.RpcUtil;
import org.apache.hadoop.oncrpc.XDR; import org.apache.hadoop.oncrpc.XDR;
import org.apache.hadoop.oncrpc.security.Credentials; import org.apache.hadoop.oncrpc.security.Credentials;
import org.apache.hadoop.oncrpc.security.CredentialsSys; import org.apache.hadoop.oncrpc.security.CredentialsSys;
@ -115,7 +122,10 @@ import org.apache.hadoop.oncrpc.security.SysSecurityHandler;
import org.apache.hadoop.oncrpc.security.Verifier; import org.apache.hadoop.oncrpc.security.Verifier;
import org.apache.hadoop.oncrpc.security.VerifierNone; import org.apache.hadoop.oncrpc.security.VerifierNone;
import org.apache.hadoop.security.AccessControlException; import org.apache.hadoop.security.AccessControlException;
import org.jboss.netty.buffer.ChannelBuffer;
import org.jboss.netty.buffer.ChannelBuffers;
import org.jboss.netty.channel.Channel; import org.jboss.netty.channel.Channel;
import org.jboss.netty.channel.ChannelHandlerContext;
/** /**
* RPC program corresponding to nfs daemon. See {@link Nfs3}. * RPC program corresponding to nfs daemon. See {@link Nfs3}.
@ -150,14 +160,15 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
private Statistics statistics; private Statistics statistics;
private String writeDumpDir; // The dir save dump files private String writeDumpDir; // The dir save dump files
private final RpcCallCache rpcCallCache;
public RpcProgramNfs3() throws IOException { public RpcProgramNfs3() throws IOException {
this(new Configuration()); this(new Configuration());
} }
public RpcProgramNfs3(Configuration config) public RpcProgramNfs3(Configuration config) throws IOException {
throws IOException {
super("NFS3", "localhost", Nfs3Constant.PORT, Nfs3Constant.PROGRAM, super("NFS3", "localhost", Nfs3Constant.PORT, Nfs3Constant.PROGRAM,
Nfs3Constant.VERSION, Nfs3Constant.VERSION, 100); Nfs3Constant.VERSION, Nfs3Constant.VERSION);
config.set(FsPermission.UMASK_LABEL, "000"); config.set(FsPermission.UMASK_LABEL, "000");
iug = new IdUserGroup(); iug = new IdUserGroup();
@ -183,6 +194,8 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
} else { } else {
clearDirectory(writeDumpDir); clearDirectory(writeDumpDir);
} }
rpcCallCache = new RpcCallCache("NFS3", 256);
} }
private void clearDirectory(String writeDumpDir) throws IOException { private void clearDirectory(String writeDumpDir) throws IOException {
@ -213,8 +226,8 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
} }
@Override @Override
public GETATTR3Response getattr(XDR xdr, public GETATTR3Response getattr(XDR xdr, SecurityHandler securityHandler,
SecurityHandler securityHandler, InetAddress client) { InetAddress client) {
GETATTR3Response response = new GETATTR3Response(Nfs3Status.NFS3_OK); GETATTR3Response response = new GETATTR3Response(Nfs3Status.NFS3_OK);
if (!checkAccessPrivilege(client, AccessPrivilege.READ_ONLY)) { if (!checkAccessPrivilege(client, AccessPrivilege.READ_ONLY)) {
@ -294,8 +307,8 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
} }
@Override @Override
public SETATTR3Response setattr(XDR xdr, public SETATTR3Response setattr(XDR xdr, SecurityHandler securityHandler,
SecurityHandler securityHandler, InetAddress client) { InetAddress client) {
SETATTR3Response response = new SETATTR3Response(Nfs3Status.NFS3_OK); SETATTR3Response response = new SETATTR3Response(Nfs3Status.NFS3_OK);
DFSClient dfsClient = clientCache.get(securityHandler.getUser()); DFSClient dfsClient = clientCache.get(securityHandler.getUser());
if (dfsClient == null) { if (dfsClient == null) {
@ -370,8 +383,8 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
} }
@Override @Override
public LOOKUP3Response lookup(XDR xdr, public LOOKUP3Response lookup(XDR xdr, SecurityHandler securityHandler,
SecurityHandler securityHandler, InetAddress client) { InetAddress client) {
LOOKUP3Response response = new LOOKUP3Response(Nfs3Status.NFS3_OK); LOOKUP3Response response = new LOOKUP3Response(Nfs3Status.NFS3_OK);
if (!checkAccessPrivilege(client, AccessPrivilege.READ_ONLY)) { if (!checkAccessPrivilege(client, AccessPrivilege.READ_ONLY)) {
@ -432,8 +445,8 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
} }
@Override @Override
public ACCESS3Response access(XDR xdr, public ACCESS3Response access(XDR xdr, SecurityHandler securityHandler,
SecurityHandler securityHandler, InetAddress client) { InetAddress client) {
ACCESS3Response response = new ACCESS3Response(Nfs3Status.NFS3_OK); ACCESS3Response response = new ACCESS3Response(Nfs3Status.NFS3_OK);
if (!checkAccessPrivilege(client, AccessPrivilege.READ_ONLY)) { if (!checkAccessPrivilege(client, AccessPrivilege.READ_ONLY)) {
@ -574,7 +587,6 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
long offset = request.getOffset(); long offset = request.getOffset();
int count = request.getCount(); int count = request.getCount();
FileHandle handle = request.getHandle(); FileHandle handle = request.getHandle();
if (LOG.isDebugEnabled()) { if (LOG.isDebugEnabled()) {
LOG.debug("NFS READ fileId: " + handle.getFileId() + " offset: " + offset LOG.debug("NFS READ fileId: " + handle.getFileId() + " offset: " + offset
@ -720,8 +732,8 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
} }
@Override @Override
public CREATE3Response create(XDR xdr, public CREATE3Response create(XDR xdr, SecurityHandler securityHandler,
SecurityHandler securityHandler, InetAddress client) { InetAddress client) {
CREATE3Response response = new CREATE3Response(Nfs3Status.NFS3_OK); CREATE3Response response = new CREATE3Response(Nfs3Status.NFS3_OK);
DFSClient dfsClient = clientCache.get(securityHandler.getUser()); DFSClient dfsClient = clientCache.get(securityHandler.getUser());
if (dfsClient == null) { if (dfsClient == null) {
@ -830,7 +842,7 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
// Add open stream // Add open stream
OpenFileCtx openFileCtx = new OpenFileCtx(fos, postOpObjAttr, writeDumpDir OpenFileCtx openFileCtx = new OpenFileCtx(fos, postOpObjAttr, writeDumpDir
+ "/" + postOpObjAttr.getFileId()); + "/" + postOpObjAttr.getFileId(), dfsClient, iug);
fileHandle = new FileHandle(postOpObjAttr.getFileId()); fileHandle = new FileHandle(postOpObjAttr.getFileId());
writeManager.addOpenFileStream(fileHandle, openFileCtx); writeManager.addOpenFileStream(fileHandle, openFileCtx);
if (LOG.isDebugEnabled()) { if (LOG.isDebugEnabled()) {
@ -973,8 +985,7 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
} }
String fileIdPath = dirFileIdPath + "/" + fileName; String fileIdPath = dirFileIdPath + "/" + fileName;
HdfsFileStatus fstat = Nfs3Utils.getFileStatus(dfsClient, HdfsFileStatus fstat = Nfs3Utils.getFileStatus(dfsClient, fileIdPath);
fileIdPath);
if (fstat == null) { if (fstat == null) {
WccData dirWcc = new WccData(Nfs3Utils.getWccAttr(preOpDirAttr), WccData dirWcc = new WccData(Nfs3Utils.getWccAttr(preOpDirAttr),
preOpDirAttr); preOpDirAttr);
@ -1056,8 +1067,7 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
} }
String fileIdPath = dirFileIdPath + "/" + fileName; String fileIdPath = dirFileIdPath + "/" + fileName;
HdfsFileStatus fstat = Nfs3Utils.getFileStatus(dfsClient, HdfsFileStatus fstat = Nfs3Utils.getFileStatus(dfsClient, fileIdPath);
fileIdPath);
if (fstat == null) { if (fstat == null) {
return new RMDIR3Response(Nfs3Status.NFS3ERR_NOENT, errWcc); return new RMDIR3Response(Nfs3Status.NFS3ERR_NOENT, errWcc);
} }
@ -1098,8 +1108,8 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
} }
@Override @Override
public RENAME3Response rename(XDR xdr, public RENAME3Response rename(XDR xdr, SecurityHandler securityHandler,
SecurityHandler securityHandler, InetAddress client) { InetAddress client) {
RENAME3Response response = new RENAME3Response(Nfs3Status.NFS3_OK); RENAME3Response response = new RENAME3Response(Nfs3Status.NFS3_OK);
DFSClient dfsClient = clientCache.get(securityHandler.getUser()); DFSClient dfsClient = clientCache.get(securityHandler.getUser());
if (dfsClient == null) { if (dfsClient == null) {
@ -1245,13 +1255,37 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
} }
} }
public READDIR3Response link(XDR xdr, SecurityHandler securityHandler, InetAddress client) { public READDIR3Response link(XDR xdr, SecurityHandler securityHandler,
InetAddress client) {
return new READDIR3Response(Nfs3Status.NFS3ERR_NOTSUPP); return new READDIR3Response(Nfs3Status.NFS3ERR_NOTSUPP);
} }
/**
* Used by readdir and readdirplus to get dirents. It retries the listing if
* the startAfter can't be found anymore.
*/
private DirectoryListing listPaths(DFSClient dfsClient, String dirFileIdPath,
byte[] startAfter) throws IOException {
DirectoryListing dlisting = null;
try {
dlisting = dfsClient.listPaths(dirFileIdPath, startAfter);
} catch (RemoteException e) {
IOException io = e.unwrapRemoteException();
if (!(io instanceof DirectoryListingStartAfterNotFoundException)) {
throw io;
}
// This happens when startAfter was just deleted
LOG.info("Cookie cound't be found: " + new String(startAfter)
+ ", do listing from beginning");
dlisting = dfsClient
.listPaths(dirFileIdPath, HdfsFileStatus.EMPTY_NAME);
}
return dlisting;
}
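listPaths() above falls back to listing from the top of the directory when the saved resume marker has been removed between calls. A generic sketch of that fallback pattern, with a hypothetical MarkerNotFoundException and Source.list() standing in for DirectoryListingStartAfterNotFoundException and DFSClient.listPaths():

import java.io.IOException;
import java.util.List;

// Sketch only: resume a paged listing, restarting from the top if the
// marker entry no longer exists.
class PagedLister {
    static class MarkerNotFoundException extends IOException {}

    interface Source {
        List<String> list(String dir, String startAfter) throws IOException;
    }

    List<String> listFrom(Source src, String dir, String startAfter)
            throws IOException {
        try {
            return src.list(dir, startAfter);
        } catch (MarkerNotFoundException e) {
            // The entry we were resuming after was deleted; start over.
            return src.list(dir, "");
        }
    }
}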
@Override @Override
public READDIR3Response readdir(XDR xdr, public READDIR3Response readdir(XDR xdr, SecurityHandler securityHandler,
SecurityHandler securityHandler, InetAddress client) { InetAddress client) {
READDIR3Response response = new READDIR3Response(Nfs3Status.NFS3_OK); READDIR3Response response = new READDIR3Response(Nfs3Status.NFS3_OK);
if (!checkAccessPrivilege(client, AccessPrivilege.READ_ONLY)) { if (!checkAccessPrivilege(client, AccessPrivilege.READ_ONLY)) {
@ -1289,7 +1323,7 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
+ cookie + " count: " + count); + cookie + " count: " + count);
} }
HdfsFileStatus dirStatus; HdfsFileStatus dirStatus = null;
DirectoryListing dlisting = null; DirectoryListing dlisting = null;
Nfs3FileAttributes postOpAttr = null; Nfs3FileAttributes postOpAttr = null;
long dotdotFileId = 0; long dotdotFileId = 0;
@ -1333,8 +1367,8 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
String inodeIdPath = Nfs3Utils.getFileIdPath(cookie); String inodeIdPath = Nfs3Utils.getFileIdPath(cookie);
startAfter = inodeIdPath.getBytes(); startAfter = inodeIdPath.getBytes();
} }
dlisting = dfsClient.listPaths(dirFileIdPath, startAfter);
dlisting = listPaths(dfsClient, dirFileIdPath, startAfter);
postOpAttr = Nfs3Utils.getFileAttr(dfsClient, dirFileIdPath, iug); postOpAttr = Nfs3Utils.getFileAttr(dfsClient, dirFileIdPath, iug);
if (postOpAttr == null) { if (postOpAttr == null) {
LOG.error("Can't get path for fileId:" + handle.getFileId()); LOG.error("Can't get path for fileId:" + handle.getFileId());
@ -1417,11 +1451,15 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
} }
long dirCount = request.getDirCount(); long dirCount = request.getDirCount();
if (dirCount <= 0) { if (dirCount <= 0) {
LOG.info("Nonpositive count in invalid READDIRPLUS request:" + dirCount); LOG.info("Nonpositive dircount in invalid READDIRPLUS request:" + dirCount);
return new READDIRPLUS3Response(Nfs3Status.NFS3_OK); return new READDIRPLUS3Response(Nfs3Status.NFS3ERR_INVAL);
} }
int maxCount = request.getMaxCount(); int maxCount = request.getMaxCount();
if (maxCount <= 0) {
LOG.info("Nonpositive maxcount in invalid READDIRPLUS request:" + maxCount);
return new READDIRPLUS3Response(Nfs3Status.NFS3ERR_INVAL);
}
if (LOG.isDebugEnabled()) { if (LOG.isDebugEnabled()) {
LOG.debug("NFS READDIRPLUS fileId: " + handle.getFileId() + " cookie: " LOG.debug("NFS READDIRPLUS fileId: " + handle.getFileId() + " cookie: "
+ cookie + " dirCount: " + dirCount + " maxCount: " + maxCount); + cookie + " dirCount: " + dirCount + " maxCount: " + maxCount);
@ -1471,8 +1509,8 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
String inodeIdPath = Nfs3Utils.getFileIdPath(cookie); String inodeIdPath = Nfs3Utils.getFileIdPath(cookie);
startAfter = inodeIdPath.getBytes(); startAfter = inodeIdPath.getBytes();
} }
dlisting = dfsClient.listPaths(dirFileIdPath, startAfter);
dlisting = listPaths(dfsClient, dirFileIdPath, startAfter);
postOpDirAttr = Nfs3Utils.getFileAttr(dfsClient, dirFileIdPath, iug); postOpDirAttr = Nfs3Utils.getFileAttr(dfsClient, dirFileIdPath, iug);
if (postOpDirAttr == null) { if (postOpDirAttr == null) {
LOG.info("Can't get path for fileId:" + handle.getFileId()); LOG.info("Can't get path for fileId:" + handle.getFileId());
@ -1540,8 +1578,8 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
} }
@Override @Override
public FSSTAT3Response fsstat(XDR xdr, public FSSTAT3Response fsstat(XDR xdr, SecurityHandler securityHandler,
SecurityHandler securityHandler, InetAddress client) { InetAddress client) {
FSSTAT3Response response = new FSSTAT3Response(Nfs3Status.NFS3_OK); FSSTAT3Response response = new FSSTAT3Response(Nfs3Status.NFS3_OK);
if (!checkAccessPrivilege(client, AccessPrivilege.READ_ONLY)) { if (!checkAccessPrivilege(client, AccessPrivilege.READ_ONLY)) {
@ -1598,8 +1636,8 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
} }
@Override @Override
public FSINFO3Response fsinfo(XDR xdr, public FSINFO3Response fsinfo(XDR xdr, SecurityHandler securityHandler,
SecurityHandler securityHandler, InetAddress client) { InetAddress client) {
FSINFO3Response response = new FSINFO3Response(Nfs3Status.NFS3_OK); FSINFO3Response response = new FSINFO3Response(Nfs3Status.NFS3_OK);
if (!checkAccessPrivilege(client, AccessPrivilege.READ_ONLY)) { if (!checkAccessPrivilege(client, AccessPrivilege.READ_ONLY)) {
@ -1650,8 +1688,8 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
} }
@Override @Override
public PATHCONF3Response pathconf(XDR xdr, public PATHCONF3Response pathconf(XDR xdr, SecurityHandler securityHandler,
SecurityHandler securityHandler, InetAddress client) { InetAddress client) {
PATHCONF3Response response = new PATHCONF3Response(Nfs3Status.NFS3_OK); PATHCONF3Response response = new PATHCONF3Response(Nfs3Status.NFS3_OK);
if (!checkAccessPrivilege(client, AccessPrivilege.READ_ONLY)) { if (!checkAccessPrivilege(client, AccessPrivilege.READ_ONLY)) {
@ -1697,7 +1735,7 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
} }
@Override @Override
public COMMIT3Response commit(XDR xdr, public COMMIT3Response commit(XDR xdr, Channel channel, int xid,
SecurityHandler securityHandler, InetAddress client) { SecurityHandler securityHandler, InetAddress client) {
COMMIT3Response response = new COMMIT3Response(Nfs3Status.NFS3_OK); COMMIT3Response response = new COMMIT3Response(Nfs3Status.NFS3_OK);
DFSClient dfsClient = clientCache.get(securityHandler.getUser()); DFSClient dfsClient = clientCache.get(securityHandler.getUser());
@ -1739,18 +1777,10 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
long commitOffset = (request.getCount() == 0) ? 0 long commitOffset = (request.getCount() == 0) ? 0
: (request.getOffset() + request.getCount()); : (request.getOffset() + request.getCount());
int status; // Insert commit as an async request
if (writeManager.handleCommit(handle, commitOffset)) { writeManager.handleCommit(dfsClient, handle, commitOffset, channel, xid,
status = Nfs3Status.NFS3_OK; preOpAttr);
} else { return null;
status = Nfs3Status.NFS3ERR_IO;
}
Nfs3FileAttributes postOpAttr = writeManager.getFileAttr(dfsClient,
handle, iug);
WccData fileWcc = new WccData(Nfs3Utils.getWccAttr(preOpAttr), postOpAttr);
return new COMMIT3Response(status, fileWcc,
Nfs3Constant.WRITE_COMMIT_VERF);
} catch (IOException e) { } catch (IOException e) {
LOG.warn("Exception ", e); LOG.warn("Exception ", e);
Nfs3FileAttributes postOpAttr = null; Nfs3FileAttributes postOpAttr = null;
@ -1776,25 +1806,53 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
} }
@Override @Override
public XDR handleInternal(RpcCall rpcCall, final XDR xdr, XDR out, public void handleInternal(ChannelHandlerContext ctx, RpcInfo info) {
InetAddress client, Channel channel) { RpcCall rpcCall = (RpcCall) info.header();
final NFSPROC3 nfsproc3 = NFSPROC3.fromValue(rpcCall.getProcedure()); final NFSPROC3 nfsproc3 = NFSPROC3.fromValue(rpcCall.getProcedure());
int xid = rpcCall.getXid(); int xid = rpcCall.getXid();
byte[] data = new byte[info.data().readableBytes()];
info.data().readBytes(data);
XDR xdr = new XDR(data);
XDR out = new XDR();
InetAddress client = ((InetSocketAddress) info.remoteAddress())
.getAddress();
Channel channel = info.channel();
Credentials credentials = rpcCall.getCredential(); Credentials credentials = rpcCall.getCredential();
// Ignore auth only for NFSPROC3_NULL, especially for Linux clients. // Ignore auth only for NFSPROC3_NULL, especially for Linux clients.
if (nfsproc3 != NFSPROC3.NULL) { if (nfsproc3 != NFSPROC3.NULL) {
if (rpcCall.getCredential().getFlavor() != AuthFlavor.AUTH_SYS if (credentials.getFlavor() != AuthFlavor.AUTH_SYS
&& rpcCall.getCredential().getFlavor() != AuthFlavor.RPCSEC_GSS) { && credentials.getFlavor() != AuthFlavor.RPCSEC_GSS) {
LOG.info("Wrong RPC AUTH flavor, " LOG.info("Wrong RPC AUTH flavor, " + credentials.getFlavor()
+ rpcCall.getCredential().getFlavor()
+ " is not AUTH_SYS or RPCSEC_GSS."); + " is not AUTH_SYS or RPCSEC_GSS.");
XDR reply = new XDR(); XDR reply = new XDR();
RpcDeniedReply rdr = new RpcDeniedReply(xid, RpcDeniedReply rdr = new RpcDeniedReply(xid,
RpcReply.ReplyState.MSG_ACCEPTED, RpcReply.ReplyState.MSG_ACCEPTED,
RpcDeniedReply.RejectState.AUTH_ERROR, new VerifierNone()); RpcDeniedReply.RejectState.AUTH_ERROR, new VerifierNone());
rdr.write(reply); rdr.write(reply);
return reply;
ChannelBuffer buf = ChannelBuffers.wrappedBuffer(reply.asReadOnlyWrap()
.buffer());
RpcResponse rsp = new RpcResponse(buf, info.remoteAddress());
RpcUtil.sendRpcResponse(ctx, rsp);
return;
}
}
if (!isIdempotent(rpcCall)) {
RpcCallCache.CacheEntry entry = rpcCallCache.checkOrAddToCache(client,
xid);
if (entry != null) { // in cache
if (entry.isCompleted()) {
LOG.info("Sending the cached reply to retransmitted request " + xid);
RpcUtil.sendRpcResponse(ctx, entry.getResponse());
return;
} else { // else request is in progress
LOG.info("Retransmitted request, transaction still in progress "
+ xid);
// Ignore the request and do nothing
return;
}
} }
} }
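The retransmission branch above depends on a reply cache for non-idempotent calls keyed by client address and xid. A minimal sketch of such a duplicate-request cache is shown below; it is not the RpcCallCache implementation, only an LRU approximation of the idea, and it omits the in-progress state the real cache tracks.

import java.net.InetAddress;
import java.util.LinkedHashMap;
import java.util.Map;
import java.util.Objects;

// Sketch only: remember the reply of recent non-idempotent requests so a
// retransmission (same client + xid) gets the cached answer back.
class DuplicateRequestCache<R> {
    private static final class Key {
        final InetAddress client;
        final int xid;
        Key(InetAddress client, int xid) { this.client = client; this.xid = xid; }
        @Override public boolean equals(Object o) {
            return o instanceof Key
                && ((Key) o).xid == xid
                && ((Key) o).client.equals(client);
        }
        @Override public int hashCode() { return Objects.hash(client, xid); }
    }

    private final Map<Key, R> cache;

    DuplicateRequestCache(final int maxEntries) {
        // access-ordered LinkedHashMap gives a simple LRU eviction policy
        this.cache = new LinkedHashMap<Key, R>(16, 0.75f, true) {
            @Override protected boolean removeEldestEntry(Map.Entry<Key, R> e) {
                return size() > maxEntries;
            }
        };
    }

    synchronized R lookup(InetAddress client, int xid) {
        return cache.get(new Key(client, xid));
    }

    synchronized void record(InetAddress client, int xid, R reply) {
        cache.put(new Key(client, xid), reply);
    }
}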
@ -1855,19 +1913,31 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
} else if (nfsproc3 == NFSPROC3.PATHCONF) { } else if (nfsproc3 == NFSPROC3.PATHCONF) {
response = pathconf(xdr, securityHandler, client); response = pathconf(xdr, securityHandler, client);
} else if (nfsproc3 == NFSPROC3.COMMIT) { } else if (nfsproc3 == NFSPROC3.COMMIT) {
response = commit(xdr, securityHandler, client); response = commit(xdr, channel, xid, securityHandler, client);
} else { } else {
// Invalid procedure // Invalid procedure
RpcAcceptedReply.getInstance(xid, RpcAcceptedReply.getInstance(xid,
RpcAcceptedReply.AcceptState.PROC_UNAVAIL, new VerifierNone()).write( RpcAcceptedReply.AcceptState.PROC_UNAVAIL, new VerifierNone()).write(
out); out);
} }
if (response != null) { if (response == null) {
// TODO: currently we just return VerifierNone if (LOG.isDebugEnabled()) {
out = response.writeHeaderAndResponse(out, xid, new VerifierNone()); LOG.debug("No sync response, expect an async response for request XID="
+ rpcCall.getXid());
}
return;
}
// TODO: currently we just return VerifierNone
out = response.writeHeaderAndResponse(out, xid, new VerifierNone());
ChannelBuffer buf = ChannelBuffers.wrappedBuffer(out.asReadOnlyWrap()
.buffer());
RpcResponse rsp = new RpcResponse(buf, info.remoteAddress());
if (!isIdempotent(rpcCall)) {
rpcCallCache.callCompleted(client, xid, rsp);
} }
return out; RpcUtil.sendRpcResponse(ctx, rsp);
} }
@Override @Override

View File

@ -20,13 +20,16 @@ package org.apache.hadoop.hdfs.nfs.nfs3;
import java.io.FileOutputStream; import java.io.FileOutputStream;
import java.io.IOException; import java.io.IOException;
import java.io.RandomAccessFile; import java.io.RandomAccessFile;
import java.nio.ByteBuffer;
import org.apache.commons.logging.Log; import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory; import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hdfs.client.HdfsDataOutputStream;
import org.apache.hadoop.nfs.nfs3.FileHandle; import org.apache.hadoop.nfs.nfs3.FileHandle;
import org.apache.hadoop.nfs.nfs3.Nfs3Constant.WriteStableHow; import org.apache.hadoop.nfs.nfs3.Nfs3Constant.WriteStableHow;
import org.jboss.netty.channel.Channel; import org.jboss.netty.channel.Channel;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Preconditions; import com.google.common.base.Preconditions;
/** /**
@ -50,8 +53,17 @@ class WriteCtx {
private final FileHandle handle; private final FileHandle handle;
private final long offset; private final long offset;
private final int count; private final int count;
// Only needed for overlapped writes; see OpenFileCtx.addWritesToCache()
private final int originalCount;
public static final int INVALID_ORIGINAL_COUNT = -1;
public int getOriginalCount() {
return originalCount;
}
private final WriteStableHow stableHow; private final WriteStableHow stableHow;
private volatile byte[] data; private volatile ByteBuffer data;
private final Channel channel; private final Channel channel;
private final int xid; private final int xid;
@ -89,9 +101,13 @@ class WriteCtx {
} }
return 0; return 0;
} }
// Resized write should not allow dump
Preconditions.checkState(originalCount == INVALID_ORIGINAL_COUNT);
this.raf = raf; this.raf = raf;
dumpFileOffset = dumpOut.getChannel().position(); dumpFileOffset = dumpOut.getChannel().position();
dumpOut.write(data, 0, count); dumpOut.write(data.array(), 0, count);
if (LOG.isDebugEnabled()) { if (LOG.isDebugEnabled()) {
LOG.debug("After dump, new dumpFileOffset:" + dumpFileOffset); LOG.debug("After dump, new dumpFileOffset:" + dumpFileOffset);
} }
@ -127,7 +143,8 @@ class WriteCtx {
return stableHow; return stableHow;
} }
byte[] getData() throws IOException { @VisibleForTesting
ByteBuffer getData() throws IOException {
if (dataState != DataState.DUMPED) { if (dataState != DataState.DUMPED) {
synchronized (this) { synchronized (this) {
if (dataState != DataState.DUMPED) { if (dataState != DataState.DUMPED) {
@ -143,15 +160,45 @@ class WriteCtx {
private void loadData() throws IOException { private void loadData() throws IOException {
Preconditions.checkState(data == null); Preconditions.checkState(data == null);
data = new byte[count]; byte[] rawData = new byte[count];
raf.seek(dumpFileOffset); raf.seek(dumpFileOffset);
int size = raf.read(data, 0, count); int size = raf.read(rawData, 0, count);
if (size != count) { if (size != count) {
throw new IOException("Data count is " + count + ", but read back " throw new IOException("Data count is " + count + ", but read back "
+ size + "bytes"); + size + "bytes");
} }
data = ByteBuffer.wrap(rawData);
} }
public void writeData(HdfsDataOutputStream fos) throws IOException {
Preconditions.checkState(fos != null);
ByteBuffer dataBuffer = null;
try {
dataBuffer = getData();
} catch (Exception e1) {
LOG.error("Failed to get request data offset:" + offset + " count:"
+ count + " error:" + e1);
throw new IOException("Can't get WriteCtx.data");
}
byte[] data = dataBuffer.array();
int position = dataBuffer.position();
int limit = dataBuffer.limit();
Preconditions.checkState(limit - position == count);
// Modified write has a valid original count
if (position != 0) {
if (limit != getOriginalCount()) {
throw new IOException("Modified write has differnt original size."
+ "buff position:" + position + " buff limit:" + limit + ". "
+ toString());
}
}
// Now write data
fos.write(data, position, count);
}
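writeData() above writes only the window between position and limit of the buffer: for a trimmed overlapping write, position marks the leading bytes that are already on disk. A standalone illustration with a plain OutputStream (BufferWriter and writeWindow are hypothetical names; the count check mirrors the Preconditions check above):

import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.OutputStream;
import java.nio.ByteBuffer;

// Sketch only: write the bytes between position and limit of an
// array-backed buffer, i.e. skip any prefix that was already persisted.
final class BufferWriter {
    static void writeWindow(ByteBuffer buf, int expectedCount, OutputStream out)
            throws IOException {
        int position = buf.position();
        int count = buf.limit() - position;
        if (count != expectedCount) {
            throw new IOException("Expected " + expectedCount
                + " bytes but buffer window has " + count);
        }
        out.write(buf.array(), buf.arrayOffset() + position, count);
    }

    public static void main(String[] args) throws IOException {
        ByteBuffer b = ByteBuffer.wrap(new byte[] {0, 1, 2, 3, 4});
        b.position(2);                       // first two bytes already written
        ByteArrayOutputStream sink = new ByteArrayOutputStream();
        writeWindow(b, 3, sink);             // writes 2, 3, 4
        System.out.println(sink.size());     // 3
    }
}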
Channel getChannel() { Channel getChannel() {
return channel; return channel;
} }
@ -168,11 +215,13 @@ class WriteCtx {
this.replied = replied; this.replied = replied;
} }
WriteCtx(FileHandle handle, long offset, int count, WriteStableHow stableHow, WriteCtx(FileHandle handle, long offset, int count, int originalCount,
byte[] data, Channel channel, int xid, boolean replied, DataState dataState) { WriteStableHow stableHow, ByteBuffer data, Channel channel, int xid,
boolean replied, DataState dataState) {
this.handle = handle; this.handle = handle;
this.offset = offset; this.offset = offset;
this.count = count; this.count = count;
this.originalCount = originalCount;
this.stableHow = stableHow; this.stableHow = stableHow;
this.data = data; this.data = data;
this.channel = channel; this.channel = channel;
@ -185,7 +234,7 @@ class WriteCtx {
@Override @Override
public String toString() { public String toString() {
return "Id:" + handle.getFileId() + " offset:" + offset + " count:" + count return "Id:" + handle.getFileId() + " offset:" + offset + " count:" + count
+ " stableHow:" + stableHow + " replied:" + replied + " dataState:" + " originalCount:" + originalCount + " stableHow:" + stableHow
+ dataState + " xid:" + xid; + " replied:" + replied + " dataState:" + dataState + " xid:" + xid;
} }
} }

View File

@ -28,6 +28,7 @@ import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeysPublic; import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
import org.apache.hadoop.hdfs.DFSClient; import org.apache.hadoop.hdfs.DFSClient;
import org.apache.hadoop.hdfs.client.HdfsDataOutputStream; import org.apache.hadoop.hdfs.client.HdfsDataOutputStream;
import org.apache.hadoop.hdfs.nfs.nfs3.OpenFileCtx.COMMIT_STATUS;
import org.apache.hadoop.nfs.NfsFileType; import org.apache.hadoop.nfs.NfsFileType;
import org.apache.hadoop.nfs.nfs3.FileHandle; import org.apache.hadoop.nfs.nfs3.FileHandle;
import org.apache.hadoop.nfs.nfs3.IdUserGroup; import org.apache.hadoop.nfs.nfs3.IdUserGroup;
@ -36,6 +37,7 @@ import org.apache.hadoop.nfs.nfs3.Nfs3Constant.WriteStableHow;
import org.apache.hadoop.nfs.nfs3.Nfs3FileAttributes; import org.apache.hadoop.nfs.nfs3.Nfs3FileAttributes;
import org.apache.hadoop.nfs.nfs3.Nfs3Status; import org.apache.hadoop.nfs.nfs3.Nfs3Status;
import org.apache.hadoop.nfs.nfs3.request.WRITE3Request; import org.apache.hadoop.nfs.nfs3.request.WRITE3Request;
import org.apache.hadoop.nfs.nfs3.response.COMMIT3Response;
import org.apache.hadoop.nfs.nfs3.response.WRITE3Response; import org.apache.hadoop.nfs.nfs3.response.WRITE3Response;
import org.apache.hadoop.nfs.nfs3.response.WccData; import org.apache.hadoop.nfs.nfs3.response.WccData;
import org.apache.hadoop.oncrpc.XDR; import org.apache.hadoop.oncrpc.XDR;
@ -166,7 +168,7 @@ public class WriteManager {
String writeDumpDir = config.get(Nfs3Constant.FILE_DUMP_DIR_KEY, String writeDumpDir = config.get(Nfs3Constant.FILE_DUMP_DIR_KEY,
Nfs3Constant.FILE_DUMP_DIR_DEFAULT); Nfs3Constant.FILE_DUMP_DIR_DEFAULT);
openFileCtx = new OpenFileCtx(fos, latestAttr, writeDumpDir + "/" openFileCtx = new OpenFileCtx(fos, latestAttr, writeDumpDir + "/"
+ fileHandle.getFileId()); + fileHandle.getFileId(), dfsClient, iug);
addOpenFileStream(fileHandle, openFileCtx); addOpenFileStream(fileHandle, openFileCtx);
if (LOG.isDebugEnabled()) { if (LOG.isDebugEnabled()) {
LOG.debug("opened stream for file:" + fileHandle.getFileId()); LOG.debug("opened stream for file:" + fileHandle.getFileId());
@ -176,71 +178,55 @@ public class WriteManager {
// Add write into the async job queue // Add write into the async job queue
openFileCtx.receivedNewWrite(dfsClient, request, channel, xid, openFileCtx.receivedNewWrite(dfsClient, request, channel, xid,
asyncDataService, iug); asyncDataService, iug);
// Block stable write
if (request.getStableHow() != WriteStableHow.UNSTABLE) {
if (handleCommit(fileHandle, offset + count)) {
Nfs3FileAttributes postOpAttr = getFileAttr(dfsClient, handle, iug);
WccData fileWcc = new WccData(Nfs3Utils.getWccAttr(preOpAttr),
postOpAttr);
WRITE3Response response = new WRITE3Response(Nfs3Status.NFS3_OK,
fileWcc, count, request.getStableHow(),
Nfs3Constant.WRITE_COMMIT_VERF);
Nfs3Utils.writeChannel(channel, response.writeHeaderAndResponse(
new XDR(), xid, new VerifierNone()), xid);
} else {
WRITE3Response response = new WRITE3Response(Nfs3Status.NFS3ERR_IO);
Nfs3Utils.writeChannel(channel, response.writeHeaderAndResponse(
new XDR(), xid, new VerifierNone()), xid);
}
}
return; return;
} }
boolean handleCommit(FileHandle fileHandle, long commitOffset) { void handleCommit(DFSClient dfsClient, FileHandle fileHandle,
long commitOffset, Channel channel, int xid, Nfs3FileAttributes preOpAttr) {
int status;
OpenFileCtx openFileCtx = openFileMap.get(fileHandle); OpenFileCtx openFileCtx = openFileMap.get(fileHandle);
if (openFileCtx == null) { if (openFileCtx == null) {
LOG.info("No opened stream for fileId:" + fileHandle.getFileId() LOG.info("No opened stream for fileId:" + fileHandle.getFileId()
+ " commitOffset=" + commitOffset); + " commitOffset=" + commitOffset + ". Return success in this case.");
return true; status = Nfs3Status.NFS3_OK;
}
long timeout = 30 * 1000; // 30 seconds
long startCommit = System.currentTimeMillis();
while (true) {
int ret = openFileCtx.checkCommit(commitOffset);
if (ret == OpenFileCtx.COMMIT_FINISHED) {
// Committed
return true;
} else if (ret == OpenFileCtx.COMMIT_INACTIVE_CTX) {
LOG.info("Inactive stream, fileId=" + fileHandle.getFileId()
+ " commitOffset=" + commitOffset);
return true;
} else if (ret == OpenFileCtx.COMMIT_INACTIVE_WITH_PENDING_WRITE) {
LOG.info("Inactive stream with pending writes, fileId="
+ fileHandle.getFileId() + " commitOffset=" + commitOffset);
return false;
}
assert (ret == OpenFileCtx.COMMIT_WAIT || ret == OpenFileCtx.COMMIT_ERROR);
if (ret == OpenFileCtx.COMMIT_ERROR) {
return false;
}
if (LOG.isDebugEnabled()) { } else {
LOG.debug("Not committed yet, wait., fileId=" + fileHandle.getFileId() COMMIT_STATUS ret = openFileCtx.checkCommit(dfsClient, commitOffset,
+ " commitOffset=" + commitOffset); channel, xid, preOpAttr);
switch (ret) {
case COMMIT_DO_SYNC:
case COMMIT_FINISHED:
case COMMIT_INACTIVE_CTX:
status = Nfs3Status.NFS3_OK;
break;
case COMMIT_INACTIVE_WITH_PENDING_WRITE:
case COMMIT_ERROR:
status = Nfs3Status.NFS3ERR_IO;
break;
case COMMIT_WAIT:
// Do nothing. Commit is async now.
return;
default:
throw new RuntimeException("Should not get commit return code:"
+ ret.name());
} }
if (System.currentTimeMillis() - startCommit > timeout) { }
// Commit took too long, return error
return false; // Send out the response
} Nfs3FileAttributes postOpAttr = null;
try { try {
Thread.sleep(100); String fileIdPath = Nfs3Utils.getFileIdPath(preOpAttr.getFileid());
} catch (InterruptedException e) { postOpAttr = Nfs3Utils.getFileAttr(dfsClient, fileIdPath, iug);
LOG.info("Commit is interrupted, fileId=" + fileHandle.getFileId() } catch (IOException e1) {
+ " commitOffset=" + commitOffset); LOG.info("Can't get postOpAttr for fileId: " + preOpAttr.getFileid());
return false; }
} WccData fileWcc = new WccData(Nfs3Utils.getWccAttr(preOpAttr), postOpAttr);
}// while COMMIT3Response response = new COMMIT3Response(status, fileWcc,
Nfs3Constant.WRITE_COMMIT_VERF);
Nfs3Utils.writeChannelCommit(channel,
response.writeHeaderAndResponse(new XDR(), xid, new VerifierNone()),
xid);
} }
/** /**

View File

@ -0,0 +1,195 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.nfs;
import static org.junit.Assert.assertTrue;
import java.io.IOException;
import java.net.InetAddress;
import java.util.ArrayList;
import java.util.List;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.nfs.nfs3.Nfs3;
import org.apache.hadoop.hdfs.nfs.nfs3.RpcProgramNfs3;
import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.apache.hadoop.nfs.nfs3.FileHandle;
import org.apache.hadoop.nfs.nfs3.response.READDIR3Response;
import org.apache.hadoop.nfs.nfs3.response.READDIR3Response.Entry3;
import org.apache.hadoop.nfs.nfs3.response.READDIRPLUS3Response;
import org.apache.hadoop.nfs.nfs3.response.READDIRPLUS3Response.EntryPlus3;
import org.apache.hadoop.oncrpc.XDR;
import org.apache.hadoop.oncrpc.security.SecurityHandler;
import org.junit.AfterClass;
import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.Test;
import org.mockito.Mockito;
/**
* Test READDIR and READDIRPLUS requests with zero and nonzero cookies
*/
public class TestReaddir {
static Configuration config = new Configuration();
static MiniDFSCluster cluster = null;
static DistributedFileSystem hdfs;
static NameNode nn;
static RpcProgramNfs3 nfsd;
static String testdir = "/tmp";
static SecurityHandler securityHandler;
@BeforeClass
public static void setup() throws Exception {
cluster = new MiniDFSCluster.Builder(config).numDataNodes(1).build();
cluster.waitActive();
hdfs = cluster.getFileSystem();
nn = cluster.getNameNode();
// Start nfs
List<String> exports = new ArrayList<String>();
exports.add("/");
Nfs3 nfs3 = new Nfs3(exports, config);
nfs3.start(false);
nfsd = (RpcProgramNfs3) nfs3.getRpcProgram();
securityHandler = Mockito.mock(SecurityHandler.class);
Mockito.when(securityHandler.getUser()).thenReturn(
System.getProperty("user.name"));
}
@AfterClass
public static void shutdown() throws Exception {
if (cluster != null) {
cluster.shutdown();
}
}
@Before
public void createFiles() throws IllegalArgumentException, IOException {
hdfs.delete(new Path(testdir), true);
hdfs.mkdirs(new Path(testdir));
DFSTestUtil.createFile(hdfs, new Path(testdir + "/f1"), 0, (short) 1, 0);
DFSTestUtil.createFile(hdfs, new Path(testdir + "/f2"), 0, (short) 1, 0);
DFSTestUtil.createFile(hdfs, new Path(testdir + "/f3"), 0, (short) 1, 0);
}
@Test
public void testReaddirBasic() throws IOException {
// Get inodeId of /tmp
HdfsFileStatus status = nn.getRpcServer().getFileInfo(testdir);
long dirId = status.getFileId();
// Create related part of the XDR request
XDR xdr_req = new XDR();
FileHandle handle = new FileHandle(dirId);
handle.serialize(xdr_req);
xdr_req.writeLongAsHyper(0); // cookie
xdr_req.writeLongAsHyper(0); // verifier
xdr_req.writeInt(100); // count
READDIR3Response response = nfsd.readdir(xdr_req.asReadOnlyWrap(),
securityHandler, InetAddress.getLocalHost());
List<Entry3> dirents = response.getDirList().getEntries();
assertTrue(dirents.size() == 5); // including dot, dotdot
// Test start listing from f2
status = nn.getRpcServer().getFileInfo(testdir + "/f2");
long f2Id = status.getFileId();
// Create related part of the XDR request
xdr_req = new XDR();
handle = new FileHandle(dirId);
handle.serialize(xdr_req);
xdr_req.writeLongAsHyper(f2Id); // cookie
xdr_req.writeLongAsHyper(0); // verifier
xdr_req.writeInt(100); // count
response = nfsd.readdir(xdr_req.asReadOnlyWrap(), securityHandler,
InetAddress.getLocalHost());
dirents = response.getDirList().getEntries();
assertTrue(dirents.size() == 1);
Entry3 entry = dirents.get(0);
assertTrue(entry.getName().equals("f3"));
// When the cookie is deleted, the list starts over, not including dot and dotdot
hdfs.delete(new Path(testdir + "/f2"), false);
response = nfsd.readdir(xdr_req.asReadOnlyWrap(), securityHandler,
InetAddress.getLocalHost());
dirents = response.getDirList().getEntries();
assertTrue(dirents.size() == 2); // No dot, dotdot
}
@Test
// Test readdirplus
public void testReaddirPlus() throws IOException {
// Get inodeId of /tmp
HdfsFileStatus status = nn.getRpcServer().getFileInfo(testdir);
long dirId = status.getFileId();
// Create related part of the XDR request
XDR xdr_req = new XDR();
FileHandle handle = new FileHandle(dirId);
handle.serialize(xdr_req);
xdr_req.writeLongAsHyper(0); // cookie
xdr_req.writeLongAsHyper(0); // verifier
xdr_req.writeInt(100); // dirCount
xdr_req.writeInt(1000); // maxCount
READDIRPLUS3Response responsePlus = nfsd.readdirplus(
xdr_req.asReadOnlyWrap(), securityHandler, InetAddress.getLocalHost());
List<EntryPlus3> direntPlus = responsePlus.getDirListPlus().getEntries();
assertTrue(direntPlus.size() == 5); // including dot, dotdot
// Test start listing from f2
status = nn.getRpcServer().getFileInfo(testdir + "/f2");
long f2Id = status.getFileId();
// Create related part of the XDR request
xdr_req = new XDR();
handle = new FileHandle(dirId);
handle.serialize(xdr_req);
xdr_req.writeLongAsHyper(f2Id); // cookie
xdr_req.writeLongAsHyper(0); // verifier
xdr_req.writeInt(100); // dirCount
xdr_req.writeInt(1000); // maxCount
responsePlus = nfsd.readdirplus(xdr_req.asReadOnlyWrap(), securityHandler,
InetAddress.getLocalHost());
direntPlus = responsePlus.getDirListPlus().getEntries();
assertTrue(direntPlus.size() == 1);
EntryPlus3 entryPlus = direntPlus.get(0);
assertTrue(entryPlus.getName().equals("f3"));
// When the cookie is deleted, the list starts over, not including dot and dotdot
hdfs.delete(new Path(testdir + "/f2"), false);
responsePlus = nfsd.readdirplus(xdr_req.asReadOnlyWrap(), securityHandler,
InetAddress.getLocalHost());
direntPlus = responsePlus.getDirListPlus().getEntries();
assertTrue(direntPlus.size() == 2); // No dot, dotdot
}
}
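Both tests exercise the READDIR cookie contract: cookie 0 lists from the top (including dot and dotdot), a nonzero cookie resumes after the entry with that file id, and a deleted cookie falls back to a fresh listing. A toy model of that paging behaviour over a sorted fileId-to-name map, not the HDFS implementation:

import java.util.ArrayList;
import java.util.List;
import java.util.TreeMap;

// Sketch only: toy model of READDIR cookie paging over fileId -> name.
class CookiePager {
    private final TreeMap<Long, String> entries = new TreeMap<>();

    void add(long fileId, String name) { entries.put(fileId, name); }
    void remove(long fileId)           { entries.remove(fileId); }

    // cookie == 0 lists from the top; otherwise resume after that file id.
    // If the cookie's entry was deleted we simply start over, matching the
    // fallback behaviour tested above.
    List<String> readdir(long cookie, int maxEntries) {
        List<String> page = new ArrayList<>();
        Long id;
        if (cookie != 0 && entries.containsKey(cookie)) {
            id = entries.higherKey(cookie);                      // resume
        } else {
            id = entries.isEmpty() ? null : entries.firstKey();  // start over
        }
        while (id != null && page.size() < maxEntries) {
            page.add(entries.get(id));
            id = entries.higherKey(id);
        }
        return page;
    }
}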

View File

@ -0,0 +1,165 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.nfs.nfs3;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.util.concurrent.ConcurrentNavigableMap;
import junit.framework.Assert;
import org.apache.hadoop.hdfs.DFSClient;
import org.apache.hadoop.hdfs.client.HdfsDataOutputStream;
import org.apache.hadoop.hdfs.nfs.nfs3.OpenFileCtx.COMMIT_STATUS;
import org.apache.hadoop.hdfs.nfs.nfs3.OpenFileCtx.CommitCtx;
import org.apache.hadoop.nfs.nfs3.FileHandle;
import org.apache.hadoop.nfs.nfs3.IdUserGroup;
import org.apache.hadoop.nfs.nfs3.Nfs3Constant.WriteStableHow;
import org.apache.hadoop.nfs.nfs3.Nfs3FileAttributes;
import org.apache.hadoop.nfs.nfs3.request.WRITE3Request;
import org.junit.Test;
import org.mockito.Mockito;
public class TestWrites {
@Test
public void testAlterWriteRequest() throws IOException {
int len = 20;
byte[] data = new byte[len];
ByteBuffer buffer = ByteBuffer.wrap(data);
for (int i = 0; i < len; i++) {
buffer.put((byte) i);
}
buffer.flip();
int originalCount = buffer.array().length;
WRITE3Request request = new WRITE3Request(new FileHandle(), 0, data.length,
WriteStableHow.UNSTABLE, buffer);
WriteCtx writeCtx1 = new WriteCtx(request.getHandle(), request.getOffset(),
request.getCount(), WriteCtx.INVALID_ORIGINAL_COUNT,
request.getStableHow(), request.getData(), null, 1, false,
WriteCtx.DataState.NO_DUMP);
Assert.assertTrue(writeCtx1.getData().array().length == originalCount);
// Now change the write request
OpenFileCtx.alterWriteRequest(request, 12);
WriteCtx writeCtx2 = new WriteCtx(request.getHandle(), request.getOffset(),
request.getCount(), originalCount, request.getStableHow(),
request.getData(), null, 2, false, WriteCtx.DataState.NO_DUMP);
ByteBuffer appendedData = writeCtx2.getData();
int position = appendedData.position();
int limit = appendedData.limit();
Assert.assertTrue(position == 12);
Assert.assertTrue(limit - position == 8);
Assert.assertTrue(appendedData.get(position) == (byte) 12);
Assert.assertTrue(appendedData.get(position + 1) == (byte) 13);
Assert.assertTrue(appendedData.get(position + 2) == (byte) 14);
Assert.assertTrue(appendedData.get(position + 7) == (byte) 19);
// Test current file write offset is at boundaries
buffer.position(0);
request = new WRITE3Request(new FileHandle(), 0, data.length,
WriteStableHow.UNSTABLE, buffer);
OpenFileCtx.alterWriteRequest(request, 1);
WriteCtx writeCtx3 = new WriteCtx(request.getHandle(), request.getOffset(),
request.getCount(), originalCount, request.getStableHow(),
request.getData(), null, 2, false, WriteCtx.DataState.NO_DUMP);
appendedData = writeCtx3.getData();
position = appendedData.position();
limit = appendedData.limit();
Assert.assertTrue(position == 1);
Assert.assertTrue(limit - position == 19);
Assert.assertTrue(appendedData.get(position) == (byte) 1);
Assert.assertTrue(appendedData.get(position + 18) == (byte) 19);
// Reset buffer position before test another boundary
buffer.position(0);
request = new WRITE3Request(new FileHandle(), 0, data.length,
WriteStableHow.UNSTABLE, buffer);
OpenFileCtx.alterWriteRequest(request, 19);
WriteCtx writeCtx4 = new WriteCtx(request.getHandle(), request.getOffset(),
request.getCount(), originalCount, request.getStableHow(),
request.getData(), null, 2, false, WriteCtx.DataState.NO_DUMP);
appendedData = writeCtx4.getData();
position = appendedData.position();
limit = appendedData.limit();
Assert.assertTrue(position == 19);
Assert.assertTrue(limit - position == 1);
Assert.assertTrue(appendedData.get(position) == (byte) 19);
}
@Test
// Validate all the commit check return codes of OpenFileCtx.COMMIT_STATUS, which
// include COMMIT_FINISHED, COMMIT_WAIT, COMMIT_INACTIVE_CTX,
// COMMIT_INACTIVE_WITH_PENDING_WRITE, COMMIT_ERROR, and COMMIT_DO_SYNC.
public void testCheckCommit() throws IOException {
DFSClient dfsClient = Mockito.mock(DFSClient.class);
Nfs3FileAttributes attr = new Nfs3FileAttributes();
HdfsDataOutputStream fos = Mockito.mock(HdfsDataOutputStream.class);
Mockito.when(fos.getPos()).thenReturn((long) 0);
OpenFileCtx ctx = new OpenFileCtx(fos, attr, "/dumpFilePath", dfsClient,
new IdUserGroup());
COMMIT_STATUS ret;
// Test inactive open file context
ctx.setActiveStatusForTest(false);
ret = ctx.checkCommit(dfsClient, 0, null, 1, attr);
Assert.assertTrue(ret == COMMIT_STATUS.COMMIT_INACTIVE_CTX);
ctx.getPendingWritesForTest().put(new OffsetRange(5, 10),
new WriteCtx(null, 0, 0, 0, null, null, null, 0, false, null));
ret = ctx.checkCommit(dfsClient, 0, null, 1, attr);
Assert.assertTrue(ret == COMMIT_STATUS.COMMIT_INACTIVE_WITH_PENDING_WRITE);
// Test request with non zero commit offset
ctx.setActiveStatusForTest(true);
Mockito.when(fos.getPos()).thenReturn((long) 10);
ret = ctx.checkCommit(dfsClient, 5, null, 1, attr);
Assert.assertTrue(ret == COMMIT_STATUS.COMMIT_DO_SYNC);
ret = ctx.checkCommit(dfsClient, 10, null, 1, attr);
Assert.assertTrue(ret == COMMIT_STATUS.COMMIT_DO_SYNC);
ConcurrentNavigableMap<Long, CommitCtx> commits = ctx
.getPendingCommitsForTest();
Assert.assertTrue(commits.size() == 0);
ret = ctx.checkCommit(dfsClient, 11, null, 1, attr);
Assert.assertTrue(ret == COMMIT_STATUS.COMMIT_WAIT);
Assert.assertTrue(commits.size() == 1);
long key = commits.firstKey();
Assert.assertTrue(key == 11);
// Test request with zero commit offset
commits.remove(new Long(11));
// There is one pending write [5,10]
ret = ctx.checkCommit(dfsClient, 0, null, 1, attr);
Assert.assertTrue(ret == COMMIT_STATUS.COMMIT_WAIT);
Assert.assertTrue(commits.size() == 1);
key = commits.firstKey();
Assert.assertTrue(key == 9);
// Empty pending writes
ctx.getPendingWritesForTest().remove(new OffsetRange(5, 10));
ret = ctx.checkCommit(dfsClient, 0, null, 1, attr);
Assert.assertTrue(ret == COMMIT_STATUS.COMMIT_FINISHED);
}
}
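As an aside for readers tracing the assertions above, the decision the commit-offset cases boil down to can be sketched in a few lines. This is only an illustration of the tested behaviour, not the OpenFileCtx implementation; the class and method names are made up:

    // Illustrative sketch only; mirrors the outcomes asserted above, not the real OpenFileCtx code.
    public class CommitCheckSketch {
      static String commitStatusFor(long commitOffset, long flushedPos, boolean hasPendingWrites) {
        if (commitOffset > 0) {
          // Everything up to commitOffset already flushed -> sync now; otherwise queue and wait.
          return commitOffset <= flushedPos ? "COMMIT_DO_SYNC" : "COMMIT_WAIT";
        }
        // A zero commit offset means "commit whatever has been written so far".
        return hasPendingWrites ? "COMMIT_WAIT" : "COMMIT_FINISHED";
      }

      public static void main(String[] args) {
        System.out.println(commitStatusFor(5, 10, false));   // COMMIT_DO_SYNC
        System.out.println(commitStatusFor(11, 10, false));  // COMMIT_WAIT
        System.out.println(commitStatusFor(0, 10, true));    // COMMIT_WAIT
        System.out.println(commitStatusFor(0, 10, false));   // COMMIT_FINISHED
      }
    }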

View File

@ -253,8 +253,13 @@ Release 2.3.0 - UNRELEASED
HDFS-4953. Enable HDFS local reads via mmap. HDFS-4953. Enable HDFS local reads via mmap.
(Colin Patrick McCabe via wang). (Colin Patrick McCabe via wang).
HDFS-5342. Provide more information in the FSNamesystem JMX interfaces.
(Haohui Mai via jing9)
IMPROVEMENTS IMPROVEMENTS
HDFS-5267. Remove volatile from LightWeightHashSet. (Junping Du via llu)
HDFS-4657. Limit the number of blocks logged by the NN after a block HDFS-4657. Limit the number of blocks logged by the NN after a block
report to a configurable value. (Aaron T. Myers via Colin Patrick report to a configurable value. (Aaron T. Myers via Colin Patrick
McCabe) McCabe)
@ -262,9 +267,6 @@ Release 2.3.0 - UNRELEASED
HDFS-4278. Log an ERROR when DFS_BLOCK_ACCESS_TOKEN_ENABLE config is HDFS-4278. Log an ERROR when DFS_BLOCK_ACCESS_TOKEN_ENABLE config is
disabled but security is turned on. (Kousuke Saruta via harsh) disabled but security is turned on. (Kousuke Saruta via harsh)
HDFS-4817. Make HDFS advisory caching configurable on a per-file basis.
(Colin Patrick McCabe)
HDFS-5004. Add additional JMX bean for NameNode status data HDFS-5004. Add additional JMX bean for NameNode status data
(Trevor Lorimer via cos) (Trevor Lorimer via cos)
@ -300,6 +302,23 @@ Release 2.3.0 - UNRELEASED
HDFS-5260. Merge zero-copy memory-mapped HDFS client reads to trunk and HDFS-5260. Merge zero-copy memory-mapped HDFS client reads to trunk and
branch-2. (cnauroth) branch-2. (cnauroth)
HDFS-4517. Cover class RemoteBlockReader with unit tests. (Vadim Bondarev
and Dennis Y via kihwal)
HDFS-4512. Cover package org.apache.hadoop.hdfs.server.common with tests.
(Vadim Bondarev via kihwal)
HDFS-4510. Cover classes ClusterJspHelper/NamenodeJspHelper with unit
tests. (Andrey Klochkov via kihwal)
HDFS-5323. Remove some deadcode in BlockManager (Colin Patrick McCabe)
HDFS-5338. Add a conf to disable hostname check in datanode registration.
(szetszwo)
HDFS-5130. Add test for snapshot related FsShell and DFSAdmin commands.
(Binglin Chang via jing9)
OPTIMIZATIONS OPTIMIZATIONS
HDFS-5239. Allow FSNamesystem lock fairness to be configurable (daryn) HDFS-5239. Allow FSNamesystem lock fairness to be configurable (daryn)
@ -327,7 +346,13 @@ Release 2.3.0 - UNRELEASED
HDFS-5266. ElasticByteBufferPool#Key does not implement equals. (cnauroth) HDFS-5266. ElasticByteBufferPool#Key does not implement equals. (cnauroth)
Release 2.2.0 - UNRELEASED HDFS-5352. Server#initLog() doesn't close InputStream in httpfs. (Ted Yu via
jing9)
HDFS-5283. Under construction blocks only inside snapshots should not be
counted in safemode threshold. (Vinay via szetszwo)
Release 2.2.1 - UNRELEASED
INCOMPATIBLE CHANGES INCOMPATIBLE CHANGES
@ -339,12 +364,46 @@ Release 2.2.0 - UNRELEASED
BUG FIXES BUG FIXES
Release 2.1.2 - UNRELEASED HDFS-5307. Support both HTTP and HTTPS in jsp pages (Haohui Mai via
brandonli)
HDFS-5291. Standby namenode after transition to active goes into safemode.
(jing9)
HDFS-5317. Go back to DFS Home link does not work on datanode webUI
(Haohui Mai via brandonli)
HDFS-5316. Namenode ignores the default https port (Haohui Mai via
brandonli)
HDFS-5281. COMMIT request should not block. (brandonli)
HDFS-5337. Should do hsync for a commit request even when there are no pending
writes. (brandonli)
HDFS-5335. Hive query failed with possible race in dfs output stream.
(Haohui Mai via suresh)
HDFS-5322. HDFS delegation token not found in cache errors seen on secure HA
clusters. (jing9)
HDFS-5329. Update FSNamesystem#getListing() to handle inode path in startAfter
token. (brandonli)
HDFS-5330. fix readdir and readdirplus for large directories (brandonli)
Release 2.2.0 - 2013-10-13
INCOMPATIBLE CHANGES INCOMPATIBLE CHANGES
NEW FEATURES NEW FEATURES
HDFS-4817. Make HDFS advisory caching configurable on a per-file basis.
(Colin Patrick McCabe)
HDFS-5230. Introduce RpcInfo to decouple XDR classes from the RPC API.
(Haohui Mai via brandonli)
IMPROVEMENTS IMPROVEMENTS
HDFS-5246. Make Hadoop nfs server port and mount daemon port HDFS-5246. Make Hadoop nfs server port and mount daemon port
@ -353,6 +412,9 @@ Release 2.1.2 - UNRELEASED
HDFS-5256. Use guava LoadingCache to implement DFSClientCache. (Haohui Mai HDFS-5256. Use guava LoadingCache to implement DFSClientCache. (Haohui Mai
via brandonli) via brandonli)
HDFS-5308. Replace HttpConfig#getSchemePrefix with implicit schemes in HDFS
JSP. (Haohui Mai via jing9)
OPTIMIZATIONS OPTIMIZATIONS
BUG FIXES BUG FIXES
@ -373,6 +435,27 @@ Release 2.1.2 - UNRELEASED
HDFS-5265. Namenode fails to start when dfs.https.port is unspecified. HDFS-5265. Namenode fails to start when dfs.https.port is unspecified.
(Haohui Mai via jing9) (Haohui Mai via jing9)
HDFS-5255. Distcp job fails with hsftp when https is enabled in insecure
cluster. (Arpit Agarwal)
HDFS-5279. Guard against NullPointerException in NameNode JSP pages before
initialization of FSNamesystem. (cnauroth)
HDFS-5289. Race condition in TestRetryCacheWithHA#testCreateSymlink causes
spurious test failure. (atm)
HDFS-5300. FSNameSystem#deleteSnapshot() should not check owner in case of
permissions disabled. (Vinay via jing9)
HDFS-5306. Datanode https port is not available at the namenode. (Suresh
Srinivas via brandonli)
HDFS-5299. DFS client hangs in updatePipeline RPC when failover happened.
(Vinay via jing9)
HDFS-5259. Support clients that combine appended data with old data
before sending it to the NFS server. (brandonli)
Release 2.1.1-beta - 2013-09-23 Release 2.1.1-beta - 2013-09-23
INCOMPATIBLE CHANGES INCOMPATIBLE CHANGES

View File

@ -900,10 +900,15 @@ public class DFSClient implements java.io.Closeable {
assert dtService != null; assert dtService != null;
Token<DelegationTokenIdentifier> token = Token<DelegationTokenIdentifier> token =
namenode.getDelegationToken(renewer); namenode.getDelegationToken(renewer);
token.setService(this.dtService);
LOG.info("Created " + DelegationTokenIdentifier.stringifyToken(token)); if (token != null) {
token.setService(this.dtService);
LOG.info("Created " + DelegationTokenIdentifier.stringifyToken(token));
} else {
LOG.info("Cannot get delegation token from " + renewer);
}
return token; return token;
} }
/** /**
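A hedged illustration of what callers should expect now that a null token is possible (for example with security disabled). The renewer string and the surrounding flow are example values, not taken from this patch:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.security.token.Token;

    // The renewer name "yarn" is just an example; with security disabled the call
    // may now return null instead of a token.
    public class TokenFetchSketch {
      public static void main(String[] args) throws Exception {
        FileSystem fs = FileSystem.get(new Configuration());
        Token<?> token = fs.getDelegationToken("yarn");
        if (token == null) {
          System.out.println("No delegation token issued; continuing without one");
        } else {
          System.out.println("Got token for service " + token.getService());
        }
        fs.close();
      }
    }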

View File

@ -193,7 +193,10 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
public static final boolean DFS_DATANODE_SYNCONCLOSE_DEFAULT = false; public static final boolean DFS_DATANODE_SYNCONCLOSE_DEFAULT = false;
public static final String DFS_DATANODE_SOCKET_REUSE_KEEPALIVE_KEY = "dfs.datanode.socket.reuse.keepalive"; public static final String DFS_DATANODE_SOCKET_REUSE_KEEPALIVE_KEY = "dfs.datanode.socket.reuse.keepalive";
public static final int DFS_DATANODE_SOCKET_REUSE_KEEPALIVE_DEFAULT = 1000; public static final int DFS_DATANODE_SOCKET_REUSE_KEEPALIVE_DEFAULT = 1000;
public static final String DFS_NAMENODE_DATANODE_REGISTRATION_IP_HOSTNAME_CHECK_KEY = "dfs.namenode.datanode.registration.ip-hostname-check";
public static final boolean DFS_NAMENODE_DATANODE_REGISTRATION_IP_HOSTNAME_CHECK_DEFAULT = true;
// Whether to enable datanode's stale state detection and usage for reads // Whether to enable datanode's stale state detection and usage for reads
public static final String DFS_NAMENODE_AVOID_STALE_DATANODE_FOR_READ_KEY = "dfs.namenode.avoid.read.stale.datanode"; public static final String DFS_NAMENODE_AVOID_STALE_DATANODE_FOR_READ_KEY = "dfs.namenode.avoid.read.stale.datanode";
public static final boolean DFS_NAMENODE_AVOID_STALE_DATANODE_FOR_READ_DEFAULT = false; public static final boolean DFS_NAMENODE_AVOID_STALE_DATANODE_FOR_READ_DEFAULT = false;

View File

@ -38,6 +38,7 @@ import java.util.LinkedList;
import java.util.List; import java.util.List;
import java.util.concurrent.TimeUnit; import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicReference;
import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.fs.CanSetDropBehind; import org.apache.hadoop.fs.CanSetDropBehind;
@ -140,7 +141,7 @@ public class DFSOutputStream extends FSOutputSummer
private long bytesCurBlock = 0; // bytes writen in current block private long bytesCurBlock = 0; // bytes writen in current block
private int packetSize = 0; // write packet size, not including the header. private int packetSize = 0; // write packet size, not including the header.
private int chunksPerPacket = 0; private int chunksPerPacket = 0;
private volatile IOException lastException = null; private final AtomicReference<IOException> lastException = new AtomicReference<IOException>();
private long artificialSlowdown = 0; private long artificialSlowdown = 0;
private long lastFlushOffset = 0; // offset when flush was invoked private long lastFlushOffset = 0; // offset when flush was invoked
//persist blocks on namenode //persist blocks on namenode
@ -810,8 +811,8 @@ public class DFSOutputStream extends FSOutputSummer
if (++pipelineRecoveryCount > 5) { if (++pipelineRecoveryCount > 5) {
DFSClient.LOG.warn("Error recovering pipeline for writing " + DFSClient.LOG.warn("Error recovering pipeline for writing " +
block + ". Already retried 5 times for the same packet."); block + ". Already retried 5 times for the same packet.");
lastException = new IOException("Failing write. Tried pipeline " + lastException.set(new IOException("Failing write. Tried pipeline " +
"recovery 5 times without success."); "recovery 5 times without success."));
streamerClosed = true; streamerClosed = true;
return false; return false;
} }
@ -1002,8 +1003,8 @@ public class DFSOutputStream extends FSOutputSummer
} }
} }
if (nodes.length <= 1) { if (nodes.length <= 1) {
lastException = new IOException("All datanodes " + pipelineMsg lastException.set(new IOException("All datanodes " + pipelineMsg
+ " are bad. Aborting..."); + " are bad. Aborting..."));
streamerClosed = true; streamerClosed = true;
return false; return false;
} }
@ -1018,7 +1019,7 @@ public class DFSOutputStream extends FSOutputSummer
newnodes.length-errorIndex); newnodes.length-errorIndex);
nodes = newnodes; nodes = newnodes;
hasError = false; hasError = false;
lastException = null; lastException.set(null);
errorIndex = -1; errorIndex = -1;
} }
@ -1063,7 +1064,7 @@ public class DFSOutputStream extends FSOutputSummer
ExtendedBlock oldBlock = block; ExtendedBlock oldBlock = block;
do { do {
hasError = false; hasError = false;
lastException = null; lastException.set(null);
errorIndex = -1; errorIndex = -1;
success = false; success = false;
@ -1278,9 +1279,7 @@ public class DFSOutputStream extends FSOutputSummer
} }
private void setLastException(IOException e) { private void setLastException(IOException e) {
if (lastException == null) { lastException.compareAndSet(null, e);
lastException = e;
}
} }
} }
@ -1312,7 +1311,7 @@ public class DFSOutputStream extends FSOutputSummer
protected void checkClosed() throws IOException { protected void checkClosed() throws IOException {
if (closed) { if (closed) {
IOException e = lastException; IOException e = lastException.get();
throw e != null ? e : new ClosedChannelException(); throw e != null ? e : new ClosedChannelException();
} }
} }
@ -1468,6 +1467,7 @@ public class DFSOutputStream extends FSOutputSummer
private void waitAndQueueCurrentPacket() throws IOException { private void waitAndQueueCurrentPacket() throws IOException {
synchronized (dataQueue) { synchronized (dataQueue) {
try {
// If queue is full, then wait till we have enough space // If queue is full, then wait till we have enough space
while (!closed && dataQueue.size() + ackQueue.size() > MAX_PACKETS) { while (!closed && dataQueue.size() + ackQueue.size() > MAX_PACKETS) {
try { try {
@ -1486,6 +1486,8 @@ public class DFSOutputStream extends FSOutputSummer
} }
checkClosed(); checkClosed();
queueCurrentPacket(); queueCurrentPacket();
} catch (ClosedChannelException e) {
}
} }
} }
@ -1729,7 +1731,7 @@ public class DFSOutputStream extends FSOutputSummer
DFSClient.LOG.warn("Error while syncing", e); DFSClient.LOG.warn("Error while syncing", e);
synchronized (this) { synchronized (this) {
if (!closed) { if (!closed) {
lastException = new IOException("IOException flush:" + e); lastException.set(new IOException("IOException flush:" + e));
closeThreads(true); closeThreads(true);
} }
} }
@ -1787,21 +1789,25 @@ public class DFSOutputStream extends FSOutputSummer
if (DFSClient.LOG.isDebugEnabled()) { if (DFSClient.LOG.isDebugEnabled()) {
DFSClient.LOG.debug("Waiting for ack for: " + seqno); DFSClient.LOG.debug("Waiting for ack for: " + seqno);
} }
synchronized (dataQueue) { try {
while (!closed) { synchronized (dataQueue) {
checkClosed(); while (!closed) {
if (lastAckedSeqno >= seqno) { checkClosed();
break; if (lastAckedSeqno >= seqno) {
} break;
try { }
dataQueue.wait(1000); // when we receive an ack, we notify on dataQueue try {
} catch (InterruptedException ie) { dataQueue.wait(1000); // when we receive an ack, we notify on
throw new InterruptedIOException( // dataQueue
"Interrupted while waiting for data to be acknowledged by pipeline"); } catch (InterruptedException ie) {
throw new InterruptedIOException(
"Interrupted while waiting for data to be acknowledged by pipeline");
}
} }
} }
checkClosed();
} catch (ClosedChannelException e) {
} }
checkClosed();
} }
private synchronized void start() { private synchronized void start() {
@ -1847,7 +1853,7 @@ public class DFSOutputStream extends FSOutputSummer
@Override @Override
public synchronized void close() throws IOException { public synchronized void close() throws IOException {
if (closed) { if (closed) {
IOException e = lastException; IOException e = lastException.getAndSet(null);
if (e == null) if (e == null)
return; return;
else else
@ -1875,6 +1881,7 @@ public class DFSOutputStream extends FSOutputSummer
closeThreads(false); closeThreads(false);
completeFile(lastBlock); completeFile(lastBlock);
dfsClient.endFileLease(src); dfsClient.endFileLease(src);
} catch (ClosedChannelException e) {
} finally { } finally {
closed = true; closed = true;
} }
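For context, the hunk above swaps a volatile field for an AtomicReference so that only the first pipeline failure is remembered. A standalone sketch of that pattern, with illustrative names and none of the DFSOutputStream internals:

    import java.io.IOException;
    import java.util.concurrent.atomic.AtomicReference;

    // "Remember only the first failure": compareAndSet keeps the earliest
    // IOException without any locking.
    public class FirstErrorSketch {
      private final AtomicReference<IOException> lastException = new AtomicReference<IOException>();

      void recordError(IOException e) {
        lastException.compareAndSet(null, e); // later errors do not overwrite the first one
      }

      void checkClosed(boolean closed) throws IOException {
        if (closed) {
          IOException e = lastException.get();
          throw e != null ? e : new IOException("stream closed");
        }
      }

      public static void main(String[] args) {
        FirstErrorSketch s = new FirstErrorSketch();
        s.recordError(new IOException("first"));
        s.recordError(new IOException("second"));
        System.out.println(s.lastException.get().getMessage()); // prints "first"
      }
    }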

View File

@ -17,15 +17,9 @@
*/ */
package org.apache.hadoop.hdfs; package org.apache.hadoop.hdfs;
import static org.apache.hadoop.hdfs.DFSConfigKeys.*; import com.google.common.base.Joiner;
import java.io.IOException; import com.google.common.base.Preconditions;
import java.net.InetSocketAddress; import com.google.common.collect.Lists;
import java.net.URI;
import java.net.URISyntaxException;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Map;
import org.apache.commons.logging.Log; import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory; import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.HadoopIllegalArgumentException; import org.apache.hadoop.HadoopIllegalArgumentException;
@ -41,11 +35,17 @@ import org.apache.hadoop.ipc.RPC;
import org.apache.hadoop.security.SecurityUtil; import org.apache.hadoop.security.SecurityUtil;
import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.token.Token; import org.apache.hadoop.security.token.Token;
import static org.apache.hadoop.hdfs.protocol.HdfsConstants.HA_DT_SERVICE_PREFIX;
import com.google.common.base.Joiner; import java.io.IOException;
import com.google.common.base.Preconditions; import java.net.InetSocketAddress;
import com.google.common.collect.Lists; import java.net.URI;
import java.net.URISyntaxException;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Map;
import static org.apache.hadoop.hdfs.DFSConfigKeys.*;
import static org.apache.hadoop.hdfs.protocol.HdfsConstants.HA_DT_SERVICE_PREFIX;
public class HAUtil { public class HAUtil {
@ -265,10 +265,15 @@ public class HAUtil {
tokenSelector.selectToken(haService, ugi.getTokens()); tokenSelector.selectToken(haService, ugi.getTokens());
if (haToken != null) { if (haToken != null) {
for (InetSocketAddress singleNNAddr : nnAddrs) { for (InetSocketAddress singleNNAddr : nnAddrs) {
// this is a minor hack to prevent physical HA tokens from being
// exposed to the user via UGI.getCredentials(), otherwise these
// cloned tokens may be inadvertently propagated to jobs
Token<DelegationTokenIdentifier> specificToken = Token<DelegationTokenIdentifier> specificToken =
new Token<DelegationTokenIdentifier>(haToken); new Token.PrivateToken<DelegationTokenIdentifier>(haToken);
SecurityUtil.setTokenService(specificToken, singleNNAddr); SecurityUtil.setTokenService(specificToken, singleNNAddr);
ugi.addToken(specificToken); Text alias =
new Text(HA_DT_SERVICE_PREFIX + "//" + specificToken.getService());
ugi.addToken(alias, specificToken);
LOG.debug("Mapped HA service delegation token for logical URI " + LOG.debug("Mapped HA service delegation token for logical URI " +
haUri + " to namenode " + singleNNAddr); haUri + " to namenode " + singleNNAddr);
} }
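A rough sketch of the per-namenode aliasing idea used above. It only shows how one logical token is copied and stored under a distinct alias per physical namenode address (the addresses are examples), and it deliberately omits the Token.PrivateToken wrapper that keeps the copies out of UGI.getCredentials():

    import org.apache.hadoop.io.Text;
    import org.apache.hadoop.security.Credentials;
    import org.apache.hadoop.security.token.Token;
    import org.apache.hadoop.security.token.TokenIdentifier;

    // One logical HA token, copied once per physical namenode address and stored
    // under its own alias so the copies never collide with each other.
    public class HaTokenAliasSketch {
      public static void main(String[] args) {
        Credentials creds = new Credentials();
        Token<TokenIdentifier> logicalToken = new Token<TokenIdentifier>(); // empty placeholder token
        for (String nnAddr : new String[] {"nn1.example.com:8020", "nn2.example.com:8020"}) {
          Token<TokenIdentifier> copy = new Token<TokenIdentifier>(logicalToken);
          copy.setService(new Text(nnAddr));                     // point the copy at one physical NN
          creds.addToken(new Text("ha-hdfs://" + nnAddr), copy); // alias keyed per namenode
        }
        System.out.println("Stored " + creds.numberOfTokens() + " per-namenode copies");
      }
    }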

View File

@ -94,7 +94,6 @@ public class HftpFileSystem extends FileSystem
private URI hftpURI; private URI hftpURI;
protected URI nnUri; protected URI nnUri;
protected URI nnSecureUri;
public static final String HFTP_TIMEZONE = "UTC"; public static final String HFTP_TIMEZONE = "UTC";
public static final String HFTP_DATE_FORMAT = "yyyy-MM-dd'T'HH:mm:ssZ"; public static final String HFTP_DATE_FORMAT = "yyyy-MM-dd'T'HH:mm:ssZ";
@ -134,34 +133,33 @@ public class HftpFileSystem extends FileSystem
DFSConfigKeys.DFS_NAMENODE_HTTP_PORT_DEFAULT); DFSConfigKeys.DFS_NAMENODE_HTTP_PORT_DEFAULT);
} }
protected int getDefaultSecurePort() { /**
return getConf().getInt(DFSConfigKeys.DFS_NAMENODE_HTTPS_PORT_KEY, * We generate the address with one of the following ports, in
DFSConfigKeys.DFS_NAMENODE_HTTPS_PORT_DEFAULT); * order of preference.
} * 1. Port from the hftp URI e.g. hftp://namenode:4000/ will return 4000.
* 2. Port configured via DFS_NAMENODE_HTTP_PORT_KEY
* 3. DFS_NAMENODE_HTTP_PORT_DEFAULT i.e. 50070.
*
* @param uri
* @return
*/
protected InetSocketAddress getNamenodeAddr(URI uri) { protected InetSocketAddress getNamenodeAddr(URI uri) {
// use authority so user supplied uri can override port // use authority so user supplied uri can override port
return NetUtils.createSocketAddr(uri.getAuthority(), getDefaultPort()); return NetUtils.createSocketAddr(uri.getAuthority(), getDefaultPort());
} }
protected InetSocketAddress getNamenodeSecureAddr(URI uri) {
// must only use the host and the configured https port
return NetUtils.createSocketAddrForHost(uri.getHost(), getDefaultSecurePort());
}
protected URI getNamenodeUri(URI uri) { protected URI getNamenodeUri(URI uri) {
return DFSUtil.createUri("http", getNamenodeAddr(uri)); return DFSUtil.createUri(getUnderlyingProtocol(), getNamenodeAddr(uri));
}
protected URI getNamenodeSecureUri(URI uri) {
return DFSUtil.createUri("http", getNamenodeSecureAddr(uri));
} }
/**
* See the documentation of {@link #getNamenodeAddr(URI)} for the logic
* behind selecting the canonical service name.
* @return
*/
@Override @Override
public String getCanonicalServiceName() { public String getCanonicalServiceName() {
// unlike other filesystems, hftp's service is the secure port, not the return SecurityUtil.buildTokenService(nnUri).toString();
// actual port in the uri
return SecurityUtil.buildTokenService(nnSecureUri).toString();
} }
@Override @Override
@ -187,7 +185,6 @@ public class HftpFileSystem extends FileSystem
setConf(conf); setConf(conf);
this.ugi = UserGroupInformation.getCurrentUser(); this.ugi = UserGroupInformation.getCurrentUser();
this.nnUri = getNamenodeUri(name); this.nnUri = getNamenodeUri(name);
this.nnSecureUri = getNamenodeSecureUri(name);
try { try {
this.hftpURI = new URI(name.getScheme(), name.getAuthority(), this.hftpURI = new URI(name.getScheme(), name.getAuthority(),
null, null, null); null, null, null);
@ -225,7 +222,7 @@ public class HftpFileSystem extends FileSystem
protected Token<DelegationTokenIdentifier> selectDelegationToken( protected Token<DelegationTokenIdentifier> selectDelegationToken(
UserGroupInformation ugi) { UserGroupInformation ugi) {
return hftpTokenSelector.selectToken(nnSecureUri, ugi.getTokens(), getConf()); return hftpTokenSelector.selectToken(nnUri, ugi.getTokens(), getConf());
} }
@ -234,6 +231,13 @@ public class HftpFileSystem extends FileSystem
return renewToken; return renewToken;
} }
/**
* Return the underlying protocol that is used to talk to the namenode.
*/
protected String getUnderlyingProtocol() {
return "http";
}
@Override @Override
public synchronized <T extends TokenIdentifier> void setDelegationToken(Token<T> token) { public synchronized <T extends TokenIdentifier> void setDelegationToken(Token<T> token) {
renewToken = token; renewToken = token;
@ -257,7 +261,7 @@ public class HftpFileSystem extends FileSystem
return ugi.doAs(new PrivilegedExceptionAction<Token<?>>() { return ugi.doAs(new PrivilegedExceptionAction<Token<?>>() {
@Override @Override
public Token<?> run() throws IOException { public Token<?> run() throws IOException {
final String nnHttpUrl = nnSecureUri.toString(); final String nnHttpUrl = nnUri.toString();
Credentials c; Credentials c;
try { try {
c = DelegationTokenFetcher.getDTfromRemote(nnHttpUrl, renewer); c = DelegationTokenFetcher.getDTfromRemote(nnHttpUrl, renewer);
@ -301,7 +305,7 @@ public class HftpFileSystem extends FileSystem
* @throws IOException on error constructing the URL * @throws IOException on error constructing the URL
*/ */
protected URL getNamenodeURL(String path, String query) throws IOException { protected URL getNamenodeURL(String path, String query) throws IOException {
final URL url = new URL("http", nnUri.getHost(), final URL url = new URL(getUnderlyingProtocol(), nnUri.getHost(),
nnUri.getPort(), path + '?' + query); nnUri.getPort(), path + '?' + query);
if (LOG.isTraceEnabled()) { if (LOG.isTraceEnabled()) {
LOG.trace("url=" + url); LOG.trace("url=" + url);
@ -703,17 +707,20 @@ public class HftpFileSystem extends FileSystem
return true; return true;
} }
protected String getUnderlyingProtocol() {
return "http";
}
@SuppressWarnings("unchecked") @SuppressWarnings("unchecked")
@Override @Override
public long renew(Token<?> token, public long renew(Token<?> token,
Configuration conf) throws IOException { Configuration conf) throws IOException {
// update the kerberos credentials, if they are coming from a keytab // update the kerberos credentials, if they are coming from a keytab
UserGroupInformation.getLoginUser().checkTGTAndReloginFromKeytab(); UserGroupInformation.getLoginUser().checkTGTAndReloginFromKeytab();
// use http to renew the token
InetSocketAddress serviceAddr = SecurityUtil.getTokenServiceAddr(token); InetSocketAddress serviceAddr = SecurityUtil.getTokenServiceAddr(token);
return return
DelegationTokenFetcher.renewDelegationToken DelegationTokenFetcher.renewDelegationToken
(DFSUtil.createUri("http", serviceAddr).toString(), (DFSUtil.createUri(getUnderlyingProtocol(), serviceAddr).toString(),
(Token<DelegationTokenIdentifier>) token); (Token<DelegationTokenIdentifier>) token);
} }
@ -723,10 +730,9 @@ public class HftpFileSystem extends FileSystem
Configuration conf) throws IOException { Configuration conf) throws IOException {
// update the kerberos credentials, if they are coming from a keytab // update the kerberos credentials, if they are coming from a keytab
UserGroupInformation.getLoginUser().checkTGTAndReloginFromKeytab(); UserGroupInformation.getLoginUser().checkTGTAndReloginFromKeytab();
// use http to cancel the token
InetSocketAddress serviceAddr = SecurityUtil.getTokenServiceAddr(token); InetSocketAddress serviceAddr = SecurityUtil.getTokenServiceAddr(token);
DelegationTokenFetcher.cancelDelegationToken DelegationTokenFetcher.cancelDelegationToken
(DFSUtil.createUri("http", serviceAddr).toString(), (DFSUtil.createUri(getUnderlyingProtocol(), serviceAddr).toString(),
(Token<DelegationTokenIdentifier>) token); (Token<DelegationTokenIdentifier>) token);
} }
} }
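The new getNamenodeAddr() javadoc above describes the port-preference rule; a minimal sketch of that rule, assuming only NetUtils.createSocketAddr and the stock 50070 HTTP default:

    import java.net.InetSocketAddress;
    import java.net.URI;
    import org.apache.hadoop.net.NetUtils;

    // Port preference: an explicit port in the hftp URI wins, otherwise the
    // configured/default namenode HTTP port (50070 here) is used.
    public class HftpPortSketch {
      static InetSocketAddress resolve(URI uri, int defaultHttpPort) {
        // The authority keeps any user-supplied port, e.g. hftp://namenode:4000/
        return NetUtils.createSocketAddr(uri.getAuthority(), defaultHttpPort);
      }

      public static void main(String[] args) {
        System.out.println(resolve(URI.create("hftp://namenode:4000/"), 50070)); // port 4000 from the URI
        System.out.println(resolve(URI.create("hftp://namenode/"), 50070));      // falls back to 50070
      }
    }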

View File

@ -68,6 +68,14 @@ public class HsftpFileSystem extends HftpFileSystem {
return "hsftp"; return "hsftp";
} }
/**
* Return the underlying protocol that is used to talk to the namenode.
*/
@Override
protected String getUnderlyingProtocol() {
return "https";
}
@Override @Override
public void initialize(URI name, Configuration conf) throws IOException { public void initialize(URI name, Configuration conf) throws IOException {
super.initialize(name, conf); super.initialize(name, conf);
@ -134,24 +142,15 @@ public class HsftpFileSystem extends HftpFileSystem {
@Override @Override
protected int getDefaultPort() { protected int getDefaultPort() {
return getDefaultSecurePort(); return getConf().getInt(DFSConfigKeys.DFS_NAMENODE_HTTPS_PORT_KEY,
DFSConfigKeys.DFS_NAMENODE_HTTPS_PORT_DEFAULT);
} }
@Override
protected InetSocketAddress getNamenodeSecureAddr(URI uri) {
return getNamenodeAddr(uri);
}
@Override
protected URI getNamenodeUri(URI uri) {
return getNamenodeSecureUri(uri);
}
@Override @Override
protected HttpURLConnection openConnection(String path, String query) protected HttpURLConnection openConnection(String path, String query)
throws IOException { throws IOException {
query = addDelegationTokenParam(query); query = addDelegationTokenParam(query);
final URL url = new URL("https", nnUri.getHost(), final URL url = new URL(getUnderlyingProtocol(), nnUri.getHost(),
nnUri.getPort(), path + '?' + query); nnUri.getPort(), path + '?' + query);
HttpsURLConnection conn; HttpsURLConnection conn;
conn = (HttpsURLConnection)connectionFactory.openConnection(url); conn = (HttpsURLConnection)connectionFactory.openConnection(url);

View File

@ -44,6 +44,7 @@ public class DatanodeID implements Comparable<DatanodeID> {
private String peerHostName; // hostname from the actual connection private String peerHostName; // hostname from the actual connection
private int xferPort; // data streaming port private int xferPort; // data streaming port
private int infoPort; // info server port private int infoPort; // info server port
private int infoSecurePort; // secure (HTTPS) info server port
private int ipcPort; // IPC server port private int ipcPort; // IPC server port
// UUID identifying a given datanode. For upgraded Datanodes this is the // UUID identifying a given datanode. For upgraded Datanodes this is the
@ -53,11 +54,12 @@ public class DatanodeID implements Comparable<DatanodeID> {
public DatanodeID(DatanodeID from) { public DatanodeID(DatanodeID from) {
this(from.getIpAddr(), this(from.getIpAddr(),
from.getHostName(), from.getHostName(),
from.getDatanodeUuid(), from.getDatanodeUuid(),
from.getXferPort(), from.getXferPort(),
from.getInfoPort(), from.getInfoPort(),
from.getIpcPort()); from.getInfoSecurePort(),
from.getIpcPort());
this.peerHostName = from.getPeerHostName(); this.peerHostName = from.getPeerHostName();
} }
@ -74,12 +76,13 @@ public class DatanodeID implements Comparable<DatanodeID> {
* @param ipcPort ipc server port * @param ipcPort ipc server port
*/ */
public DatanodeID(String ipAddr, String hostName, String datanodeUuid, public DatanodeID(String ipAddr, String hostName, String datanodeUuid,
int xferPort, int infoPort, int ipcPort) { int xferPort, int infoPort, int infoSecurePort, int ipcPort) {
this.ipAddr = ipAddr; this.ipAddr = ipAddr;
this.hostName = hostName; this.hostName = hostName;
this.datanodeUuid = checkDatanodeUuid(datanodeUuid); this.datanodeUuid = checkDatanodeUuid(datanodeUuid);
this.xferPort = xferPort; this.xferPort = xferPort;
this.infoPort = infoPort; this.infoPort = infoPort;
this.infoSecurePort = infoSecurePort;
this.ipcPort = ipcPort; this.ipcPort = ipcPort;
} }
@ -157,6 +160,13 @@ public class DatanodeID implements Comparable<DatanodeID> {
return ipAddr + ":" + infoPort; return ipAddr + ":" + infoPort;
} }
/**
* @return IP:infoSecurePort string
*/
public String getInfoSecureAddr() {
return ipAddr + ":" + infoSecurePort;
}
/** /**
* @return hostname:xferPort * @return hostname:xferPort
*/ */
@ -201,6 +211,13 @@ public class DatanodeID implements Comparable<DatanodeID> {
return infoPort; return infoPort;
} }
/**
* @return infoSecurePort (the port on which the HTTPS info server listens)
*/
public int getInfoSecurePort() {
return infoSecurePort;
}
/** /**
* @return ipcPort (the port at which the IPC server bound to) * @return ipcPort (the port at which the IPC server bound to)
*/ */
@ -240,13 +257,14 @@ public class DatanodeID implements Comparable<DatanodeID> {
peerHostName = nodeReg.getPeerHostName(); peerHostName = nodeReg.getPeerHostName();
xferPort = nodeReg.getXferPort(); xferPort = nodeReg.getXferPort();
infoPort = nodeReg.getInfoPort(); infoPort = nodeReg.getInfoPort();
infoSecurePort = nodeReg.getInfoSecurePort();
ipcPort = nodeReg.getIpcPort(); ipcPort = nodeReg.getIpcPort();
} }
/** /**
* Compare based on data transfer address. * Compare based on data transfer address.
* *
* @param that * @param that datanode to compare with
* @return as specified by Comparable * @return as specified by Comparable
*/ */
@Override @Override
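A short usage sketch of the widened DatanodeID constructor, where the sixth argument is the new secure info port; the address, UUID and port numbers below are all example values:

    import org.apache.hadoop.hdfs.protocol.DatanodeID;

    // Sixth constructor argument is the new HTTPS info port.
    public class DatanodeIdSketch {
      public static void main(String[] args) {
        DatanodeID id = new DatanodeID("10.0.0.5", "dn1.example.com", "example-uuid",
            50010 /* xferPort */, 50075 /* infoPort */, 50475 /* infoSecurePort */, 50020 /* ipcPort */);
        System.out.println("http:  " + id.getInfoAddr());        // 10.0.0.5:50075
        System.out.println("https: " + id.getInfoSecureAddr());  // 10.0.0.5:50475
      }
    }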

View File

@ -17,10 +17,6 @@
*/ */
package org.apache.hadoop.hdfs.protocol; package org.apache.hadoop.hdfs.protocol;
import static org.apache.hadoop.hdfs.DFSUtil.percent2String;
import java.util.Date;
import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.hdfs.DFSConfigKeys;
@ -32,6 +28,10 @@ import org.apache.hadoop.net.NodeBase;
import org.apache.hadoop.util.StringUtils; import org.apache.hadoop.util.StringUtils;
import org.apache.hadoop.util.Time; import org.apache.hadoop.util.Time;
import java.util.Date;
import static org.apache.hadoop.hdfs.DFSUtil.percent2String;
/** /**
* This class extends the primary identifier of a Datanode with ephemeral * This class extends the primary identifier of a Datanode with ephemeral
* state, eg usage information, current administrative state, and the * state, eg usage information, current administrative state, and the
@ -108,18 +108,21 @@ public class DatanodeInfo extends DatanodeID implements Node {
final long capacity, final long dfsUsed, final long remaining, final long capacity, final long dfsUsed, final long remaining,
final long blockPoolUsed, final long lastUpdate, final int xceiverCount, final long blockPoolUsed, final long lastUpdate, final int xceiverCount,
final AdminStates adminState) { final AdminStates adminState) {
this(nodeID.getIpAddr(), nodeID.getHostName(), nodeID.getDatanodeUuid(), nodeID.getXferPort(), this(nodeID.getIpAddr(), nodeID.getHostName(), nodeID.getDatanodeUuid(),
nodeID.getInfoPort(), nodeID.getIpcPort(), capacity, dfsUsed, remaining, nodeID.getXferPort(), nodeID.getInfoPort(), nodeID.getInfoSecurePort(),
blockPoolUsed, lastUpdate, xceiverCount, location, adminState); nodeID.getIpcPort(), capacity, dfsUsed, remaining, blockPoolUsed,
lastUpdate, xceiverCount, location, adminState);
} }
/** Constructor */ /** Constructor */
public DatanodeInfo(final String ipAddr, final String hostName, public DatanodeInfo(final String ipAddr, final String hostName,
final String DatanodeUuid, final int xferPort, final int infoPort, final int ipcPort, final String DatanodeUuid, final int xferPort, final int infoPort,
final int infoSecurePort, final int ipcPort,
final long capacity, final long dfsUsed, final long remaining, final long capacity, final long dfsUsed, final long remaining,
final long blockPoolUsed, final long lastUpdate, final int xceiverCount, final long blockPoolUsed, final long lastUpdate, final int xceiverCount,
final String networkLocation, final AdminStates adminState) { final String networkLocation, final AdminStates adminState) {
super(ipAddr, hostName, DatanodeUuid, xferPort, infoPort, ipcPort); super(ipAddr, hostName, DatanodeUuid, xferPort, infoPort,
infoSecurePort, ipcPort);
this.capacity = capacity; this.capacity = capacity;
this.dfsUsed = dfsUsed; this.dfsUsed = dfsUsed;
this.remaining = remaining; this.remaining = remaining;

View File

@ -227,7 +227,8 @@ public class PBHelper {
// DatanodeId // DatanodeId
public static DatanodeID convert(DatanodeIDProto dn) { public static DatanodeID convert(DatanodeIDProto dn) {
return new DatanodeID(dn.getIpAddr(), dn.getHostName(), dn.getDatanodeUuid(), return new DatanodeID(dn.getIpAddr(), dn.getHostName(), dn.getDatanodeUuid(),
dn.getXferPort(), dn.getInfoPort(), dn.getIpcPort()); dn.getXferPort(), dn.getInfoPort(), dn.hasInfoSecurePort() ? dn
.getInfoSecurePort() : 0, dn.getIpcPort());
} }
public static DatanodeIDProto convert(DatanodeID dn) { public static DatanodeIDProto convert(DatanodeID dn) {
@ -240,6 +241,7 @@ public class PBHelper {
.setXferPort(dn.getXferPort()) .setXferPort(dn.getXferPort())
.setDatanodeUuid(dn.getDatanodeUuid() != null ? dn.getDatanodeUuid() : "") .setDatanodeUuid(dn.getDatanodeUuid() != null ? dn.getDatanodeUuid() : "")
.setInfoPort(dn.getInfoPort()) .setInfoPort(dn.getInfoPort())
.setInfoSecurePort(dn.getInfoSecurePort())
.setIpcPort(dn.getIpcPort()).build(); .setIpcPort(dn.getIpcPort()).build();
} }
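The hasInfoSecurePort() guard above keeps the converter compatible with messages that predate the new optional field. A hedged sketch of that idiom, assuming the generated HdfsProtos.DatanodeIDProto class; getDefaultInstance() stands in for a message from an older peer:

    import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto;

    // Sketch only: a default instance has no infoSecurePort set, as a message from an
    // older datanode would, so the converter falls back to 0 instead of calling the getter blindly.
    public class OptionalFieldSketch {
      public static void main(String[] args) {
        DatanodeIDProto fromOldPeer = DatanodeIDProto.getDefaultInstance();
        int infoSecurePort = fromOldPeer.hasInfoSecurePort() ? fromOldPeer.getInfoSecurePort() : 0;
        System.out.println("infoSecurePort falls back to " + infoSecurePort);
      }
    }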

View File

@ -37,6 +37,7 @@ import org.apache.hadoop.hdfs.server.namenode.startupprogress.StartupProgress.Co
import org.apache.hadoop.hdfs.server.namenode.startupprogress.Step; import org.apache.hadoop.hdfs.server.namenode.startupprogress.Step;
import org.apache.hadoop.hdfs.server.namenode.startupprogress.StepType; import org.apache.hadoop.hdfs.server.namenode.startupprogress.StepType;
import org.apache.hadoop.io.Text; import org.apache.hadoop.io.Text;
import org.apache.hadoop.ipc.RetriableException;
import org.apache.hadoop.ipc.StandbyException; import org.apache.hadoop.ipc.StandbyException;
import org.apache.hadoop.security.Credentials; import org.apache.hadoop.security.Credentials;
import org.apache.hadoop.security.SecurityUtil; import org.apache.hadoop.security.SecurityUtil;
@ -115,6 +116,24 @@ public class DelegationTokenSecretManager
return super.retrievePassword(identifier); return super.retrievePassword(identifier);
} }
@Override
public byte[] retriableRetrievePassword(DelegationTokenIdentifier identifier)
throws InvalidToken, StandbyException, RetriableException, IOException {
namesystem.checkOperation(OperationCategory.READ);
try {
return super.retrievePassword(identifier);
} catch (InvalidToken it) {
if (namesystem.inTransitionToActive()) {
// if the namesystem is currently in the middle of transition to
// active state, let client retry since the corresponding editlog may
// have not been applied yet
throw new RetriableException(it);
} else {
throw it;
}
}
}
/** /**
* Returns expiry time of a token given its identifier. * Returns expiry time of a token given its identifier.
* *
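RetriableException tells the caller to try again once the edit log has caught up, instead of failing hard with InvalidToken. A generic retry-loop sketch; the RemoteCall interface and all names are illustrative, not an HDFS API:

    import java.io.IOException;
    import org.apache.hadoop.ipc.RetriableException;

    // Generic retry loop: a RetriableException means "transient, try again later",
    // here while a newly-active namenode finishes replaying its edit log.
    public class RetriableCallSketch {
      interface RemoteCall { byte[] run() throws IOException; }

      static byte[] callWithRetry(RemoteCall call, int attempts, long sleepMs)
          throws IOException, InterruptedException {
        IOException last = null;
        for (int i = 0; i < attempts; i++) {
          try {
            return call.run();
          } catch (RetriableException e) {
            last = e;              // transient failure: back off and retry
            Thread.sleep(sleepMs);
          }
        }
        throw last != null ? last : new IOException("no attempts made");
      }
    }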

View File

@ -79,6 +79,7 @@ import org.apache.hadoop.hdfs.util.LightWeightLinkedSet;
import org.apache.hadoop.net.Node; import org.apache.hadoop.net.Node;
import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.util.Daemon; import org.apache.hadoop.util.Daemon;
import org.apache.hadoop.util.LightWeightGSet;
import org.apache.hadoop.util.Time; import org.apache.hadoop.util.Time;
import com.google.common.annotations.VisibleForTesting; import com.google.common.annotations.VisibleForTesting;
@ -94,9 +95,6 @@ public class BlockManager {
static final Log LOG = LogFactory.getLog(BlockManager.class); static final Log LOG = LogFactory.getLog(BlockManager.class);
public static final Log blockLog = NameNode.blockStateChangeLog; public static final Log blockLog = NameNode.blockStateChangeLog;
/** Default load factor of map */
public static final float DEFAULT_MAP_LOAD_FACTOR = 0.75f;
private static final String QUEUE_REASON_CORRUPT_STATE = private static final String QUEUE_REASON_CORRUPT_STATE =
"it has the wrong state or generation stamp"; "it has the wrong state or generation stamp";
@ -248,7 +246,8 @@ public class BlockManager {
invalidateBlocks = new InvalidateBlocks(datanodeManager); invalidateBlocks = new InvalidateBlocks(datanodeManager);
// Compute the map capacity by allocating 2% of total memory // Compute the map capacity by allocating 2% of total memory
blocksMap = new BlocksMap(DEFAULT_MAP_LOAD_FACTOR); blocksMap = new BlocksMap(
LightWeightGSet.computeCapacity(2.0, "BlocksMap"));
blockplacement = BlockPlacementPolicy.getInstance( blockplacement = BlockPlacementPolicy.getInstance(
conf, stats, datanodeManager.getNetworkTopology()); conf, stats, datanodeManager.getNetworkTopology());
pendingReplications = new PendingReplicationBlocks(conf.getInt( pendingReplications = new PendingReplicationBlocks(conf.getInt(
@ -1792,6 +1791,14 @@ public class BlockManager {
if (isBlockUnderConstruction(storedBlock, ucState, reportedState)) { if (isBlockUnderConstruction(storedBlock, ucState, reportedState)) {
((BlockInfoUnderConstruction)storedBlock).addReplicaIfNotPresent( ((BlockInfoUnderConstruction)storedBlock).addReplicaIfNotPresent(
node.getStorageInfo(storageID), iblk, reportedState); node.getStorageInfo(storageID), iblk, reportedState);
// Blocks under construction that exist only inside snapshots are also counted
// toward the safemode threshold, so update the safe block count for them.
// See HDFS-5283.
BlockInfoUnderConstruction blockUC = (BlockInfoUnderConstruction) storedBlock;
if (namesystem.isInSnapshot(blockUC)) {
int numOfReplicas = blockUC.getNumExpectedLocations();
namesystem.incrementSafeBlockCount(numOfReplicas);
}
//and fall through to next clause //and fall through to next clause
} }
//add replica if appropriate //add replica if appropriate

View File

@ -57,11 +57,11 @@ class BlocksMap {
/** Constant {@link LightWeightGSet} capacity. */ /** Constant {@link LightWeightGSet} capacity. */
private final int capacity; private final int capacity;
private volatile GSet<Block, BlockInfo> blocks; private GSet<Block, BlockInfo> blocks;
BlocksMap(final float loadFactor) { BlocksMap(int capacity) {
// Use 2% of total memory to size the GSet capacity // Use 2% of total memory to size the GSet capacity
this.capacity = LightWeightGSet.computeCapacity(2.0, "BlocksMap"); this.capacity = capacity;
this.blocks = new LightWeightGSet<Block, BlockInfo>(capacity); this.blocks = new LightWeightGSet<Block, BlockInfo>(capacity);
} }
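The capacity passed to BlocksMap now comes from LightWeightGSet.computeCapacity(2.0, "BlocksMap"), i.e. roughly 2% of the heap. A back-of-the-envelope sketch of that sizing, which ignores the power-of-two rounding and reference-size handling the real method performs:

    // Rough sizing sketch: entries = (2% of max heap) / bytes per reference.
    public class CapacitySketch {
      static int roughCapacity(double percentOfHeap, int bytesPerReference) {
        long maxHeap = Runtime.getRuntime().maxMemory();
        long entries = (long) (maxHeap * percentOfHeap / 100.0) / bytesPerReference;
        return (int) Math.min(entries, Integer.MAX_VALUE);
      }

      public static void main(String[] args) {
        System.out.println("~2% of heap as map entries: " + roughCapacity(2.0, 8));
      }
    }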

View File

@ -17,21 +17,9 @@
*/ */
package org.apache.hadoop.hdfs.server.blockmanagement; package org.apache.hadoop.hdfs.server.blockmanagement;
import static org.apache.hadoop.util.Time.now; import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Preconditions;
import java.io.IOException; import com.google.common.net.InetAddresses;
import java.io.PrintWriter;
import java.net.InetAddress;
import java.net.UnknownHostException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Comparator;
import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
import java.util.NavigableMap;
import java.util.TreeMap;
import org.apache.commons.logging.Log; import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory; import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.HadoopIllegalArgumentException; import org.apache.hadoop.HadoopIllegalArgumentException;
@ -41,13 +29,8 @@ import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSUtil; import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.HdfsConfiguration; import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.protocol.Block; import org.apache.hadoop.hdfs.protocol.*;
import org.apache.hadoop.hdfs.protocol.DatanodeID;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType; import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.protocol.UnregisteredNodeException;
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor.BlockTargetPair; import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor.BlockTargetPair;
import org.apache.hadoop.hdfs.server.namenode.HostFileManager; import org.apache.hadoop.hdfs.server.namenode.HostFileManager;
import org.apache.hadoop.hdfs.server.namenode.HostFileManager.Entry; import org.apache.hadoop.hdfs.server.namenode.HostFileManager.Entry;
@ -55,33 +38,23 @@ import org.apache.hadoop.hdfs.server.namenode.HostFileManager.EntrySet;
import org.apache.hadoop.hdfs.server.namenode.HostFileManager.MutableEntrySet; import org.apache.hadoop.hdfs.server.namenode.HostFileManager.MutableEntrySet;
import org.apache.hadoop.hdfs.server.namenode.NameNode; import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.apache.hadoop.hdfs.server.namenode.Namesystem; import org.apache.hadoop.hdfs.server.namenode.Namesystem;
import org.apache.hadoop.hdfs.server.protocol.BalancerBandwidthCommand; import org.apache.hadoop.hdfs.server.protocol.*;
import org.apache.hadoop.hdfs.server.protocol.BlockCommand;
import org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand;
import org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringBlock; import org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringBlock;
import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand;
import org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol;
import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
import org.apache.hadoop.hdfs.server.protocol.DisallowedDatanodeException;
import org.apache.hadoop.hdfs.server.protocol.RegisterCommand;
import org.apache.hadoop.hdfs.util.CyclicIteration; import org.apache.hadoop.hdfs.util.CyclicIteration;
import org.apache.hadoop.ipc.Server; import org.apache.hadoop.ipc.Server;
import org.apache.hadoop.net.CachedDNSToSwitchMapping; import org.apache.hadoop.net.*;
import org.apache.hadoop.net.DNSToSwitchMapping;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.net.NetworkTopology;
import org.apache.hadoop.net.NetworkTopology.InvalidTopologyException; import org.apache.hadoop.net.NetworkTopology.InvalidTopologyException;
import org.apache.hadoop.net.Node;
import org.apache.hadoop.net.NodeBase;
import org.apache.hadoop.net.ScriptBasedMapping;
import org.apache.hadoop.util.Daemon; import org.apache.hadoop.util.Daemon;
import org.apache.hadoop.util.ReflectionUtils; import org.apache.hadoop.util.ReflectionUtils;
import org.apache.hadoop.util.Time; import org.apache.hadoop.util.Time;
import com.google.common.annotations.VisibleForTesting; import java.io.IOException;
import com.google.common.base.Preconditions; import java.io.PrintWriter;
import com.google.common.net.InetAddresses; import java.net.InetAddress;
import java.net.UnknownHostException;
import java.util.*;
import static org.apache.hadoop.util.Time.now;
/** /**
* Manage datanodes, include decommission and other activities. * Manage datanodes, include decommission and other activities.
@ -128,6 +101,8 @@ public class DatanodeManager {
private final int defaultInfoPort; private final int defaultInfoPort;
private final int defaultInfoSecurePort;
private final int defaultIpcPort; private final int defaultIpcPort;
/** Read include/exclude files*/ /** Read include/exclude files*/
@ -167,6 +142,7 @@ public class DatanodeManager {
*/ */
private boolean hasClusterEverBeenMultiRack = false; private boolean hasClusterEverBeenMultiRack = false;
private final boolean checkIpHostnameInRegistration;
/** /**
* The number of datanodes for each software version. This list should change * The number of datanodes for each software version. This list should change
* during rolling upgrades. * during rolling upgrades.
@ -189,7 +165,10 @@ public class DatanodeManager {
DFSConfigKeys.DFS_DATANODE_ADDRESS_DEFAULT)).getPort(); DFSConfigKeys.DFS_DATANODE_ADDRESS_DEFAULT)).getPort();
this.defaultInfoPort = NetUtils.createSocketAddr( this.defaultInfoPort = NetUtils.createSocketAddr(
conf.get(DFSConfigKeys.DFS_DATANODE_HTTP_ADDRESS_KEY, conf.get(DFSConfigKeys.DFS_DATANODE_HTTP_ADDRESS_KEY,
DFSConfigKeys.DFS_DATANODE_HTTPS_ADDRESS_DEFAULT)).getPort(); DFSConfigKeys.DFS_DATANODE_HTTP_ADDRESS_DEFAULT)).getPort();
this.defaultInfoSecurePort = NetUtils.createSocketAddr(
conf.get(DFSConfigKeys.DFS_DATANODE_HTTPS_ADDRESS_KEY,
DFSConfigKeys.DFS_DATANODE_HTTPS_ADDRESS_DEFAULT)).getPort();
this.defaultIpcPort = NetUtils.createSocketAddr( this.defaultIpcPort = NetUtils.createSocketAddr(
conf.get(DFSConfigKeys.DFS_DATANODE_IPC_ADDRESS_KEY, conf.get(DFSConfigKeys.DFS_DATANODE_IPC_ADDRESS_KEY,
DFSConfigKeys.DFS_DATANODE_IPC_ADDRESS_DEFAULT)).getPort(); DFSConfigKeys.DFS_DATANODE_IPC_ADDRESS_DEFAULT)).getPort();
@ -232,6 +211,12 @@ public class DatanodeManager {
LOG.info(DFSConfigKeys.DFS_BLOCK_INVALIDATE_LIMIT_KEY LOG.info(DFSConfigKeys.DFS_BLOCK_INVALIDATE_LIMIT_KEY
+ "=" + this.blockInvalidateLimit); + "=" + this.blockInvalidateLimit);
this.checkIpHostnameInRegistration = conf.getBoolean(
DFSConfigKeys.DFS_NAMENODE_DATANODE_REGISTRATION_IP_HOSTNAME_CHECK_KEY,
DFSConfigKeys.DFS_NAMENODE_DATANODE_REGISTRATION_IP_HOSTNAME_CHECK_DEFAULT);
LOG.info(DFSConfigKeys.DFS_NAMENODE_DATANODE_REGISTRATION_IP_HOSTNAME_CHECK_KEY
+ "=" + checkIpHostnameInRegistration);
this.avoidStaleDataNodesForRead = conf.getBoolean( this.avoidStaleDataNodesForRead = conf.getBoolean(
DFSConfigKeys.DFS_NAMENODE_AVOID_STALE_DATANODE_FOR_READ_KEY, DFSConfigKeys.DFS_NAMENODE_AVOID_STALE_DATANODE_FOR_READ_KEY,
DFSConfigKeys.DFS_NAMENODE_AVOID_STALE_DATANODE_FOR_READ_DEFAULT); DFSConfigKeys.DFS_NAMENODE_AVOID_STALE_DATANODE_FOR_READ_DEFAULT);
@ -757,11 +742,13 @@ public class DatanodeManager {
// Mostly called inside an RPC, update ip and peer hostname // Mostly called inside an RPC, update ip and peer hostname
String hostname = dnAddress.getHostName(); String hostname = dnAddress.getHostName();
String ip = dnAddress.getHostAddress(); String ip = dnAddress.getHostAddress();
if (!isNameResolved(dnAddress)) { if (checkIpHostnameInRegistration && !isNameResolved(dnAddress)) {
// Reject registration of unresolved datanode to prevent performance // Reject registration of unresolved datanode to prevent performance
// impact of repetitive DNS lookups later. // impact of repetitive DNS lookups later.
LOG.warn("Unresolved datanode registration from " + ip); final String message = "hostname cannot be resolved (ip="
throw new DisallowedDatanodeException(nodeReg); + ip + ", hostname=" + hostname + ")";
LOG.warn("Unresolved datanode registration: " + message);
throw new DisallowedDatanodeException(nodeReg, message);
} }
// update node registration with the ip and hostname from rpc request // update node registration with the ip and hostname from rpc request
nodeReg.setIpAddr(ip); nodeReg.setIpAddr(ip);
@ -1131,6 +1118,7 @@ public class DatanodeManager {
// The IP:port is sufficient for listing in a report // The IP:port is sufficient for listing in a report
dnId = new DatanodeID(hostStr, "", "", port, dnId = new DatanodeID(hostStr, "", "", port,
DFSConfigKeys.DFS_DATANODE_HTTP_DEFAULT_PORT, DFSConfigKeys.DFS_DATANODE_HTTP_DEFAULT_PORT,
DFSConfigKeys.DFS_DATANODE_HTTPS_DEFAULT_PORT,
DFSConfigKeys.DFS_DATANODE_IPC_DEFAULT_PORT); DFSConfigKeys.DFS_DATANODE_IPC_DEFAULT_PORT);
} else { } else {
String ipAddr = ""; String ipAddr = "";
@ -1141,6 +1129,7 @@ public class DatanodeManager {
} }
dnId = new DatanodeID(ipAddr, hostStr, "", port, dnId = new DatanodeID(ipAddr, hostStr, "", port,
DFSConfigKeys.DFS_DATANODE_HTTP_DEFAULT_PORT, DFSConfigKeys.DFS_DATANODE_HTTP_DEFAULT_PORT,
DFSConfigKeys.DFS_DATANODE_HTTPS_DEFAULT_PORT,
DFSConfigKeys.DFS_DATANODE_IPC_DEFAULT_PORT); DFSConfigKeys.DFS_DATANODE_IPC_DEFAULT_PORT);
} }
return dnId; return dnId;
@ -1188,7 +1177,7 @@ public class DatanodeManager {
new DatanodeDescriptor(new DatanodeID(entry.getIpAddress(), new DatanodeDescriptor(new DatanodeID(entry.getIpAddress(),
entry.getPrefix(), "", entry.getPrefix(), "",
entry.getPort() == 0 ? defaultXferPort : entry.getPort(), entry.getPort() == 0 ? defaultXferPort : entry.getPort(),
defaultInfoPort, defaultIpcPort)); defaultInfoPort, defaultInfoSecurePort, defaultIpcPort));
dn.setLastUpdate(0); // Consider this node dead for reporting dn.setLastUpdate(0); // Consider this node dead for reporting
nodes.add(dn); nodes.add(dn);
} }
@ -1207,17 +1196,17 @@ public class DatanodeManager {
/** /**
* Checks if name resolution was successful for the given address. If IP * Checks if name resolution was successful for the given address. If IP
* address and host name are the same, then it means name resolution has * address and host name are the same, then it means name resolution has
* failed. As a special case, the loopback address is also considered * failed. As a special case, local addresses are also considered
* acceptable. This is particularly important on Windows, where 127.0.0.1 does * acceptable. This is particularly important on Windows, where 127.0.0.1 does
* not resolve to "localhost". * not resolve to "localhost".
* *
* @param address InetAddress to check * @param address InetAddress to check
* @return boolean true if name resolution successful or address is loopback * @return boolean true if name resolution successful or address is local
*/ */
private static boolean isNameResolved(InetAddress address) { private static boolean isNameResolved(InetAddress address) {
String hostname = address.getHostName(); String hostname = address.getHostName();
String ip = address.getHostAddress(); String ip = address.getHostAddress();
return !hostname.equals(ip) || address.isLoopbackAddress(); return !hostname.equals(ip) || NetUtils.isLocalAddress(address);
} }
private void setDatanodeDead(DatanodeDescriptor node) { private void setDatanodeDead(DatanodeDescriptor node) {
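The registration path above now accepts any local address, not just loopback. A small sketch of the resolution check, using the same NetUtils.isLocalAddress() call as the patch:

    import java.net.InetAddress;
    import org.apache.hadoop.net.NetUtils;

    // If getHostName() just echoes the IP, reverse DNS failed; local addresses are
    // exempted so single-node setups still register.
    public class NameResolutionSketch {
      static boolean isNameResolved(InetAddress address) {
        return !address.getHostName().equals(address.getHostAddress())
            || NetUtils.isLocalAddress(address);
      }

      public static void main(String[] args) throws Exception {
        System.out.println(isNameResolved(InetAddress.getByName("127.0.0.1"))); // true (local)
      }
    }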

View File

@ -18,24 +18,7 @@
package org.apache.hadoop.hdfs.server.common; package org.apache.hadoop.hdfs.server.common;
import java.io.ByteArrayInputStream; import com.google.common.base.Charsets;
import java.io.DataInputStream;
import java.io.IOException;
import java.io.UnsupportedEncodingException;
import java.net.InetSocketAddress;
import java.net.Socket;
import java.net.URL;
import java.net.URLEncoder;
import java.util.Arrays;
import java.util.Collections;
import java.util.Comparator;
import java.util.HashMap;
import java.util.List;
import java.util.TreeSet;
import javax.servlet.ServletContext;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.jsp.JspWriter;
import org.apache.commons.logging.Log; import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory; import org.apache.commons.logging.LogFactory;
@ -47,13 +30,9 @@ import org.apache.hadoop.hdfs.BlockReaderFactory;
import org.apache.hadoop.hdfs.DFSClient; import org.apache.hadoop.hdfs.DFSClient;
import org.apache.hadoop.hdfs.DFSUtil; import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.net.TcpPeerServer; import org.apache.hadoop.hdfs.net.TcpPeerServer;
import org.apache.hadoop.hdfs.protocol.DatanodeID; import org.apache.hadoop.hdfs.protocol.*;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
import org.apache.hadoop.hdfs.security.token.block.DataEncryptionKey;
import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier; import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
import org.apache.hadoop.hdfs.security.token.block.DataEncryptionKey;
import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier; import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor; import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
import org.apache.hadoop.hdfs.server.datanode.CachingStrategy; import org.apache.hadoop.hdfs.server.datanode.CachingStrategy;
@ -74,10 +53,22 @@ import org.apache.hadoop.security.authorize.ProxyUsers;
import org.apache.hadoop.security.token.Token; import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.util.VersionInfo; import org.apache.hadoop.util.VersionInfo;
import com.google.common.base.Charsets; import javax.servlet.ServletContext;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.jsp.JspWriter;
import java.io.ByteArrayInputStream;
import java.io.DataInputStream;
import java.io.IOException;
import java.io.UnsupportedEncodingException;
import java.net.InetSocketAddress;
import java.net.Socket;
import java.net.URL;
import java.net.URLEncoder;
import java.util.*;
import static org.apache.hadoop.fs.CommonConfigurationKeys.HADOOP_HTTP_STATIC_USER;
import static org.apache.hadoop.fs.CommonConfigurationKeys.DEFAULT_HADOOP_HTTP_STATIC_USER; import static org.apache.hadoop.fs.CommonConfigurationKeys.DEFAULT_HADOOP_HTTP_STATIC_USER;
import static org.apache.hadoop.fs.CommonConfigurationKeys.HADOOP_HTTP_STATIC_USER;
@InterfaceAudience.Private @InterfaceAudience.Private
public class JspHelper { public class JspHelper {
@ -112,7 +103,7 @@ public class JspHelper {
return super.hashCode(); return super.hashCode();
} }
} }
// compare two records based on their frequency // compare two records based on their frequency
private static class NodeRecordComparator implements Comparator<NodeRecord> { private static class NodeRecordComparator implements Comparator<NodeRecord> {
@ -126,6 +117,27 @@ public class JspHelper {
return 0; return 0;
} }
} }
/**
* A helper class that generates the correct URL for different schemes.
*
*/
public static final class Url {
public static String authority(String scheme, DatanodeID d) {
if (scheme.equals("http")) {
return d.getInfoAddr();
} else if (scheme.equals("https")) {
return d.getInfoSecureAddr();
} else {
throw new IllegalArgumentException("Unknown scheme:" + scheme);
}
}
public static String url(String scheme, DatanodeID d) {
return scheme + "://" + authority(scheme, d);
}
}
public static DatanodeInfo bestNode(LocatedBlocks blks, Configuration conf) public static DatanodeInfo bestNode(LocatedBlocks blks, Configuration conf)
throws IOException { throws IOException {
HashMap<DatanodeInfo, NodeRecord> map = HashMap<DatanodeInfo, NodeRecord> map =
@ -217,7 +229,7 @@ public class JspHelper {
offsetIntoBlock, amtToRead, true, offsetIntoBlock, amtToRead, true,
"JspHelper", TcpPeerServer.peerFromSocketAndKey(s, encryptionKey), "JspHelper", TcpPeerServer.peerFromSocketAndKey(s, encryptionKey),
new DatanodeID(addr.getAddress().getHostAddress(), new DatanodeID(addr.getAddress().getHostAddress(),
addr.getHostName(), poolId, addr.getPort(), 0, 0), null, addr.getHostName(), poolId, addr.getPort(), 0, 0, 0), null,
null, null, false, CachingStrategy.newDefaultStrategy()); null, null, false, CachingStrategy.newDefaultStrategy());
final byte[] buf = new byte[amtToRead]; final byte[] buf = new byte[amtToRead];

View File

@ -18,56 +18,10 @@
package org.apache.hadoop.hdfs.server.datanode; package org.apache.hadoop.hdfs.server.datanode;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_ADMIN; import com.google.common.annotations.VisibleForTesting;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_HTTPS_NEED_AUTH_DEFAULT; import com.google.common.base.Joiner;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_HTTPS_NEED_AUTH_KEY; import com.google.common.base.Preconditions;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_ADDRESS_DEFAULT; import com.google.protobuf.BlockingService;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_ADDRESS_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_DATA_DIR_PERMISSION_DEFAULT;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_DATA_DIR_PERMISSION_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_DIRECTORYSCAN_INTERVAL_DEFAULT;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_DIRECTORYSCAN_INTERVAL_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_DNS_INTERFACE_DEFAULT;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_DNS_INTERFACE_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_DNS_NAMESERVER_DEFAULT;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_DNS_NAMESERVER_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_HANDLER_COUNT_DEFAULT;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_HANDLER_COUNT_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_HOST_NAME_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_HTTPS_ADDRESS_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_HTTP_ADDRESS_DEFAULT;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_HTTP_ADDRESS_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_IPC_ADDRESS_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_KEYTAB_FILE_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_PLUGINS_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_SCAN_PERIOD_HOURS_DEFAULT;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_SCAN_PERIOD_HOURS_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_STARTUP_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_USER_NAME_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HTTPS_ENABLE_KEY;
import static org.apache.hadoop.util.ExitUtil.terminate;
import java.io.BufferedOutputStream;
import java.io.ByteArrayInputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.FileInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.io.PrintStream;
import java.net.InetSocketAddress;
import java.net.Socket;
import java.net.SocketException;
import java.net.SocketTimeoutException;
import java.net.UnknownHostException;
import java.nio.channels.ClosedByInterruptException;
import java.nio.channels.SocketChannel;
import java.security.PrivilegedExceptionAction;
import java.util.*;
import java.util.concurrent.atomic.AtomicInteger;
import org.apache.commons.logging.Log; import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory; import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceAudience;
@ -84,37 +38,15 @@ import org.apache.hadoop.hdfs.HDFSPolicyProvider;
import org.apache.hadoop.hdfs.HdfsConfiguration; import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.net.DomainPeerServer; import org.apache.hadoop.hdfs.net.DomainPeerServer;
import org.apache.hadoop.hdfs.net.TcpPeerServer; import org.apache.hadoop.hdfs.net.TcpPeerServer;
import org.apache.hadoop.hdfs.protocol.Block; import org.apache.hadoop.hdfs.protocol.*;
import org.apache.hadoop.hdfs.protocol.BlockLocalPathInfo; import org.apache.hadoop.hdfs.protocol.datatransfer.*;
import org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol;
import org.apache.hadoop.hdfs.protocol.DatanodeID;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.protocol.HdfsBlocksMetadata;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.protocol.RecoveryInProgressException;
import org.apache.hadoop.hdfs.protocol.datatransfer.BlockConstructionStage;
import org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferEncryptor;
import org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtocol;
import org.apache.hadoop.hdfs.protocol.datatransfer.IOStreamPair;
import org.apache.hadoop.hdfs.protocol.datatransfer.Sender;
import org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.ClientDatanodeProtocolService; import org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.ClientDatanodeProtocolService;
import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DNTransferAckProto; import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DNTransferAckProto;
import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status; import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status;
import org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.InterDatanodeProtocolService; import org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.InterDatanodeProtocolService;
import org.apache.hadoop.hdfs.protocolPB.ClientDatanodeProtocolPB; import org.apache.hadoop.hdfs.protocolPB.*;
import org.apache.hadoop.hdfs.protocolPB.ClientDatanodeProtocolServerSideTranslatorPB; import org.apache.hadoop.hdfs.security.token.block.*;
import org.apache.hadoop.hdfs.protocolPB.DatanodeProtocolClientSideTranslatorPB;
import org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolPB;
import org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolServerSideTranslatorPB;
import org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB;
import org.apache.hadoop.hdfs.protocolPB.PBHelper;
import org.apache.hadoop.hdfs.security.token.block.BlockPoolTokenSecretManager;
import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
import org.apache.hadoop.hdfs.security.token.block.BlockTokenSecretManager;
import org.apache.hadoop.hdfs.security.token.block.BlockTokenSecretManager.AccessMode; import org.apache.hadoop.hdfs.security.token.block.BlockTokenSecretManager.AccessMode;
import org.apache.hadoop.hdfs.security.token.block.ExportedBlockKeys;
import org.apache.hadoop.hdfs.security.token.block.InvalidBlockTokenException;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants; import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState; import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption; import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
@ -149,21 +81,21 @@ import org.apache.hadoop.security.UserGroupInformation.AuthenticationMethod;
import org.apache.hadoop.security.authorize.AccessControlList; import org.apache.hadoop.security.authorize.AccessControlList;
import org.apache.hadoop.security.token.Token; import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.security.token.TokenIdentifier; import org.apache.hadoop.security.token.TokenIdentifier;
import org.apache.hadoop.util.Daemon; import org.apache.hadoop.util.*;
import org.apache.hadoop.util.DiskChecker;
import org.apache.hadoop.util.DiskChecker.DiskErrorException; import org.apache.hadoop.util.DiskChecker.DiskErrorException;
import org.apache.hadoop.util.DiskChecker.DiskOutOfSpaceException; import org.apache.hadoop.util.DiskChecker.DiskOutOfSpaceException;
import org.apache.hadoop.util.GenericOptionsParser;
import org.apache.hadoop.util.JvmPauseMonitor;
import org.apache.hadoop.util.ServicePlugin;
import org.apache.hadoop.util.StringUtils;
import org.apache.hadoop.util.VersionInfo;
import org.mortbay.util.ajax.JSON; import org.mortbay.util.ajax.JSON;
import com.google.common.annotations.VisibleForTesting; import java.io.*;
import com.google.common.base.Joiner; import java.net.*;
import com.google.common.base.Preconditions; import java.nio.channels.ClosedByInterruptException;
import com.google.protobuf.BlockingService; import java.nio.channels.SocketChannel;
import java.security.PrivilegedExceptionAction;
import java.util.*;
import java.util.concurrent.atomic.AtomicInteger;
import static org.apache.hadoop.hdfs.DFSConfigKeys.*;
import static org.apache.hadoop.util.ExitUtil.terminate;
/********************************************************** /**********************************************************
* DataNode is a class (and program) that stores a set of * DataNode is a class (and program) that stores a set of
@ -245,6 +177,7 @@ public class DataNode extends Configured
private volatile boolean heartbeatsDisabledForTests = false; private volatile boolean heartbeatsDisabledForTests = false;
private DataStorage storage = null; private DataStorage storage = null;
private HttpServer infoServer = null; private HttpServer infoServer = null;
private int infoSecurePort;
DataNodeMetrics metrics; DataNodeMetrics metrics;
private InetSocketAddress streamingAddr; private InetSocketAddress streamingAddr;
@ -359,16 +292,13 @@ public class DataNode extends Configured
InetSocketAddress infoSocAddr = DataNode.getInfoAddr(conf); InetSocketAddress infoSocAddr = DataNode.getInfoAddr(conf);
String infoHost = infoSocAddr.getHostName(); String infoHost = infoSocAddr.getHostName();
int tmpInfoPort = infoSocAddr.getPort(); int tmpInfoPort = infoSocAddr.getPort();
this.infoServer = (secureResources == null) HttpServer.Builder builder = new HttpServer.Builder().setName("datanode")
? new HttpServer.Builder().setName("datanode") .setBindAddress(infoHost).setPort(tmpInfoPort)
.setBindAddress(infoHost).setPort(tmpInfoPort) .setFindPort(tmpInfoPort == 0).setConf(conf)
.setFindPort(tmpInfoPort == 0).setConf(conf) .setACL(new AccessControlList(conf.get(DFS_ADMIN, " ")));
.setACL(new AccessControlList(conf.get(DFS_ADMIN, " "))).build() this.infoServer = (secureResources == null) ? builder.build() :
: new HttpServer.Builder().setName("datanode") builder.setConnector(secureResources.getListener()).build();
.setBindAddress(infoHost).setPort(tmpInfoPort)
.setFindPort(tmpInfoPort == 0).setConf(conf)
.setACL(new AccessControlList(conf.get(DFS_ADMIN, " ")))
.setConnector(secureResources.getListener()).build();
LOG.info("Opened info server at " + infoHost + ":" + tmpInfoPort); LOG.info("Opened info server at " + infoHost + ":" + tmpInfoPort);
if (conf.getBoolean(DFS_HTTPS_ENABLE_KEY, false)) { if (conf.getBoolean(DFS_HTTPS_ENABLE_KEY, false)) {
boolean needClientAuth = conf.getBoolean(DFS_CLIENT_HTTPS_NEED_AUTH_KEY, boolean needClientAuth = conf.getBoolean(DFS_CLIENT_HTTPS_NEED_AUTH_KEY,
@ -382,6 +312,7 @@ public class DataNode extends Configured
if(LOG.isDebugEnabled()) { if(LOG.isDebugEnabled()) {
LOG.debug("Datanode listening for SSL on " + secInfoSocAddr); LOG.debug("Datanode listening for SSL on " + secInfoSocAddr);
} }
infoSecurePort = secInfoSocAddr.getPort();
} }
this.infoServer.addInternalServlet(null, "/streamFile/*", StreamFile.class); this.infoServer.addInternalServlet(null, "/streamFile/*", StreamFile.class);
this.infoServer.addInternalServlet(null, "/getFileChecksum/*", this.infoServer.addInternalServlet(null, "/getFileChecksum/*",
@ -750,7 +681,8 @@ public class DataNode extends Configured
} }
DatanodeID dnId = new DatanodeID( DatanodeID dnId = new DatanodeID(
streamingAddr.getAddress().getHostAddress(), hostName, streamingAddr.getAddress().getHostAddress(), hostName,
storage.getDatanodeUuid(), getXferPort(), getInfoPort(), getIpcPort()); storage.getDatanodeUuid(), getXferPort(), getInfoPort(),
infoSecurePort, getIpcPort());
return new DatanodeRegistration(dnId, storageInfo, return new DatanodeRegistration(dnId, storageInfo,
new ExportedBlockKeys(), VersionInfo.getVersion()); new ExportedBlockKeys(), VersionInfo.getVersion());
} }
@ -848,7 +780,7 @@ public class DataNode extends Configured
* If this is the first block pool to register, this also initializes * If this is the first block pool to register, this also initializes
* the datanode-scoped storage. * the datanode-scoped storage.
* *
* @param bpos block pool to initialize and register with the NameNode. * @param bpos the block pool offer service to initialize and register with the NameNode
* @throws IOException if the NN is inconsistent with the local storage. * @throws IOException if the NN is inconsistent with the local storage.
*/ */
void initBlockPool(BPOfferService bpos) throws IOException { void initBlockPool(BPOfferService bpos) throws IOException {
@ -2289,6 +2221,13 @@ public class DataNode extends Configured
return infoServer.getPort(); return infoServer.getPort();
} }
/**
* @return the datanode's https port
*/
public int getInfoSecurePort() {
return infoSecurePort;
}
/** /**
* Returned information is a JSON representation of a map with * Returned information is a JSON representation of a map with
* name node host name as the key and block pool Id as the value. * name node host name as the key and block pool Id as the value.


@ -18,10 +18,9 @@
package org.apache.hadoop.hdfs.server.datanode; package org.apache.hadoop.hdfs.server.datanode;
import java.io.File; import java.io.File;
import java.io.FileNotFoundException;
import java.io.IOException; import java.io.IOException;
import java.net.InetAddress;
import java.net.InetSocketAddress; import java.net.InetSocketAddress;
import java.net.URI;
import java.net.URL; import java.net.URL;
import java.net.URLEncoder; import java.net.URLEncoder;
import java.security.PrivilegedExceptionAction; import java.security.PrivilegedExceptionAction;
@ -37,9 +36,9 @@ import javax.servlet.jsp.JspWriter;
import org.apache.commons.lang.StringEscapeUtils; import org.apache.commons.lang.StringEscapeUtils;
import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.UnresolvedLinkException;
import org.apache.hadoop.hdfs.DFSClient; import org.apache.hadoop.hdfs.DFSClient;
import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.protocol.DatanodeID;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo; import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.DirectoryListing; import org.apache.hadoop.hdfs.protocol.DirectoryListing;
import org.apache.hadoop.hdfs.protocol.HdfsFileStatus; import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
@ -47,20 +46,23 @@ import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier; import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
import org.apache.hadoop.hdfs.security.token.block.BlockTokenSecretManager; import org.apache.hadoop.hdfs.security.token.block.BlockTokenSecretManager;
import org.apache.hadoop.hdfs.server.common.JspHelper; import org.apache.hadoop.hdfs.server.common.JspHelper;
import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.apache.hadoop.hdfs.server.namenode.NameNodeHttpServer;
import org.apache.hadoop.http.HtmlQuoting; import org.apache.hadoop.http.HtmlQuoting;
import org.apache.hadoop.http.HttpConfig;
import org.apache.hadoop.net.NetUtils; import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.security.AccessControlException;
import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.token.Token; import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.util.ServletUtil; import org.apache.hadoop.util.ServletUtil;
import org.apache.hadoop.util.StringUtils; import org.apache.hadoop.util.StringUtils;
import org.apache.hadoop.util.VersionInfo; import org.apache.hadoop.util.VersionInfo;
import com.google.common.base.Predicate;
import com.google.common.collect.Iterables;
@InterfaceAudience.Private @InterfaceAudience.Private
public class DatanodeJspHelper { public class DatanodeJspHelper {
private static final int PREV_BLOCK = -1;
private static final int NEXT_BLOCK = 1;
private static DFSClient getDFSClient(final UserGroupInformation user, private static DFSClient getDFSClient(final UserGroupInformation user,
final String addr, final String addr,
final Configuration conf final Configuration conf
@ -143,10 +145,10 @@ public class DatanodeJspHelper {
out.print("Empty file"); out.print("Empty file");
} else { } else {
DatanodeInfo chosenNode = JspHelper.bestNode(firstBlock, conf); DatanodeInfo chosenNode = JspHelper.bestNode(firstBlock, conf);
String fqdn = canonicalize(chosenNode.getIpAddr());
int datanodePort = chosenNode.getXferPort(); int datanodePort = chosenNode.getXferPort();
String redirectLocation = HttpConfig.getSchemePrefix() + fqdn + ":" String redirectLocation = JspHelper.Url.url(req.getScheme(),
+ chosenNode.getInfoPort() + "/browseBlock.jsp?blockId=" chosenNode)
+ "/browseBlock.jsp?blockId="
+ firstBlock.getBlock().getBlockId() + "&blockSize=" + firstBlock.getBlock().getBlockId() + "&blockSize="
+ firstBlock.getBlock().getNumBytes() + "&genstamp=" + firstBlock.getBlock().getNumBytes() + "&genstamp="
+ firstBlock.getBlock().getGenerationStamp() + "&filename=" + firstBlock.getBlock().getGenerationStamp() + "&filename="
@ -225,7 +227,7 @@ public class DatanodeJspHelper {
JspHelper.addTableFooter(out); JspHelper.addTableFooter(out);
} }
} }
out.print("<br><a href=\"" + HttpConfig.getSchemePrefix() out.print("<br><a href=\"///"
+ canonicalize(nnAddr) + ":" + canonicalize(nnAddr) + ":"
+ namenodeInfoPort + "/dfshealth.jsp\">Go back to DFS home</a>"); + namenodeInfoPort + "/dfshealth.jsp\">Go back to DFS home</a>");
dfs.close(); dfs.close();
@ -302,8 +304,7 @@ public class DatanodeJspHelper {
Long.MAX_VALUE).getLocatedBlocks(); Long.MAX_VALUE).getLocatedBlocks();
// Add the various links for looking at the file contents // Add the various links for looking at the file contents
// URL for downloading the full file // URL for downloading the full file
String downloadUrl = HttpConfig.getSchemePrefix() + req.getServerName() + ":" String downloadUrl = "/streamFile" + ServletUtil.encodePath(filename)
+ req.getServerPort() + "/streamFile" + ServletUtil.encodePath(filename)
+ JspHelper.getUrlParam(JspHelper.NAMENODE_ADDRESS, nnAddr, true) + JspHelper.getUrlParam(JspHelper.NAMENODE_ADDRESS, nnAddr, true)
+ JspHelper.getDelegationTokenUrlParam(tokenString); + JspHelper.getDelegationTokenUrlParam(tokenString);
out.print("<a name=\"viewOptions\"></a>"); out.print("<a name=\"viewOptions\"></a>");
@ -319,8 +320,8 @@ public class DatanodeJspHelper {
dfs.close(); dfs.close();
return; return;
} }
String fqdn = canonicalize(chosenNode.getIpAddr());
String tailUrl = HttpConfig.getSchemePrefix() + fqdn + ":" + chosenNode.getInfoPort() String tailUrl = "///" + JspHelper.Url.authority(req.getScheme(), chosenNode)
+ "/tail.jsp?filename=" + URLEncoder.encode(filename, "UTF-8") + "/tail.jsp?filename=" + URLEncoder.encode(filename, "UTF-8")
+ "&namenodeInfoPort=" + namenodeInfoPort + "&namenodeInfoPort=" + namenodeInfoPort
+ "&chunkSizeToView=" + chunkSizeToView + "&chunkSizeToView=" + chunkSizeToView
@ -368,8 +369,7 @@ public class DatanodeJspHelper {
for (int j = 0; j < locs.length; j++) { for (int j = 0; j < locs.length; j++) {
String datanodeAddr = locs[j].getXferAddr(); String datanodeAddr = locs[j].getXferAddr();
datanodePort = locs[j].getXferPort(); datanodePort = locs[j].getXferPort();
fqdn = canonicalize(locs[j].getIpAddr()); String blockUrl = "///" + JspHelper.Url.authority(req.getScheme(), locs[j])
String blockUrl = HttpConfig.getSchemePrefix() + fqdn + ":" + locs[j].getInfoPort()
+ "/browseBlock.jsp?blockId=" + blockidstring + "/browseBlock.jsp?blockId=" + blockidstring
+ "&blockSize=" + blockSize + "&blockSize=" + blockSize
+ "&filename=" + URLEncoder.encode(filename, "UTF-8") + "&filename=" + URLEncoder.encode(filename, "UTF-8")
@ -380,7 +380,7 @@ public class DatanodeJspHelper {
+ JspHelper.getDelegationTokenUrlParam(tokenString) + JspHelper.getDelegationTokenUrlParam(tokenString)
+ JspHelper.getUrlParam(JspHelper.NAMENODE_ADDRESS, nnAddr); + JspHelper.getUrlParam(JspHelper.NAMENODE_ADDRESS, nnAddr);
String blockInfoUrl = HttpConfig.getSchemePrefix() + nnCanonicalName + ":" String blockInfoUrl = "///" + nnCanonicalName + ":"
+ namenodeInfoPort + namenodeInfoPort
+ "/block_info_xml.jsp?blockId=" + blockidstring; + "/block_info_xml.jsp?blockId=" + blockidstring;
out.print("<td>&nbsp</td><td><a href=\"" + blockUrl + "\">" out.print("<td>&nbsp</td><td><a href=\"" + blockUrl + "\">"
@ -391,7 +391,7 @@ public class DatanodeJspHelper {
} }
out.println("</table>"); out.println("</table>");
out.print("<hr>"); out.print("<hr>");
out.print("<br><a href=\"" + HttpConfig.getSchemePrefix() out.print("<br><a href=\"///"
+ nnCanonicalName + ":" + nnCanonicalName + ":"
+ namenodeInfoPort + "/dfshealth.jsp\">Go back to DFS home</a>"); + namenodeInfoPort + "/dfshealth.jsp\">Go back to DFS home</a>");
dfs.close(); dfs.close();
@ -491,9 +491,7 @@ public class DatanodeJspHelper {
String parent = new File(filename).getParent(); String parent = new File(filename).getParent();
JspHelper.printGotoForm(out, namenodeInfoPort, tokenString, parent, nnAddr); JspHelper.printGotoForm(out, namenodeInfoPort, tokenString, parent, nnAddr);
out.print("<hr>"); out.print("<hr>");
out.print("<a href=\"" + HttpConfig.getSchemePrefix() out.print("<a href=\"/browseDirectory.jsp?dir=" + URLEncoder.encode(parent, "UTF-8")
+ req.getServerName() + ":" + req.getServerPort()
+ "/browseDirectory.jsp?dir=" + URLEncoder.encode(parent, "UTF-8")
+ "&namenodeInfoPort=" + namenodeInfoPort + "&namenodeInfoPort=" + namenodeInfoPort
+ JspHelper.getDelegationTokenUrlParam(tokenString) + JspHelper.getDelegationTokenUrlParam(tokenString)
+ JspHelper.getUrlParam(JspHelper.NAMENODE_ADDRESS, nnAddr) + JspHelper.getUrlParam(JspHelper.NAMENODE_ADDRESS, nnAddr)
@ -501,112 +499,23 @@ public class DatanodeJspHelper {
out.print("<a href=\"#viewOptions\">Advanced view/download options</a><br>"); out.print("<a href=\"#viewOptions\">Advanced view/download options</a><br>");
out.print("<hr>"); out.print("<hr>");
// Determine the prev & next blocks String authority = req.getServerName() + ":" + req.getServerPort();
long nextStartOffset = 0; String nextUrl = generateLinksForAdjacentBlock(NEXT_BLOCK, authority,
long nextBlockSize = 0; datanodePort, startOffset, chunkSizeToView, blockSize, blockId,
String nextBlockIdStr = null; genStamp, dfs, filename, conf, req.getScheme(), tokenString,
String nextGenStamp = null; namenodeInfoPort, nnAddr);
String nextHost = req.getServerName(); if (nextUrl != null) {
int nextPort = req.getServerPort();
int nextDatanodePort = datanodePort;
// determine data for the next link
if (startOffset + chunkSizeToView >= blockSize) {
// we have to go to the next block from this point onwards
List<LocatedBlock> blocks = dfs.getNamenode().getBlockLocations(filename, 0,
Long.MAX_VALUE).getLocatedBlocks();
for (int i = 0; i < blocks.size(); i++) {
if (blocks.get(i).getBlock().getBlockId() == blockId) {
if (i != blocks.size() - 1) {
LocatedBlock nextBlock = blocks.get(i + 1);
nextBlockIdStr = Long.toString(nextBlock.getBlock().getBlockId());
nextGenStamp = Long.toString(nextBlock.getBlock()
.getGenerationStamp());
nextStartOffset = 0;
nextBlockSize = nextBlock.getBlock().getNumBytes();
DatanodeInfo d = JspHelper.bestNode(nextBlock, conf);
nextDatanodePort = d.getXferPort();
nextHost = d.getIpAddr();
nextPort = d.getInfoPort();
}
}
}
} else {
// we are in the same block
nextBlockIdStr = blockId.toString();
nextStartOffset = startOffset + chunkSizeToView;
nextBlockSize = blockSize;
nextGenStamp = genStamp.toString();
}
String nextUrl = null;
if (nextBlockIdStr != null) {
nextUrl = HttpConfig.getSchemePrefix() + canonicalize(nextHost) + ":" + nextPort
+ "/browseBlock.jsp?blockId=" + nextBlockIdStr
+ "&blockSize=" + nextBlockSize
+ "&startOffset=" + nextStartOffset
+ "&genstamp=" + nextGenStamp
+ "&filename=" + URLEncoder.encode(filename, "UTF-8")
+ "&chunkSizeToView=" + chunkSizeToView
+ "&datanodePort=" + nextDatanodePort
+ "&namenodeInfoPort=" + namenodeInfoPort
+ JspHelper.getDelegationTokenUrlParam(tokenString)
+ JspHelper.getUrlParam(JspHelper.NAMENODE_ADDRESS, nnAddr);
out.print("<a href=\"" + nextUrl + "\">View Next chunk</a>&nbsp;&nbsp;"); out.print("<a href=\"" + nextUrl + "\">View Next chunk</a>&nbsp;&nbsp;");
} }
// determine data for the prev link
String prevBlockIdStr = null;
String prevGenStamp = null;
long prevStartOffset = 0;
long prevBlockSize = 0;
String prevHost = req.getServerName();
int prevPort = req.getServerPort();
int prevDatanodePort = datanodePort;
if (startOffset == 0) {
List<LocatedBlock> blocks = dfs.getNamenode().getBlockLocations(filename, 0,
Long.MAX_VALUE).getLocatedBlocks();
for (int i = 0; i < blocks.size(); i++) {
if (blocks.get(i).getBlock().getBlockId() == blockId) {
if (i != 0) {
LocatedBlock prevBlock = blocks.get(i - 1);
prevBlockIdStr = Long.toString(prevBlock.getBlock().getBlockId());
prevGenStamp = Long.toString(prevBlock.getBlock()
.getGenerationStamp());
prevStartOffset = prevBlock.getBlock().getNumBytes()
- chunkSizeToView;
if (prevStartOffset < 0)
prevStartOffset = 0;
prevBlockSize = prevBlock.getBlock().getNumBytes();
DatanodeInfo d = JspHelper.bestNode(prevBlock, conf);
prevDatanodePort = d.getXferPort();
prevHost = d.getIpAddr();
prevPort = d.getInfoPort();
}
}
}
} else {
// we are in the same block
prevBlockIdStr = blockId.toString();
prevStartOffset = startOffset - chunkSizeToView;
if (prevStartOffset < 0)
prevStartOffset = 0;
prevBlockSize = blockSize;
prevGenStamp = genStamp.toString();
}
String prevUrl = null; String prevUrl = generateLinksForAdjacentBlock(PREV_BLOCK, authority,
if (prevBlockIdStr != null) { datanodePort, startOffset, chunkSizeToView, blockSize, blockId,
prevUrl = HttpConfig.getSchemePrefix() + canonicalize(prevHost) + ":" + prevPort genStamp, dfs, filename, conf, req.getScheme(), tokenString,
+ "/browseBlock.jsp?blockId=" + prevBlockIdStr namenodeInfoPort, nnAddr);
+ "&blockSize=" + prevBlockSize if (prevUrl != null) {
+ "&startOffset=" + prevStartOffset
+ "&filename=" + URLEncoder.encode(filename, "UTF-8")
+ "&chunkSizeToView=" + chunkSizeToView
+ "&genstamp=" + prevGenStamp
+ "&datanodePort=" + prevDatanodePort
+ "&namenodeInfoPort=" + namenodeInfoPort
+ JspHelper.getDelegationTokenUrlParam(tokenString)
+ JspHelper.getUrlParam(JspHelper.NAMENODE_ADDRESS, nnAddr);
out.print("<a href=\"" + prevUrl + "\">View Prev chunk</a>&nbsp;&nbsp;"); out.print("<a href=\"" + prevUrl + "\">View Prev chunk</a>&nbsp;&nbsp;");
} }
out.print("<hr>"); out.print("<hr>");
out.print("<textarea cols=\"100\" rows=\"25\" wrap=\"virtual\" style=\"width:100%\" READONLY>"); out.print("<textarea cols=\"100\" rows=\"25\" wrap=\"virtual\" style=\"width:100%\" READONLY>");
try { try {
@ -621,6 +530,71 @@ public class DatanodeJspHelper {
dfs.close(); dfs.close();
} }
private static String generateLinksForAdjacentBlock(final int direction,
String authority, int datanodePort, long startOffset,
int chunkSizeToView, long blockSize, long blockId, Long genStamp,
final DFSClient dfs, final String filename, final Configuration conf,
final String scheme, final String tokenString,
final int namenodeInfoPort, final String nnAddr)
throws AccessControlException, FileNotFoundException,
UnresolvedLinkException, IOException {
boolean found = false;
if ((direction == NEXT_BLOCK && startOffset + chunkSizeToView < blockSize)
|| (direction == PREV_BLOCK && startOffset != 0)) {
// we are in the same block
found = true;
if (direction == NEXT_BLOCK) {
startOffset = startOffset + chunkSizeToView;
} else {
startOffset = Math.max(0, startOffset - chunkSizeToView);
}
} else {
List<LocatedBlock> blocks = dfs.getNamenode().getBlockLocations(filename, 0,
Long.MAX_VALUE).getLocatedBlocks();
final long curBlockId = blockId;
int curBlockIdx = Iterables.indexOf(blocks, new Predicate<LocatedBlock>() {
@Override
public boolean apply(LocatedBlock b) {
return b.getBlock().getBlockId() == curBlockId;
}
});
found = curBlockIdx != -1 &&
((direction == NEXT_BLOCK && curBlockIdx < blocks.size() - 1)
|| (direction == PREV_BLOCK && curBlockIdx > 0));
if (found) {
LocatedBlock nextBlock = blocks.get(curBlockIdx + direction);
blockId = nextBlock.getBlock().getBlockId();
genStamp = nextBlock.getBlock().getGenerationStamp();
startOffset = 0;
blockSize = nextBlock.getBlock().getNumBytes();
DatanodeInfo d = JspHelper.bestNode(nextBlock, conf);
datanodePort = d.getXferPort();
authority = JspHelper.Url.authority(scheme, d);
}
}
if (found) {
return "///" + authority
+ "/browseBlock.jsp?blockId=" + blockId
+ "&blockSize=" + blockSize
+ "&startOffset=" + startOffset
+ "&genstamp=" + genStamp
+ "&filename=" + URLEncoder.encode(filename, "UTF-8")
+ "&chunkSizeToView=" + chunkSizeToView
+ "&datanodePort=" + datanodePort
+ "&namenodeInfoPort=" + namenodeInfoPort
+ JspHelper.getDelegationTokenUrlParam(tokenString)
+ JspHelper.getUrlParam(JspHelper.NAMENODE_ADDRESS, nnAddr);
} else {
return null;
}
}
static void generateFileChunksForTail(JspWriter out, HttpServletRequest req, static void generateFileChunksForTail(JspWriter out, HttpServletRequest req,
Configuration conf Configuration conf
) throws IOException, ) throws IOException,


@ -827,7 +827,7 @@ class ClusterJspHelper {
doc.startTag("item"); doc.startTag("item");
doc.attribute("label", label); doc.attribute("label", label);
doc.attribute("value", value); doc.attribute("value", value);
doc.attribute("link", HttpConfig.getSchemePrefix() + url); doc.attribute("link", "///" + url);
doc.endTag(); // item doc.endTag(); // item
} }
@ -887,7 +887,16 @@ class ClusterJspHelper {
private static String queryMbean(String httpAddress, Configuration conf) private static String queryMbean(String httpAddress, Configuration conf)
throws IOException { throws IOException {
URL url = new URL(HttpConfig.getSchemePrefix() + httpAddress+JMX_QRY); /**
* Although the other namenode might support HTTPS, it is fundamentally
* broken to fetch the JMX view over an HTTPS connection from inside the
* namenode, because in an HTTPS setup the principal of the client and
* that of the namenode differ. Therefore, there is no guarantee that the
* HTTPS connection can be set up.
*
* As a result, we simply hard-code the connection as an HTTP connection.
*/
URL url = new URL("http://" + httpAddress + JMX_QRY);
return readOutput(url); return readOutput(url);
} }
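For illustration only, a rough sketch of the resulting request; the address below is hypothetical and the query path merely stands in for whatever JMX_QRY expands to:
    // Illustrative sketch: the JMX poll of the other namenode stays on plain
    // HTTP even when the cluster web UI is served over HTTPS, for the reasons
    // explained in the comment above.
    String httpAddress = "remote-nn.example.com:50070";       // hypothetical NN web address
    java.net.URL url = new java.net.URL("http://" + httpAddress + "/jmx"); // "/jmx" assumed for JMX_QRY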
/** /**


@ -123,6 +123,7 @@ import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.ContentSummary; import org.apache.hadoop.fs.ContentSummary;
import org.apache.hadoop.fs.CreateFlag; import org.apache.hadoop.fs.CreateFlag;
import org.apache.hadoop.fs.DirectoryListingStartAfterNotFoundException;
import org.apache.hadoop.fs.FileAlreadyExistsException; import org.apache.hadoop.fs.FileAlreadyExistsException;
import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FsServerDefaults; import org.apache.hadoop.fs.FsServerDefaults;
@ -165,14 +166,7 @@ import org.apache.hadoop.hdfs.security.token.block.BlockTokenSecretManager;
import org.apache.hadoop.hdfs.security.token.block.BlockTokenSecretManager.AccessMode; import org.apache.hadoop.hdfs.security.token.block.BlockTokenSecretManager.AccessMode;
import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier; import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenSecretManager; import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenSecretManager;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo; import org.apache.hadoop.hdfs.server.blockmanagement.*;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoUnderConstruction;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeManager;
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStatistics;
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo;
import org.apache.hadoop.hdfs.server.blockmanagement.OutOfV1GenerationStampsException;
import org.apache.hadoop.hdfs.server.common.GenerationStamp; import org.apache.hadoop.hdfs.server.common.GenerationStamp;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.BlockUCState; import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.BlockUCState;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NamenodeRole; import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NamenodeRole;
@ -214,6 +208,7 @@ import org.apache.hadoop.hdfs.server.protocol.StorageReceivedDeletedBlocks;
import org.apache.hadoop.hdfs.util.ChunkedArrayList; import org.apache.hadoop.hdfs.util.ChunkedArrayList;
import org.apache.hadoop.io.IOUtils; import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.io.Text; import org.apache.hadoop.io.Text;
import org.apache.hadoop.ipc.RetriableException;
import org.apache.hadoop.ipc.RetryCache; import org.apache.hadoop.ipc.RetryCache;
import org.apache.hadoop.ipc.RetryCache.CacheEntry; import org.apache.hadoop.ipc.RetryCache.CacheEntry;
import org.apache.hadoop.ipc.RetryCache.CacheEntryWithPayload; import org.apache.hadoop.ipc.RetryCache.CacheEntryWithPayload;
@ -242,6 +237,7 @@ import org.mortbay.util.ajax.JSON;
import com.google.common.annotations.VisibleForTesting; import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Charsets; import com.google.common.base.Charsets;
import com.google.common.base.Preconditions; import com.google.common.base.Preconditions;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.Lists; import com.google.common.collect.Lists;
/** /**
@ -462,6 +458,11 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
private HAContext haContext; private HAContext haContext;
private final boolean haEnabled; private final boolean haEnabled;
/**
* Whether the namenode is in the middle of starting the active service
*/
private volatile boolean startingActiveService = false;
private INodeId inodeId; private INodeId inodeId;
@ -910,6 +911,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
* @throws IOException * @throws IOException
*/ */
void startActiveServices() throws IOException { void startActiveServices() throws IOException {
startingActiveService = true;
LOG.info("Starting services required for active state"); LOG.info("Starting services required for active state");
writeLock(); writeLock();
try { try {
@ -964,8 +966,19 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
nnrmthread.start(); nnrmthread.start();
} finally { } finally {
writeUnlock(); writeUnlock();
startingActiveService = false;
} }
} }
/**
* @return whether the namenode is transitioning to the active state and is in
* the middle of {@link #startActiveServices()}
*/
public boolean inTransitionToActive() {
return haEnabled && haContext != null
&& haContext.getState().getServiceState() == HAServiceState.ACTIVE
&& startingActiveService;
}
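A hypothetical use of this flag, sketched for illustration only; the RPC-handler context and the namesystem reference are assumptions, not part of this change:
    // Illustrative sketch: while the active-state transition is still running,
    // surface failures as retriable so clients back off and retry instead of
    // failing over or erroring out.
    if (namesystem.inTransitionToActive()) {
      throw new RetriableException(
          "NameNode is still starting its active services; please retry");
    }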
private boolean shouldUseDelegationTokens() { private boolean shouldUseDelegationTokens() {
return UserGroupInformation.isSecurityEnabled() || return UserGroupInformation.isSecurityEnabled() ||
@ -1058,6 +1071,26 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
} }
} }
/**
* @throws RetriableException
* If 1) the NameNode is in SafeMode, 2) HA is enabled, and 3) the
* NameNode is in the active state
* @throws SafeModeException
* Otherwise, if the NameNode is in SafeMode.
*/
private void checkNameNodeSafeMode(String errorMsg)
throws RetriableException, SafeModeException {
if (isInSafeMode()) {
SafeModeException se = new SafeModeException(errorMsg, safeMode);
if (haEnabled && haContext != null
&& haContext.getState().getServiceState() == HAServiceState.ACTIVE) {
throw new RetriableException(se);
} else {
throw se;
}
}
}
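To illustrate the pattern introduced here (a sketch mirroring the call sites changed below, not new behavior):
    // Before this change, each write path open-coded the guard:
    //   if (isInSafeMode()) {
    //     throw new SafeModeException("Cannot set permission for " + src, safeMode);
    //   }
    // After it, the same check is a single call, and on an HA-enabled active
    // NameNode the SafeModeException is wrapped in a RetriableException:
    checkNameNodeSafeMode("Cannot set permission for " + src);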
public static Collection<URI> getNamespaceDirs(Configuration conf) { public static Collection<URI> getNamespaceDirs(Configuration conf) {
return getStorageDirs(conf, DFS_NAMENODE_NAME_DIR_KEY); return getStorageDirs(conf, DFS_NAMENODE_NAME_DIR_KEY);
} }
@ -1359,9 +1392,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
writeLock(); writeLock();
try { try {
checkOperation(OperationCategory.WRITE); checkOperation(OperationCategory.WRITE);
if (isInSafeMode()) { checkNameNodeSafeMode("Cannot set permission for " + src);
throw new SafeModeException("Cannot set permission for " + src, safeMode);
}
src = FSDirectory.resolvePath(src, pathComponents, dir); src = FSDirectory.resolvePath(src, pathComponents, dir);
checkOwner(pc, src); checkOwner(pc, src);
dir.setPermission(src, permission); dir.setPermission(src, permission);
@ -1398,9 +1429,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
writeLock(); writeLock();
try { try {
checkOperation(OperationCategory.WRITE); checkOperation(OperationCategory.WRITE);
if (isInSafeMode()) { checkNameNodeSafeMode("Cannot set owner for " + src);
throw new SafeModeException("Cannot set owner for " + src, safeMode);
}
src = FSDirectory.resolvePath(src, pathComponents, dir); src = FSDirectory.resolvePath(src, pathComponents, dir);
checkOwner(pc, src); checkOwner(pc, src);
if (!pc.isSuperUser()) { if (!pc.isSuperUser()) {
@ -1480,8 +1509,14 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
for (LocatedBlock b : ret.getLocatedBlocks()) { for (LocatedBlock b : ret.getLocatedBlocks()) {
// if safemode & no block locations yet then throw safemodeException // if safemode & no block locations yet then throw safemodeException
if ((b.getLocations() == null) || (b.getLocations().length == 0)) { if ((b.getLocations() == null) || (b.getLocations().length == 0)) {
throw new SafeModeException("Zero blocklocations for " + src, SafeModeException se = new SafeModeException(
safeMode); "Zero blocklocations for " + src, safeMode);
if (haEnabled && haContext != null &&
haContext.getState().getServiceState() == HAServiceState.ACTIVE) {
throw new RetriableException(se);
} else {
throw se;
}
} }
} }
} }
@ -1622,9 +1657,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
writeLock(); writeLock();
try { try {
checkOperation(OperationCategory.WRITE); checkOperation(OperationCategory.WRITE);
if (isInSafeMode()) { checkNameNodeSafeMode("Cannot concat " + target);
throw new SafeModeException("Cannot concat " + target, safeMode);
}
concatInternal(pc, target, srcs, logRetryCache); concatInternal(pc, target, srcs, logRetryCache);
resultingStat = getAuditFileInfo(target, false); resultingStat = getAuditFileInfo(target, false);
} finally { } finally {
@ -1772,9 +1805,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
writeLock(); writeLock();
try { try {
checkOperation(OperationCategory.WRITE); checkOperation(OperationCategory.WRITE);
if (isInSafeMode()) { checkNameNodeSafeMode("Cannot set times " + src);
throw new SafeModeException("Cannot set times " + src, safeMode);
}
src = FSDirectory.resolvePath(src, pathComponents, dir); src = FSDirectory.resolvePath(src, pathComponents, dir);
// Write access is required to set access and modification times // Write access is required to set access and modification times
@ -1801,16 +1832,16 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
void createSymlink(String target, String link, void createSymlink(String target, String link,
PermissionStatus dirPerms, boolean createParent) PermissionStatus dirPerms, boolean createParent)
throws IOException, UnresolvedLinkException { throws IOException, UnresolvedLinkException {
CacheEntry cacheEntry = RetryCache.waitForCompletion(retryCache);
if (cacheEntry != null && cacheEntry.isSuccess()) {
return; // Return previous response
}
if (!DFSUtil.isValidName(link)) { if (!DFSUtil.isValidName(link)) {
throw new InvalidPathException("Invalid link name: " + link); throw new InvalidPathException("Invalid link name: " + link);
} }
if (FSDirectory.isReservedName(target)) { if (FSDirectory.isReservedName(target)) {
throw new InvalidPathException("Invalid target name: " + target); throw new InvalidPathException("Invalid target name: " + target);
} }
CacheEntry cacheEntry = RetryCache.waitForCompletion(retryCache);
if (cacheEntry != null && cacheEntry.isSuccess()) {
return; // Return previous response
}
boolean success = false; boolean success = false;
try { try {
createSymlinkInt(target, link, dirPerms, createParent, cacheEntry != null); createSymlinkInt(target, link, dirPerms, createParent, cacheEntry != null);
@ -1837,9 +1868,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
writeLock(); writeLock();
try { try {
checkOperation(OperationCategory.WRITE); checkOperation(OperationCategory.WRITE);
if (isInSafeMode()) { checkNameNodeSafeMode("Cannot create symlink " + link);
throw new SafeModeException("Cannot create symlink " + link, safeMode);
}
link = FSDirectory.resolvePath(link, pathComponents, dir); link = FSDirectory.resolvePath(link, pathComponents, dir);
if (!createParent) { if (!createParent) {
verifyParentDir(link); verifyParentDir(link);
@ -1897,9 +1926,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
writeLock(); writeLock();
try { try {
checkOperation(OperationCategory.WRITE); checkOperation(OperationCategory.WRITE);
if (isInSafeMode()) { checkNameNodeSafeMode("Cannot set replication for " + src);
throw new SafeModeException("Cannot set replication for " + src, safeMode);
}
src = FSDirectory.resolvePath(src, pathComponents, dir); src = FSDirectory.resolvePath(src, pathComponents, dir);
if (isPermissionEnabled) { if (isPermissionEnabled) {
checkPathAccess(pc, src, FsAction.WRITE); checkPathAccess(pc, src, FsAction.WRITE);
@ -2029,9 +2056,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
writeLock(); writeLock();
try { try {
checkOperation(OperationCategory.WRITE); checkOperation(OperationCategory.WRITE);
if (isInSafeMode()) { checkNameNodeSafeMode("Cannot create file" + src);
throw new SafeModeException("Cannot create file" + src, safeMode);
}
src = FSDirectory.resolvePath(src, pathComponents, dir); src = FSDirectory.resolvePath(src, pathComponents, dir);
startFileInternal(pc, src, permissions, holder, clientMachine, create, startFileInternal(pc, src, permissions, holder, clientMachine, create,
overwrite, createParent, replication, blockSize, logRetryCache); overwrite, createParent, replication, blockSize, logRetryCache);
@ -2250,10 +2275,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
writeLock(); writeLock();
try { try {
checkOperation(OperationCategory.WRITE); checkOperation(OperationCategory.WRITE);
if (isInSafeMode()) { checkNameNodeSafeMode("Cannot recover the lease of " + src);
throw new SafeModeException(
"Cannot recover the lease of " + src, safeMode);
}
src = FSDirectory.resolvePath(src, pathComponents, dir); src = FSDirectory.resolvePath(src, pathComponents, dir);
final INodeFile inode = INodeFile.valueOf(dir.getINode(src), src); final INodeFile inode = INodeFile.valueOf(dir.getINode(src), src);
if (!inode.isUnderConstruction()) { if (!inode.isUnderConstruction()) {
@ -2404,9 +2426,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
writeLock(); writeLock();
try { try {
checkOperation(OperationCategory.WRITE); checkOperation(OperationCategory.WRITE);
if (isInSafeMode()) { checkNameNodeSafeMode("Cannot append to file" + src);
throw new SafeModeException("Cannot append to file" + src, safeMode);
}
src = FSDirectory.resolvePath(src, pathComponents, dir); src = FSDirectory.resolvePath(src, pathComponents, dir);
lb = appendFileInternal(pc, src, holder, clientMachine, logRetryCache); lb = appendFileInternal(pc, src, holder, clientMachine, logRetryCache);
} catch (StandbyException se) { } catch (StandbyException se) {
@ -2556,9 +2576,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
checkBlock(previous); checkBlock(previous);
onRetryBlock[0] = null; onRetryBlock[0] = null;
checkOperation(OperationCategory.WRITE); checkOperation(OperationCategory.WRITE);
if (isInSafeMode()) { checkNameNodeSafeMode("Cannot add block to " + src);
throw new SafeModeException("Cannot add block to " + src, safeMode);
}
// have we exceeded the configured limit of fs objects. // have we exceeded the configured limit of fs objects.
checkFsObjectLimit(); checkFsObjectLimit();
@ -2667,10 +2685,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
try { try {
checkOperation(OperationCategory.READ); checkOperation(OperationCategory.READ);
//check safe mode //check safe mode
if (isInSafeMode()) { checkNameNodeSafeMode("Cannot add datanode; src=" + src + ", blk=" + blk);
throw new SafeModeException("Cannot add datanode; src=" + src
+ ", blk=" + blk, safeMode);
}
src = FSDirectory.resolvePath(src, pathComponents, dir); src = FSDirectory.resolvePath(src, pathComponents, dir);
//check lease //check lease
@ -2710,10 +2725,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
writeLock(); writeLock();
try { try {
checkOperation(OperationCategory.WRITE); checkOperation(OperationCategory.WRITE);
if (isInSafeMode()) { checkNameNodeSafeMode("Cannot abandon block " + b + " for fle" + src);
throw new SafeModeException("Cannot abandon block " + b +
" for fle" + src, safeMode);
}
src = FSDirectory.resolvePath(src, pathComponents, dir); src = FSDirectory.resolvePath(src, pathComponents, dir);
// //
@ -2796,9 +2808,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
writeLock(); writeLock();
try { try {
checkOperation(OperationCategory.WRITE); checkOperation(OperationCategory.WRITE);
if (isInSafeMode()) { checkNameNodeSafeMode("Cannot complete file " + src);
throw new SafeModeException("Cannot complete file " + src, safeMode);
}
src = FSDirectory.resolvePath(src, pathComponents, dir); src = FSDirectory.resolvePath(src, pathComponents, dir);
success = completeFileInternal(src, holder, success = completeFileInternal(src, holder,
ExtendedBlock.getLocalBlock(last), fileId); ExtendedBlock.getLocalBlock(last), fileId);
@ -2973,9 +2983,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
writeLock(); writeLock();
try { try {
checkOperation(OperationCategory.WRITE); checkOperation(OperationCategory.WRITE);
if (isInSafeMode()) { checkNameNodeSafeMode("Cannot rename " + src);
throw new SafeModeException("Cannot rename " + src, safeMode);
}
src = FSDirectory.resolvePath(src, srcComponents, dir); src = FSDirectory.resolvePath(src, srcComponents, dir);
dst = FSDirectory.resolvePath(dst, dstComponents, dir); dst = FSDirectory.resolvePath(dst, dstComponents, dir);
checkOperation(OperationCategory.WRITE); checkOperation(OperationCategory.WRITE);
@ -3025,10 +3033,6 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
/** Rename src to dst */ /** Rename src to dst */
void renameTo(String src, String dst, Options.Rename... options) void renameTo(String src, String dst, Options.Rename... options)
throws IOException, UnresolvedLinkException { throws IOException, UnresolvedLinkException {
CacheEntry cacheEntry = RetryCache.waitForCompletion(retryCache);
if (cacheEntry != null && cacheEntry.isSuccess()) {
return; // Return previous response
}
if (NameNode.stateChangeLog.isDebugEnabled()) { if (NameNode.stateChangeLog.isDebugEnabled()) {
NameNode.stateChangeLog.debug("DIR* NameSystem.renameTo: with options - " NameNode.stateChangeLog.debug("DIR* NameSystem.renameTo: with options - "
+ src + " to " + dst); + src + " to " + dst);
@ -3036,8 +3040,13 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
if (!DFSUtil.isValidName(dst)) { if (!DFSUtil.isValidName(dst)) {
throw new InvalidPathException("Invalid name: " + dst); throw new InvalidPathException("Invalid name: " + dst);
} }
FSPermissionChecker pc = getPermissionChecker(); final FSPermissionChecker pc = getPermissionChecker();
checkOperation(OperationCategory.WRITE); checkOperation(OperationCategory.WRITE);
CacheEntry cacheEntry = RetryCache.waitForCompletion(retryCache);
if (cacheEntry != null && cacheEntry.isSuccess()) {
return; // Return previous response
}
byte[][] srcComponents = FSDirectory.getPathComponentsForReservedPath(src); byte[][] srcComponents = FSDirectory.getPathComponentsForReservedPath(src);
byte[][] dstComponents = FSDirectory.getPathComponentsForReservedPath(dst); byte[][] dstComponents = FSDirectory.getPathComponentsForReservedPath(dst);
HdfsFileStatus resultingStat = null; HdfsFileStatus resultingStat = null;
@ -3045,9 +3054,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
writeLock(); writeLock();
try { try {
checkOperation(OperationCategory.WRITE); checkOperation(OperationCategory.WRITE);
if (isInSafeMode()) { checkNameNodeSafeMode("Cannot rename " + src);
throw new SafeModeException("Cannot rename " + src, safeMode);
}
src = FSDirectory.resolvePath(src, srcComponents, dir); src = FSDirectory.resolvePath(src, srcComponents, dir);
dst = FSDirectory.resolvePath(dst, dstComponents, dir); dst = FSDirectory.resolvePath(dst, dstComponents, dir);
renameToInternal(pc, src, dst, cacheEntry != null, options); renameToInternal(pc, src, dst, cacheEntry != null, options);
@ -3153,9 +3160,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
writeLock(); writeLock();
try { try {
checkOperation(OperationCategory.WRITE); checkOperation(OperationCategory.WRITE);
if (isInSafeMode()) { checkNameNodeSafeMode("Cannot delete " + src);
throw new SafeModeException("Cannot delete " + src, safeMode);
}
src = FSDirectory.resolvePath(src, pathComponents, dir); src = FSDirectory.resolvePath(src, pathComponents, dir);
if (!recursive && dir.isNonEmptyDirectory(src)) { if (!recursive && dir.isNonEmptyDirectory(src)) {
throw new IOException(src + " is non empty"); throw new IOException(src + " is non empty");
@ -3374,9 +3379,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
writeLock(); writeLock();
try { try {
checkOperation(OperationCategory.WRITE); checkOperation(OperationCategory.WRITE);
if (isInSafeMode()) { checkNameNodeSafeMode("Cannot create directory " + src);
throw new SafeModeException("Cannot create directory " + src, safeMode);
}
src = FSDirectory.resolvePath(src, pathComponents, dir); src = FSDirectory.resolvePath(src, pathComponents, dir);
status = mkdirsInternal(pc, src, permissions, createParent); status = mkdirsInternal(pc, src, permissions, createParent);
if (status) { if (status) {
@ -3476,9 +3479,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
writeLock(); writeLock();
try { try {
checkOperation(OperationCategory.WRITE); checkOperation(OperationCategory.WRITE);
if (isInSafeMode()) { checkNameNodeSafeMode("Cannot set quota on " + path);
throw new SafeModeException("Cannot set quota on " + path, safeMode);
}
dir.setQuota(path, nsQuota, dsQuota); dir.setQuota(path, nsQuota, dsQuota);
} finally { } finally {
writeUnlock(); writeUnlock();
@ -3501,9 +3502,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
writeLock(); writeLock();
try { try {
checkOperation(OperationCategory.WRITE); checkOperation(OperationCategory.WRITE);
if (isInSafeMode()) { checkNameNodeSafeMode("Cannot fsync file " + src);
throw new SafeModeException("Cannot fsync file " + src, safeMode);
}
src = FSDirectory.resolvePath(src, pathComponents, dir); src = FSDirectory.resolvePath(src, pathComponents, dir);
INodeFileUnderConstruction pendingFile = checkLease(src, clientName); INodeFileUnderConstruction pendingFile = checkLease(src, clientName);
if (lastBlockLength > 0) { if (lastBlockLength > 0) {
@ -3707,6 +3706,39 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
BlockInfo getStoredBlock(Block block) { BlockInfo getStoredBlock(Block block) {
return blockManager.getStoredBlock(block); return blockManager.getStoredBlock(block);
} }
@Override
public boolean isInSnapshot(BlockInfoUnderConstruction blockUC) {
assert hasReadOrWriteLock();
final BlockCollection bc = blockUC.getBlockCollection();
if (bc == null || !(bc instanceof INodeFileUnderConstruction)) {
return false;
}
INodeFileUnderConstruction inodeUC = (INodeFileUnderConstruction) blockUC
.getBlockCollection();
String fullName = inodeUC.getName();
try {
if (fullName != null && fullName.startsWith(Path.SEPARATOR)
&& dir.getINode(fullName) == inodeUC) {
// If file exists in normal path then no need to look in snapshot
return false;
}
} catch (UnresolvedLinkException e) {
LOG.error("Error while resolving the link : " + fullName, e);
return false;
}
/*
* 1. if bc is an instance of INodeFileUnderConstructionWithSnapshot, and
* bc is not in the current fsdirectory tree, bc must represent a snapshot
* file.
* 2. if fullName is not an absolute path, bc cannot exist in the
* current fsdirectory tree.
* 3. if bc is not the current node associated with fullName, bc must be a
* snapshot inode.
*/
return true;
}
void commitBlockSynchronization(ExtendedBlock lastblock, void commitBlockSynchronization(ExtendedBlock lastblock,
long newgenerationstamp, long newlength, long newgenerationstamp, long newlength,
@ -3728,11 +3760,8 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
// If a DN tries to commit to the standby, the recovery will // If a DN tries to commit to the standby, the recovery will
// fail, and the next retry will succeed on the new NN. // fail, and the next retry will succeed on the new NN.
if (isInSafeMode()) { checkNameNodeSafeMode(
throw new SafeModeException( "Cannot commitBlockSynchronization while in safe mode");
"Cannot commitBlockSynchronization while in safe mode",
safeMode);
}
final BlockInfo storedBlock = getStoredBlock( final BlockInfo storedBlock = getStoredBlock(
ExtendedBlock.getLocalBlock(lastblock)); ExtendedBlock.getLocalBlock(lastblock));
if (storedBlock == null) { if (storedBlock == null) {
@ -3885,9 +3914,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
writeLock(); writeLock();
try { try {
checkOperation(OperationCategory.WRITE); checkOperation(OperationCategory.WRITE);
if (isInSafeMode()) { checkNameNodeSafeMode("Cannot renew lease for " + holder);
throw new SafeModeException("Cannot renew lease for " + holder, safeMode);
}
leaseManager.renewLease(holder); leaseManager.renewLease(holder);
} finally { } finally {
writeUnlock(); writeUnlock();
@ -3924,11 +3951,27 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
FSPermissionChecker pc = getPermissionChecker(); FSPermissionChecker pc = getPermissionChecker();
checkOperation(OperationCategory.READ); checkOperation(OperationCategory.READ);
byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src); byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src);
String startAfterString = new String(startAfter);
readLock(); readLock();
try { try {
checkOperation(OperationCategory.READ); checkOperation(OperationCategory.READ);
src = FSDirectory.resolvePath(src, pathComponents, dir); src = FSDirectory.resolvePath(src, pathComponents, dir);
// Get file name when startAfter is an INodePath
if (FSDirectory.isReservedName(startAfterString)) {
byte[][] startAfterComponents = FSDirectory
.getPathComponentsForReservedPath(startAfterString);
try {
String tmp = FSDirectory.resolvePath(src, startAfterComponents, dir);
byte[][] regularPath = INode.getPathComponents(tmp);
startAfter = regularPath[regularPath.length - 1];
} catch (IOException e) {
// Possibly the inode is deleted
throw new DirectoryListingStartAfterNotFoundException(
"Can't find startAfter " + startAfterString);
}
}
if (isPermissionEnabled) { if (isPermissionEnabled) {
if (dir.isDir(src)) { if (dir.isDir(src)) {
checkPathAccess(pc, src, FsAction.READ_EXECUTE); checkPathAccess(pc, src, FsAction.READ_EXECUTE);
@ -4218,7 +4261,6 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
return JSON.toString(info); return JSON.toString(info);
} }
int getNumberOfDatanodes(DatanodeReportType type) { int getNumberOfDatanodes(DatanodeReportType type) {
readLock(); readLock();
try { try {
@ -4258,19 +4300,20 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
* @throws IOException if * @throws IOException if
*/ */
void saveNamespace() throws AccessControlException, IOException { void saveNamespace() throws AccessControlException, IOException {
checkOperation(OperationCategory.UNCHECKED);
checkSuperuserPrivilege();
CacheEntry cacheEntry = RetryCache.waitForCompletion(retryCache); CacheEntry cacheEntry = RetryCache.waitForCompletion(retryCache);
if (cacheEntry != null && cacheEntry.isSuccess()) { if (cacheEntry != null && cacheEntry.isSuccess()) {
return; // Return previous response return; // Return previous response
} }
checkSuperuserPrivilege();
checkOperation(OperationCategory.UNCHECKED);
boolean success = false; boolean success = false;
readLock(); readLock();
try { try {
checkOperation(OperationCategory.UNCHECKED); checkOperation(OperationCategory.UNCHECKED);
if (!isInSafeMode()) { if (!isInSafeMode()) {
throw new IOException("Safe mode should be turned ON " + throw new IOException("Safe mode should be turned ON "
"in order to create namespace image."); + "in order to create namespace image.");
} }
getFSImage().saveNamespace(this); getFSImage().saveNamespace(this);
success = true; success = true;
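
This hunk, like the later ones for endCheckpoint, updatePipeline and the snapshot operations, moves the cheap checkOperation() and privilege checks ahead of the RetryCache lookup, so a call that would be rejected anyway never consults or records a retry-cache entry. A fragment-level sketch of the resulting ordering, using names already present in this class; RetryCache.setState() is how such entries are normally completed and is an assumption here, since it does not appear in the lines above.

    checkOperation(OperationCategory.UNCHECKED);   // fail fast on a standby NN
    checkSuperuserPrivilege();                     // and on missing privileges
    CacheEntry cacheEntry = RetryCache.waitForCompletion(retryCache);
    if (cacheEntry != null && cacheEntry.isSuccess()) {
      return;                                      // duplicate RPC: replay success
    }
    boolean success = false;
    try {
      // ... perform the guarded operation under the appropriate lock ...
      success = true;
    } finally {
      RetryCache.setState(cacheEntry, success);
    }
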
@ -4347,7 +4390,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
* replicas, and calculates the ratio of safe blocks to the total number * replicas, and calculates the ratio of safe blocks to the total number
* of blocks in the system, which is the size of blocks in * of blocks in the system, which is the size of blocks in
* {@link FSNamesystem#blockManager}. When the ratio reaches the * {@link FSNamesystem#blockManager}. When the ratio reaches the
* {@link #threshold} it starts the {@link SafeModeMonitor} daemon in order * {@link #threshold} it starts the SafeModeMonitor daemon in order
* to monitor whether the safe mode {@link #extension} is passed. * to monitor whether the safe mode {@link #extension} is passed.
* Then it leaves safe mode and destroys itself. * Then it leaves safe mode and destroys itself.
* <p> * <p>
@ -4355,10 +4398,9 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
* not tracked because the name node is not intended to leave safe mode * not tracked because the name node is not intended to leave safe mode
* automatically in the case. * automatically in the case.
* *
* @see ClientProtocol#setSafeMode(HdfsConstants.SafeModeAction) * @see ClientProtocol#setSafeMode(HdfsConstants.SafeModeAction, boolean)
* @see SafeModeMonitor
*/ */
class SafeModeInfo { public class SafeModeInfo {
// configuration fields // configuration fields
/** Safe mode threshold condition %.*/ /** Safe mode threshold condition %.*/
private double threshold; private double threshold;
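
A compact illustration of the exit condition the javadoc above describes: safe mode may be left once the fraction of blocks with enough reported replicas reaches the configured threshold and has stayed there for the extension interval. The helper below is standalone and its parameter names are illustrative, not the actual SafeModeInfo fields.

    class SafeModeExitSketch {
      static boolean canLeaveSafeMode(long blockSafe, long blockTotal,
          double threshold, long thresholdReachedAtMs, long extensionMs) {
        double ratio = blockTotal == 0 ? 1.0 : (double) blockSafe / blockTotal;
        return ratio >= threshold
            && System.currentTimeMillis() - thresholdReachedAtMs >= extensionMs;
      }
    }
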
@ -5100,9 +5142,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
writeLock(); writeLock();
try { try {
checkOperation(OperationCategory.JOURNAL); checkOperation(OperationCategory.JOURNAL);
if (isInSafeMode()) { checkNameNodeSafeMode("Log not rolled");
throw new SafeModeException("Log not rolled", safeMode);
}
LOG.info("Roll Edit Log from " + Server.getRemoteAddress()); LOG.info("Roll Edit Log from " + Server.getRemoteAddress());
return getFSImage().rollEditLog(); return getFSImage().rollEditLog();
} finally { } finally {
@ -5123,9 +5163,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
try { try {
checkOperation(OperationCategory.CHECKPOINT); checkOperation(OperationCategory.CHECKPOINT);
if (isInSafeMode()) { checkNameNodeSafeMode("Checkpoint not started");
throw new SafeModeException("Checkpoint not started", safeMode);
}
LOG.info("Start checkpoint for " + backupNode.getAddress()); LOG.info("Start checkpoint for " + backupNode.getAddress());
cmd = getFSImage().startCheckpoint(backupNode, activeNamenode); cmd = getFSImage().startCheckpoint(backupNode, activeNamenode);
getEditLog().logSync(); getEditLog().logSync();
@ -5149,19 +5187,17 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
void endCheckpoint(NamenodeRegistration registration, void endCheckpoint(NamenodeRegistration registration,
CheckpointSignature sig) throws IOException { CheckpointSignature sig) throws IOException {
checkOperation(OperationCategory.CHECKPOINT);
CacheEntry cacheEntry = RetryCache.waitForCompletion(retryCache); CacheEntry cacheEntry = RetryCache.waitForCompletion(retryCache);
if (cacheEntry != null && cacheEntry.isSuccess()) { if (cacheEntry != null && cacheEntry.isSuccess()) {
return; // Return previous response return; // Return previous response
} }
checkOperation(OperationCategory.CHECKPOINT);
boolean success = false; boolean success = false;
readLock(); readLock();
try { try {
checkOperation(OperationCategory.CHECKPOINT); checkOperation(OperationCategory.CHECKPOINT);
if (isInSafeMode()) { checkNameNodeSafeMode("Checkpoint not ended");
throw new SafeModeException("Checkpoint not ended", safeMode);
}
LOG.info("End checkpoint for " + registration.getAddress()); LOG.info("End checkpoint for " + registration.getAddress());
getFSImage().endCheckpoint(sig); getFSImage().endCheckpoint(sig);
success = true; success = true;
@ -5262,7 +5298,8 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
/** /**
* Get the total number of objects in the system. * Get the total number of objects in the system.
*/ */
long getMaxObjects() { @Override // FSNamesystemMBean
public long getMaxObjects() {
return maxFsObjects; return maxFsObjects;
} }
@ -5407,7 +5444,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
@Override // FSNamesystemMBean @Override // FSNamesystemMBean
public int getNumDecomDeadDataNodes() { public int getNumDecomDeadDataNodes() {
final List<DatanodeDescriptor> dead = new ArrayList<DatanodeDescriptor>(); final List<DatanodeDescriptor> dead = new ArrayList<DatanodeDescriptor>();
getBlockManager().getDatanodeManager().fetchDatanodes(dead, null, true); getBlockManager().getDatanodeManager().fetchDatanodes(null, dead, true);
int deadDecommissioned = 0; int deadDecommissioned = 0;
for (DatanodeDescriptor node : dead) { for (DatanodeDescriptor node : dead) {
deadDecommissioned += node.isDecommissioned() ? 1 : 0; deadDecommissioned += node.isDecommissioned() ? 1 : 0;
@ -5415,6 +5452,12 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
return deadDecommissioned; return deadDecommissioned;
} }
@Override // FSNamesystemMBean
public int getNumDecommissioningDataNodes() {
return getBlockManager().getDatanodeManager().getDecommissioningNodes()
.size();
}
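
The new getNumDecommissioningDataNodes() gauge becomes visible through the FSNamesystem MBean like the other FSNamesystemMBean getters. A hedged JMX probe is sketched below; the service URL, port and the "Hadoop:service=NameNode,name=FSNamesystemState" bean and attribute names follow the usual Hadoop conventions but should be treated as assumptions and adjusted for a real deployment.

    import javax.management.MBeanServerConnection;
    import javax.management.ObjectName;
    import javax.management.remote.JMXConnector;
    import javax.management.remote.JMXConnectorFactory;
    import javax.management.remote.JMXServiceURL;

    public class DecommissioningCountProbe {
      public static void main(String[] args) throws Exception {
        // Assumes the NameNode exposes JMX on localhost:8004; adjust for
        // a real cluster.
        JMXServiceURL url = new JMXServiceURL(
            "service:jmx:rmi:///jndi/rmi://localhost:8004/jmxrmi");
        try (JMXConnector c = JMXConnectorFactory.connect(url)) {
          MBeanServerConnection conn = c.getMBeanServerConnection();
          ObjectName name =
              new ObjectName("Hadoop:service=NameNode,name=FSNamesystemState");
          Object value = conn.getAttribute(name, "NumDecommissioningDataNodes");
          System.out.println("Decommissioning datanodes: " + value);
        }
      }
    }
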
@Override // FSNamesystemMBean @Override // FSNamesystemMBean
@Metric({"StaleDataNodes", @Metric({"StaleDataNodes",
"Number of datanodes marked stale due to delayed heartbeat"}) "Number of datanodes marked stale due to delayed heartbeat"})
@ -5513,10 +5556,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
long nextGenerationStamp(boolean legacyBlock) long nextGenerationStamp(boolean legacyBlock)
throws IOException, SafeModeException { throws IOException, SafeModeException {
assert hasWriteLock(); assert hasWriteLock();
if (isInSafeMode()) { checkNameNodeSafeMode("Cannot get next generation stamp");
throw new SafeModeException(
"Cannot get next generation stamp", safeMode);
}
long gs; long gs;
if (legacyBlock) { if (legacyBlock) {
@ -5569,12 +5609,9 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
/** /**
* Increments, logs and then returns the block ID * Increments, logs and then returns the block ID
*/ */
private long nextBlockId() throws SafeModeException { private long nextBlockId() throws IOException {
assert hasWriteLock(); assert hasWriteLock();
if (isInSafeMode()) { checkNameNodeSafeMode("Cannot get next block ID");
throw new SafeModeException(
"Cannot get next block ID", safeMode);
}
final long blockId = blockIdGenerator.nextValue(); final long blockId = blockIdGenerator.nextValue();
getEditLog().logAllocateBlockId(blockId); getEditLog().logAllocateBlockId(blockId);
// NB: callers sync the log // NB: callers sync the log
@ -5584,10 +5621,8 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
private INodeFileUnderConstruction checkUCBlock(ExtendedBlock block, private INodeFileUnderConstruction checkUCBlock(ExtendedBlock block,
String clientName) throws IOException { String clientName) throws IOException {
assert hasWriteLock(); assert hasWriteLock();
if (isInSafeMode()) { checkNameNodeSafeMode("Cannot get a new generation stamp and an "
throw new SafeModeException("Cannot get a new generation stamp and an " + + "access token for block " + block);
"access token for block " + block, safeMode);
}
// check stored block state // check stored block state
BlockInfo storedBlock = getStoredBlock(ExtendedBlock.getLocalBlock(block)); BlockInfo storedBlock = getStoredBlock(ExtendedBlock.getLocalBlock(block));
@ -5685,11 +5720,11 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
void updatePipeline(String clientName, ExtendedBlock oldBlock, void updatePipeline(String clientName, ExtendedBlock oldBlock,
ExtendedBlock newBlock, DatanodeID[] newNodes, String[] newStorageIDs) ExtendedBlock newBlock, DatanodeID[] newNodes, String[] newStorageIDs)
throws IOException { throws IOException {
checkOperation(OperationCategory.WRITE);
CacheEntry cacheEntry = RetryCache.waitForCompletion(retryCache); CacheEntry cacheEntry = RetryCache.waitForCompletion(retryCache);
if (cacheEntry != null && cacheEntry.isSuccess()) { if (cacheEntry != null && cacheEntry.isSuccess()) {
return; // Return previous response return; // Return previous response
} }
checkOperation(OperationCategory.WRITE);
LOG.info("updatePipeline(block=" + oldBlock LOG.info("updatePipeline(block=" + oldBlock
+ ", newGenerationStamp=" + newBlock.getGenerationStamp() + ", newGenerationStamp=" + newBlock.getGenerationStamp()
+ ", newLength=" + newBlock.getNumBytes() + ", newLength=" + newBlock.getNumBytes()
@ -5700,9 +5735,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
boolean success = false; boolean success = false;
try { try {
checkOperation(OperationCategory.WRITE); checkOperation(OperationCategory.WRITE);
if (isInSafeMode()) { checkNameNodeSafeMode("Pipeline not updated");
throw new SafeModeException("Pipeline not updated", safeMode);
}
assert newBlock.getBlockId()==oldBlock.getBlockId() : newBlock + " and " assert newBlock.getBlockId()==oldBlock.getBlockId() : newBlock + " and "
+ oldBlock + " has different block identifier"; + oldBlock + " has different block identifier";
updatePipelineInternal(clientName, oldBlock, newBlock, newNodes, updatePipelineInternal(clientName, oldBlock, newBlock, newNodes,
@ -5957,9 +5990,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
writeLock(); writeLock();
try { try {
checkOperation(OperationCategory.WRITE); checkOperation(OperationCategory.WRITE);
if (isInSafeMode()) { checkNameNodeSafeMode("Cannot issue delegation token");
throw new SafeModeException("Cannot issue delegation token", safeMode);
}
if (!isAllowedDelegationTokenOp()) { if (!isAllowedDelegationTokenOp()) {
throw new IOException( throw new IOException(
"Delegation Token can be issued only with kerberos or web authentication"); "Delegation Token can be issued only with kerberos or web authentication");
@ -6004,9 +6035,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
try { try {
checkOperation(OperationCategory.WRITE); checkOperation(OperationCategory.WRITE);
if (isInSafeMode()) { checkNameNodeSafeMode("Cannot renew delegation token");
throw new SafeModeException("Cannot renew delegation token", safeMode);
}
if (!isAllowedDelegationTokenOp()) { if (!isAllowedDelegationTokenOp()) {
throw new IOException( throw new IOException(
"Delegation Token can be renewed only with kerberos or web authentication"); "Delegation Token can be renewed only with kerberos or web authentication");
@ -6037,9 +6066,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
try { try {
checkOperation(OperationCategory.WRITE); checkOperation(OperationCategory.WRITE);
if (isInSafeMode()) { checkNameNodeSafeMode("Cannot cancel delegation token");
throw new SafeModeException("Cannot cancel delegation token", safeMode);
}
String canceller = getRemoteUser().getUserName(); String canceller = getRemoteUser().getUserName();
DelegationTokenIdentifier id = dtSecretManager DelegationTokenIdentifier id = dtSecretManager
.cancelToken(token, canceller); .cancelToken(token, canceller);
@ -6265,14 +6292,25 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
final List<DatanodeDescriptor> live = new ArrayList<DatanodeDescriptor>(); final List<DatanodeDescriptor> live = new ArrayList<DatanodeDescriptor>();
blockManager.getDatanodeManager().fetchDatanodes(live, null, true); blockManager.getDatanodeManager().fetchDatanodes(live, null, true);
for (DatanodeDescriptor node : live) { for (DatanodeDescriptor node : live) {
final Map<String, Object> innerinfo = new HashMap<String, Object>(); Map<String, Object> innerinfo = ImmutableMap.<String, Object>builder()
innerinfo.put("lastContact", getLastContact(node)); .put("infoAddr", node.getInfoAddr())
innerinfo.put("usedSpace", getDfsUsed(node)); .put("infoSecureAddr", node.getInfoSecureAddr())
innerinfo.put("adminState", node.getAdminState().toString()); .put("xferaddr", node.getXferAddr())
innerinfo.put("nonDfsUsedSpace", node.getNonDfsUsed()); .put("lastContact", getLastContact(node))
innerinfo.put("capacity", node.getCapacity()); .put("usedSpace", getDfsUsed(node))
innerinfo.put("numBlocks", node.numBlocks()); .put("adminState", node.getAdminState().toString())
innerinfo.put("version", node.getSoftwareVersion()); .put("nonDfsUsedSpace", node.getNonDfsUsed())
.put("capacity", node.getCapacity())
.put("numBlocks", node.numBlocks())
.put("version", node.getSoftwareVersion())
.put("used", node.getDfsUsed())
.put("remaining", node.getRemaining())
.put("blockScheduled", node.getBlocksScheduled())
.put("blockPoolUsed", node.getBlockPoolUsed())
.put("blockPoolUsedPercent", node.getBlockPoolUsedPercent())
.put("volfails", node.getVolumeFailures())
.build();
info.put(node.getHostName(), innerinfo); info.put(node.getHostName(), innerinfo);
} }
return JSON.toString(info); return JSON.toString(info);
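
The JSON-reporting methods in this region switch from hand-populated HashMaps to Guava's ImmutableMap.Builder. The standalone example below shows the builder idiom with a couple of the same keys and dummy values; note that ImmutableMap rejects null values and duplicate keys at build() time, so every metric added this way has to be non-null.

    import java.util.Map;
    import com.google.common.collect.ImmutableMap;

    public class ImmutableMapExample {
      public static void main(String[] args) {
        // One fluent builder instead of repeated HashMap.put() calls;
        // the keys mirror a few from the diff, the values are dummies.
        Map<String, Object> innerinfo = ImmutableMap.<String, Object>builder()
            .put("lastContact", 3L)
            .put("usedSpace", 1024L)
            .put("adminState", "In Service")
            .build();
        System.out.println(innerinfo);
      }
    }
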
@ -6289,9 +6327,11 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
final List<DatanodeDescriptor> dead = new ArrayList<DatanodeDescriptor>(); final List<DatanodeDescriptor> dead = new ArrayList<DatanodeDescriptor>();
blockManager.getDatanodeManager().fetchDatanodes(null, dead, true); blockManager.getDatanodeManager().fetchDatanodes(null, dead, true);
for (DatanodeDescriptor node : dead) { for (DatanodeDescriptor node : dead) {
final Map<String, Object> innerinfo = new HashMap<String, Object>(); Map<String, Object> innerinfo = ImmutableMap.<String, Object>builder()
innerinfo.put("lastContact", getLastContact(node)); .put("lastContact", getLastContact(node))
innerinfo.put("decommissioned", node.isDecommissioned()); .put("decommissioned", node.isDecommissioned())
.put("xferaddr", node.getXferAddr())
.build();
info.put(node.getHostName(), innerinfo); info.put(node.getHostName(), innerinfo);
} }
return JSON.toString(info); return JSON.toString(info);
@ -6308,13 +6348,16 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
final List<DatanodeDescriptor> decomNodeList = blockManager.getDatanodeManager( final List<DatanodeDescriptor> decomNodeList = blockManager.getDatanodeManager(
).getDecommissioningNodes(); ).getDecommissioningNodes();
for (DatanodeDescriptor node : decomNodeList) { for (DatanodeDescriptor node : decomNodeList) {
final Map<String, Object> innerinfo = new HashMap<String, Object>(); Map<String, Object> innerinfo = ImmutableMap
innerinfo.put("underReplicatedBlocks", node.decommissioningStatus .<String, Object> builder()
.getUnderReplicatedBlocks()); .put("xferaddr", node.getXferAddr())
innerinfo.put("decommissionOnlyReplicas", node.decommissioningStatus .put("underReplicatedBlocks",
.getDecommissionOnlyReplicas()); node.decommissioningStatus.getUnderReplicatedBlocks())
innerinfo.put("underReplicateInOpenFiles", node.decommissioningStatus .put("decommissionOnlyReplicas",
.getUnderReplicatedInOpenFiles()); node.decommissioningStatus.getDecommissionOnlyReplicas())
.put("underReplicateInOpenFiles",
node.decommissioningStatus.getUnderReplicatedInOpenFiles())
.build();
info.put(node.getHostName(), innerinfo); info.put(node.getHostName(), innerinfo);
} }
return JSON.toString(info); return JSON.toString(info);
@ -6504,11 +6547,17 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
* Verifies that the given identifier and password are valid and match. * Verifies that the given identifier and password are valid and match.
* @param identifier Token identifier. * @param identifier Token identifier.
* @param password Password in the token. * @param password Password in the token.
* @throws InvalidToken
*/ */
public synchronized void verifyToken(DelegationTokenIdentifier identifier, public synchronized void verifyToken(DelegationTokenIdentifier identifier,
byte[] password) throws InvalidToken { byte[] password) throws InvalidToken, RetriableException {
getDelegationTokenSecretManager().verifyToken(identifier, password); try {
getDelegationTokenSecretManager().verifyToken(identifier, password);
} catch (InvalidToken it) {
if (inTransitionToActive()) {
throw new RetriableException(it);
}
throw it;
}
} }
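
verifyToken() now distinguishes a genuinely invalid token from one that merely cannot be verified yet because the NameNode is still transitioning to active; the latter is wrapped in RetriableException so callers can try again. A standalone sketch of the caller-side handling; TokenCheck is a stand-in interface for the verification call, not a real Hadoop API.

    import org.apache.hadoop.ipc.RetriableException;
    import org.apache.hadoop.security.token.SecretManager.InvalidToken;

    public class TokenVerifyRetrySketch {
      /** Stand-in for the verifyToken() call; an assumption, not a real API. */
      interface TokenCheck {
        void run() throws InvalidToken, RetriableException;
      }

      // InvalidToken is treated as fatal; RetriableException, thrown only
      // while the NameNode is still becoming active, is retried a few
      // times with a short pause.
      static void verifyWithRetry(TokenCheck check) throws Exception {
        for (int attempt = 1; ; attempt++) {
          try {
            check.run();
            return;                     // token accepted
          } catch (RetriableException re) {
            if (attempt >= 5) {
              throw re;                 // give up after a few attempts
            }
            Thread.sleep(1000L);        // NN not fully active yet
          }
        }
      }
    }
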
@Override @Override
@ -6525,6 +6574,11 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
return editLogTailer; return editLogTailer;
} }
@VisibleForTesting
public void setEditLogTailerForTests(EditLogTailer tailer) {
this.editLogTailer = tailer;
}
@VisibleForTesting @VisibleForTesting
void setFsLockForTests(ReentrantReadWriteLock lock) { void setFsLockForTests(ReentrantReadWriteLock lock) {
this.fsLock = lock; this.fsLock = lock;
@ -6560,10 +6614,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
writeLock(); writeLock();
try { try {
checkOperation(OperationCategory.WRITE); checkOperation(OperationCategory.WRITE);
if (isInSafeMode()) { checkNameNodeSafeMode("Cannot allow snapshot for " + path);
throw new SafeModeException("Cannot allow snapshot for " + path,
safeMode);
}
checkSuperuserPrivilege(); checkSuperuserPrivilege();
dir.writeLock(); dir.writeLock();
@ -6588,10 +6639,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
writeLock(); writeLock();
try { try {
checkOperation(OperationCategory.WRITE); checkOperation(OperationCategory.WRITE);
if (isInSafeMode()) { checkNameNodeSafeMode("Cannot disallow snapshot for " + path);
throw new SafeModeException("Cannot disallow snapshot for " + path,
safeMode);
}
checkSuperuserPrivilege(); checkSuperuserPrivilege();
dir.writeLock(); dir.writeLock();
@ -6618,20 +6666,18 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
*/ */
String createSnapshot(String snapshotRoot, String snapshotName) String createSnapshot(String snapshotRoot, String snapshotName)
throws SafeModeException, IOException { throws SafeModeException, IOException {
checkOperation(OperationCategory.WRITE);
final FSPermissionChecker pc = getPermissionChecker();
CacheEntryWithPayload cacheEntry = RetryCache.waitForCompletion(retryCache, CacheEntryWithPayload cacheEntry = RetryCache.waitForCompletion(retryCache,
null); null);
if (cacheEntry != null && cacheEntry.isSuccess()) { if (cacheEntry != null && cacheEntry.isSuccess()) {
return (String) cacheEntry.getPayload(); return (String) cacheEntry.getPayload();
} }
final FSPermissionChecker pc = getPermissionChecker();
writeLock(); writeLock();
String snapshotPath = null; String snapshotPath = null;
try { try {
checkOperation(OperationCategory.WRITE); checkOperation(OperationCategory.WRITE);
if (isInSafeMode()) { checkNameNodeSafeMode("Cannot create snapshot for " + snapshotRoot);
throw new SafeModeException("Cannot create snapshot for "
+ snapshotRoot, safeMode);
}
if (isPermissionEnabled) { if (isPermissionEnabled) {
checkOwner(pc, snapshotRoot); checkOwner(pc, snapshotRoot);
} }
@ -6670,19 +6716,17 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
*/ */
void renameSnapshot(String path, String snapshotOldName, void renameSnapshot(String path, String snapshotOldName,
String snapshotNewName) throws SafeModeException, IOException { String snapshotNewName) throws SafeModeException, IOException {
checkOperation(OperationCategory.WRITE);
final FSPermissionChecker pc = getPermissionChecker();
CacheEntry cacheEntry = RetryCache.waitForCompletion(retryCache); CacheEntry cacheEntry = RetryCache.waitForCompletion(retryCache);
if (cacheEntry != null && cacheEntry.isSuccess()) { if (cacheEntry != null && cacheEntry.isSuccess()) {
return; // Return previous response return; // Return previous response
} }
final FSPermissionChecker pc = getPermissionChecker();
writeLock(); writeLock();
boolean success = false; boolean success = false;
try { try {
checkOperation(OperationCategory.WRITE); checkOperation(OperationCategory.WRITE);
if (isInSafeMode()) { checkNameNodeSafeMode("Cannot rename snapshot for " + path);
throw new SafeModeException("Cannot rename snapshot for " + path,
safeMode);
}
if (isPermissionEnabled) { if (isPermissionEnabled) {
checkOwner(pc, path); checkOwner(pc, path);
} }
@ -6715,10 +6759,10 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
public SnapshottableDirectoryStatus[] getSnapshottableDirListing() public SnapshottableDirectoryStatus[] getSnapshottableDirListing()
throws IOException { throws IOException {
SnapshottableDirectoryStatus[] status = null; SnapshottableDirectoryStatus[] status = null;
final FSPermissionChecker checker = getPermissionChecker();
readLock(); readLock();
try { try {
checkOperation(OperationCategory.READ); checkOperation(OperationCategory.READ);
FSPermissionChecker checker = getPermissionChecker();
final String user = checker.isSuperUser()? null : checker.getUser(); final String user = checker.isSuperUser()? null : checker.getUser();
status = snapshotManager.getSnapshottableDirListing(user); status = snapshotManager.getSnapshottableDirListing(user);
} finally { } finally {
@ -6786,21 +6830,21 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
*/ */
void deleteSnapshot(String snapshotRoot, String snapshotName) void deleteSnapshot(String snapshotRoot, String snapshotName)
throws SafeModeException, IOException { throws SafeModeException, IOException {
checkOperation(OperationCategory.WRITE);
final FSPermissionChecker pc = getPermissionChecker(); final FSPermissionChecker pc = getPermissionChecker();
CacheEntry cacheEntry = RetryCache.waitForCompletion(retryCache); CacheEntry cacheEntry = RetryCache.waitForCompletion(retryCache);
if (cacheEntry != null && cacheEntry.isSuccess()) { if (cacheEntry != null && cacheEntry.isSuccess()) {
return; // Return previous response return; // Return previous response
} }
boolean success = false; boolean success = false;
checkOperation(OperationCategory.WRITE);
writeLock(); writeLock();
try { try {
checkOperation(OperationCategory.WRITE); checkOperation(OperationCategory.WRITE);
if (isInSafeMode()) { checkNameNodeSafeMode("Cannot delete snapshot for " + snapshotRoot);
throw new SafeModeException( if (isPermissionEnabled) {
"Cannot delete snapshot for " + snapshotRoot, safeMode); checkOwner(pc, snapshotRoot);
} }
checkOwner(pc, snapshotRoot);
BlocksMapUpdateInfo collectedBlocks = new BlocksMapUpdateInfo(); BlocksMapUpdateInfo collectedBlocks = new BlocksMapUpdateInfo();
List<INode> removedINodes = new ChunkedArrayList<INode>(); List<INode> removedINodes = new ChunkedArrayList<INode>();

View File

@ -57,9 +57,14 @@ public class FileChecksumServlets {
final String hostname = host instanceof DatanodeInfo final String hostname = host instanceof DatanodeInfo
? ((DatanodeInfo)host).getHostName() : host.getIpAddr(); ? ((DatanodeInfo)host).getHostName() : host.getIpAddr();
final String scheme = request.getScheme(); final String scheme = request.getScheme();
final int port = "https".equals(scheme) int port = host.getInfoPort();
? (Integer)getServletContext().getAttribute(DFSConfigKeys.DFS_DATANODE_HTTPS_PORT_KEY) if ("https".equals(scheme)) {
: host.getInfoPort(); final Integer portObject = (Integer) getServletContext().getAttribute(
DFSConfigKeys.DFS_DATANODE_HTTPS_PORT_KEY);
if (portObject != null) {
port = portObject;
}
}
final String encodedPath = ServletUtil.getRawPath(request, "/fileChecksum"); final String encodedPath = ServletUtil.getRawPath(request, "/fileChecksum");
String dtParam = ""; String dtParam = "";

View File

@ -61,9 +61,14 @@ public class FileDataServlet extends DfsServlet {
} else { } else {
hostname = host.getIpAddr(); hostname = host.getIpAddr();
} }
final int port = "https".equals(scheme) int port = host.getInfoPort();
? (Integer)getServletContext().getAttribute(DFSConfigKeys.DFS_DATANODE_HTTPS_PORT_KEY) if ("https".equals(scheme)) {
: host.getInfoPort(); final Integer portObject = (Integer) getServletContext().getAttribute(
DFSConfigKeys.DFS_DATANODE_HTTPS_PORT_KEY);
if (portObject != null) {
port = portObject;
}
}
String dtParam = ""; String dtParam = "";
if (dt != null) { if (dt != null) {
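
Both servlet hunks above apply the same null-safe pattern: start from the datanode's info port and only switch to the HTTPS port when the servlet-context attribute is actually set. A standalone distillation of that choice:

    class PortChoiceSketch {
      // Prefer the configured HTTPS port for an https request, but fall
      // back to the datanode info port when the attribute is absent.
      static int choosePort(String scheme, int infoPort, Integer httpsPortAttr) {
        int port = infoPort;
        if ("https".equals(scheme) && httpsPortAttr != null) {
          port = httpsPortAttr;
        }
        return port;
      }
    }
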

View File

@ -52,6 +52,7 @@ public class NameNodeHttpServer {
private final NameNode nn; private final NameNode nn;
private InetSocketAddress httpAddress; private InetSocketAddress httpAddress;
private InetSocketAddress httpsAddress;
private InetSocketAddress bindAddress; private InetSocketAddress bindAddress;
public static final String NAMENODE_ADDRESS_ATTRIBUTE_KEY = "name.node.address"; public static final String NAMENODE_ADDRESS_ATTRIBUTE_KEY = "name.node.address";
@ -99,14 +100,15 @@ public class NameNodeHttpServer {
boolean certSSL = conf.getBoolean(DFSConfigKeys.DFS_HTTPS_ENABLE_KEY, false); boolean certSSL = conf.getBoolean(DFSConfigKeys.DFS_HTTPS_ENABLE_KEY, false);
if (certSSL) { if (certSSL) {
boolean needClientAuth = conf.getBoolean("dfs.https.need.client.auth", false); boolean needClientAuth = conf.getBoolean("dfs.https.need.client.auth", false);
InetSocketAddress secInfoSocAddr = NetUtils.createSocketAddr(infoHost + ":" + conf.get( httpsAddress = NetUtils.createSocketAddr(conf.get(
DFSConfigKeys.DFS_NAMENODE_HTTPS_PORT_KEY, "0")); DFSConfigKeys.DFS_NAMENODE_HTTPS_ADDRESS_KEY,
DFSConfigKeys.DFS_NAMENODE_HTTPS_ADDRESS_DEFAULT));
Configuration sslConf = new Configuration(false); Configuration sslConf = new Configuration(false);
if (certSSL) { sslConf.addResource(conf.get(
sslConf.addResource(conf.get(DFSConfigKeys.DFS_SERVER_HTTPS_KEYSTORE_RESOURCE_KEY, DFSConfigKeys.DFS_SERVER_HTTPS_KEYSTORE_RESOURCE_KEY,
"ssl-server.xml")); DFSConfigKeys.DFS_SERVER_HTTPS_KEYSTORE_RESOURCE_DEFAULT));
} httpServer.addSslListener(httpsAddress, sslConf, needClientAuth);
httpServer.addSslListener(secInfoSocAddr, sslConf, needClientAuth);
// assume same ssl port for all datanodes // assume same ssl port for all datanodes
InetSocketAddress datanodeSslPort = NetUtils.createSocketAddr(conf.get( InetSocketAddress datanodeSslPort = NetUtils.createSocketAddr(conf.get(
DFSConfigKeys.DFS_DATANODE_HTTPS_ADDRESS_KEY, infoHost + ":" + 50475)); DFSConfigKeys.DFS_DATANODE_HTTPS_ADDRESS_KEY, infoHost + ":" + 50475));
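
The HTTPS listener is now bound from the single dfs.namenode.https-address key and its default, instead of gluing the info host onto a separate https port key. A hedged standalone sketch of that lookup, using literal key strings in place of the DFSConfigKeys constants; the 0.0.0.0:50470 default matches the usual Hadoop default but is an assumption here.

    import java.net.InetSocketAddress;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.net.NetUtils;

    class HttpsAddressSketch {
      public static void main(String[] args) {
        Configuration conf = new Configuration(false);
        // dfs.namenode.https-address would normally come from hdfs-site.xml.
        conf.set("dfs.namenode.https-address", "namenode.example.com:50470");
        InetSocketAddress httpsAddress = NetUtils.createSocketAddr(
            conf.get("dfs.namenode.https-address", "0.0.0.0:50470"));
        System.out.println(httpsAddress);
      }
    }
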
@ -163,6 +165,10 @@ public class NameNodeHttpServer {
return httpAddress; return httpAddress;
} }
public InetSocketAddress getHttpsAddress() {
return httpsAddress;
}
/** /**
* Sets fsimage for use by servlets. * Sets fsimage for use by servlets.
* *

View File

@ -28,12 +28,7 @@ import java.net.InetSocketAddress;
import java.net.URI; import java.net.URI;
import java.net.URLEncoder; import java.net.URLEncoder;
import java.security.PrivilegedExceptionAction; import java.security.PrivilegedExceptionAction;
import java.util.ArrayList; import java.util.*;
import java.util.Arrays;
import java.util.Date;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import javax.servlet.ServletContext; import javax.servlet.ServletContext;
import javax.servlet.http.HttpServletRequest; import javax.servlet.http.HttpServletRequest;
@ -211,6 +206,9 @@ class NamenodeJspHelper {
static void generateSnapshotReport(JspWriter out, FSNamesystem fsn) static void generateSnapshotReport(JspWriter out, FSNamesystem fsn)
throws IOException { throws IOException {
if (fsn == null) {
return;
}
out.println("<div id=\"snapshotstats\"><div class=\"dfstable\">" out.println("<div id=\"snapshotstats\"><div class=\"dfstable\">"
+ "<table class=\"storage\" title=\"Snapshot Summary\">\n" + "<table class=\"storage\" title=\"Snapshot Summary\">\n"
+ "<thead><tr><td><b>Snapshottable directories</b></td>" + "<thead><tr><td><b>Snapshottable directories</b></td>"
@ -653,25 +651,22 @@ class NamenodeJspHelper {
.getAttribute(JspHelper.CURRENT_CONF); .getAttribute(JspHelper.CURRENT_CONF);
// We can't redirect if there isn't a DN to redirect to. // We can't redirect if there isn't a DN to redirect to.
// Lets instead show a proper error message. // Lets instead show a proper error message.
if (nn.getNamesystem().getNumLiveDataNodes() < 1) { FSNamesystem fsn = nn.getNamesystem();
DatanodeID datanode = null;
if (fsn != null && fsn.getNumLiveDataNodes() >= 1) {
datanode = getRandomDatanode(nn);
}
if (datanode == null) {
throw new IOException("Can't browse the DFS since there are no " + throw new IOException("Can't browse the DFS since there are no " +
"live nodes available to redirect to."); "live nodes available to redirect to.");
} }
final DatanodeID datanode = getRandomDatanode(nn);;
UserGroupInformation ugi = JspHelper.getUGI(context, request, conf); UserGroupInformation ugi = JspHelper.getUGI(context, request, conf);
// if the user is defined, get a delegation token and stringify it
String tokenString = getDelegationToken( String tokenString = getDelegationToken(
nn.getRpcServer(), request, conf, ugi); nn.getRpcServer(), request, conf, ugi);
// if the user is defined, get a delegation token and stringify it
final String redirectLocation;
final String nodeToRedirect;
int redirectPort;
if (datanode != null) {
nodeToRedirect = datanode.getIpAddr();
redirectPort = datanode.getInfoPort();
} else {
nodeToRedirect = nn.getHttpAddress().getHostName();
redirectPort = nn.getHttpAddress().getPort();
}
InetSocketAddress rpcAddr = nn.getNameNodeAddress(); InetSocketAddress rpcAddr = nn.getNameNodeAddress();
String rpcHost = rpcAddr.getAddress().isAnyLocalAddress() String rpcHost = rpcAddr.getAddress().isAnyLocalAddress()
@ -679,16 +674,31 @@ class NamenodeJspHelper {
: rpcAddr.getAddress().getHostAddress(); : rpcAddr.getAddress().getHostAddress();
String addr = rpcHost + ":" + rpcAddr.getPort(); String addr = rpcHost + ":" + rpcAddr.getPort();
String fqdn = InetAddress.getByName(nodeToRedirect).getCanonicalHostName(); final String redirectLocation =
redirectLocation = HttpConfig.getSchemePrefix() + fqdn + ":" + redirectPort JspHelper.Url.url(request.getScheme(), datanode)
+ "/browseDirectory.jsp?namenodeInfoPort=" + "/browseDirectory.jsp?namenodeInfoPort="
+ nn.getHttpAddress().getPort() + "&dir=/" + request.getServerPort() + "&dir=/"
+ (tokenString == null ? "" : + (tokenString == null ? "" :
JspHelper.getDelegationTokenUrlParam(tokenString)) JspHelper.getDelegationTokenUrlParam(tokenString))
+ JspHelper.getUrlParam(JspHelper.NAMENODE_ADDRESS, addr); + JspHelper.getUrlParam(JspHelper.NAMENODE_ADDRESS, addr);
resp.sendRedirect(redirectLocation); resp.sendRedirect(redirectLocation);
} }
/**
* Returns a descriptive label for the running NameNode. If the NameNode has
* initialized to the point of running its RPC server, then this label consists
* of the host and port of the RPC server. Otherwise, the label is a message
* stating that the NameNode is still initializing.
*
* @param nn NameNode to describe
* @return String NameNode label
*/
static String getNameNodeLabel(NameNode nn) {
return nn.getRpcServer() != null ? nn.getNameNodeAddressHostPortString() :
"initializing";
}
static class NodeListJsp { static class NodeListJsp {
private int rowNum = 0; private int rowNum = 0;
@ -726,12 +736,11 @@ class NamenodeJspHelper {
} }
private void generateNodeDataHeader(JspWriter out, DatanodeDescriptor d, private void generateNodeDataHeader(JspWriter out, DatanodeDescriptor d,
String suffix, boolean alive, int nnHttpPort, String nnaddr) String suffix, boolean alive, int nnInfoPort, String nnaddr, String scheme)
throws IOException { throws IOException {
// from nn_browsedfscontent.jsp: // from nn_browsedfscontent.jsp:
String url = HttpConfig.getSchemePrefix() + d.getHostName() + ":" String url = "///" + JspHelper.Url.authority(scheme, d)
+ d.getInfoPort() + "/browseDirectory.jsp?namenodeInfoPort=" + nnInfoPort + "&dir="
+ "/browseDirectory.jsp?namenodeInfoPort=" + nnHttpPort + "&dir="
+ URLEncoder.encode("/", "UTF-8") + URLEncoder.encode("/", "UTF-8")
+ JspHelper.getUrlParam(JspHelper.NAMENODE_ADDRESS, nnaddr); + JspHelper.getUrlParam(JspHelper.NAMENODE_ADDRESS, nnaddr);
@ -748,9 +757,9 @@ class NamenodeJspHelper {
} }
void generateDecommissioningNodeData(JspWriter out, DatanodeDescriptor d, void generateDecommissioningNodeData(JspWriter out, DatanodeDescriptor d,
String suffix, boolean alive, int nnHttpPort, String nnaddr) String suffix, boolean alive, int nnInfoPort, String nnaddr, String scheme)
throws IOException { throws IOException {
generateNodeDataHeader(out, d, suffix, alive, nnHttpPort, nnaddr); generateNodeDataHeader(out, d, suffix, alive, nnInfoPort, nnaddr, scheme);
if (!alive) { if (!alive) {
return; return;
} }
@ -774,7 +783,7 @@ class NamenodeJspHelper {
} }
void generateNodeData(JspWriter out, DatanodeDescriptor d, String suffix, void generateNodeData(JspWriter out, DatanodeDescriptor d, String suffix,
boolean alive, int nnHttpPort, String nnaddr) throws IOException { boolean alive, int nnInfoPort, String nnaddr, String scheme) throws IOException {
/* /*
* Say the datanode is dn1.hadoop.apache.org with ip 192.168.0.5 we use: * Say the datanode is dn1.hadoop.apache.org with ip 192.168.0.5 we use:
* 1) d.getHostName():d.getPort() to display. Domain and port are stripped * 1) d.getHostName():d.getPort() to display. Domain and port are stripped
@ -786,7 +795,7 @@ class NamenodeJspHelper {
* interact with datanodes. * interact with datanodes.
*/ */
generateNodeDataHeader(out, d, suffix, alive, nnHttpPort, nnaddr); generateNodeDataHeader(out, d, suffix, alive, nnInfoPort, nnaddr, scheme);
long currentTime = Time.now(); long currentTime = Time.now();
long timestamp = d.getLastUpdate(); long timestamp = d.getLastUpdate();
if (!alive) { if (!alive) {
@ -844,17 +853,17 @@ class NamenodeJspHelper {
HttpServletRequest request) throws IOException { HttpServletRequest request) throws IOException {
final NameNode nn = NameNodeHttpServer.getNameNodeFromContext(context); final NameNode nn = NameNodeHttpServer.getNameNodeFromContext(context);
final FSNamesystem ns = nn.getNamesystem(); final FSNamesystem ns = nn.getNamesystem();
if (ns == null) {
return;
}
final DatanodeManager dm = ns.getBlockManager().getDatanodeManager(); final DatanodeManager dm = ns.getBlockManager().getDatanodeManager();
final List<DatanodeDescriptor> live = new ArrayList<DatanodeDescriptor>(); final List<DatanodeDescriptor> live = new ArrayList<DatanodeDescriptor>();
final List<DatanodeDescriptor> dead = new ArrayList<DatanodeDescriptor>(); final List<DatanodeDescriptor> dead = new ArrayList<DatanodeDescriptor>();
dm.fetchDatanodes(live, dead, true); dm.fetchDatanodes(live, dead, true);
InetSocketAddress nnSocketAddress = String nnaddr = nn.getServiceRpcAddress().getAddress().getHostName() + ":"
(InetSocketAddress)context.getAttribute( + nn.getServiceRpcAddress().getPort();
NameNodeHttpServer.NAMENODE_ADDRESS_ATTRIBUTE_KEY);
String nnaddr = nnSocketAddress.getAddress().getHostAddress() + ":"
+ nnSocketAddress.getPort();
whatNodes = request.getParameter("whatNodes"); // show only live or only whatNodes = request.getParameter("whatNodes"); // show only live or only
// dead nodes // dead nodes
@ -890,16 +899,11 @@ class NamenodeJspHelper {
counterReset(); counterReset();
try {
Thread.sleep(1000);
} catch (InterruptedException e) {
}
if (live.isEmpty() && dead.isEmpty()) { if (live.isEmpty() && dead.isEmpty()) {
out.print("There are no datanodes in the cluster"); out.print("There are no datanodes in the cluster");
} else { } else {
int nnHttpPort = nn.getHttpAddress().getPort(); int nnInfoPort = request.getServerPort();
out.print("<div id=\"dfsnodetable\"> "); out.print("<div id=\"dfsnodetable\"> ");
if (whatNodes.equals("LIVE")) { if (whatNodes.equals("LIVE")) {
out.print("<a name=\"LiveNodes\" id=\"title\">" + "Live Datanodes : " out.print("<a name=\"LiveNodes\" id=\"title\">" + "Live Datanodes : "
@ -941,8 +945,8 @@ class NamenodeJspHelper {
JspHelper.sortNodeList(live, sorterField, sorterOrder); JspHelper.sortNodeList(live, sorterField, sorterOrder);
for (int i = 0; i < live.size(); i++) { for (int i = 0; i < live.size(); i++) {
generateNodeData(out, live.get(i), port_suffix, true, nnHttpPort, generateNodeData(out, live.get(i), port_suffix, true, nnInfoPort,
nnaddr); nnaddr, request.getScheme());
} }
} }
out.print("</table>\n"); out.print("</table>\n");
@ -964,7 +968,7 @@ class NamenodeJspHelper {
JspHelper.sortNodeList(dead, sorterField, sorterOrder); JspHelper.sortNodeList(dead, sorterField, sorterOrder);
for (int i = 0; i < dead.size(); i++) { for (int i = 0; i < dead.size(); i++) {
generateNodeData(out, dead.get(i), port_suffix, false, generateNodeData(out, dead.get(i), port_suffix, false,
nnHttpPort, nnaddr); nnInfoPort, nnaddr, request.getScheme());
} }
out.print("</table>\n"); out.print("</table>\n");
@ -995,7 +999,7 @@ class NamenodeJspHelper {
JspHelper.sortNodeList(decommissioning, "name", "ASC"); JspHelper.sortNodeList(decommissioning, "name", "ASC");
for (int i = 0; i < decommissioning.size(); i++) { for (int i = 0; i < decommissioning.size(); i++) {
generateDecommissioningNodeData(out, decommissioning.get(i), generateDecommissioningNodeData(out, decommissioning.get(i),
port_suffix, true, nnHttpPort, nnaddr); port_suffix, true, nnInfoPort, nnaddr, request.getScheme());
} }
out.print("</table>\n"); out.print("</table>\n");
} }
@ -1023,14 +1027,16 @@ class NamenodeJspHelper {
final BlockManager blockManager; final BlockManager blockManager;
XMLBlockInfo(FSNamesystem fsn, Long blockId) { XMLBlockInfo(FSNamesystem fsn, Long blockId) {
this.blockManager = fsn.getBlockManager(); this.blockManager = fsn != null ? fsn.getBlockManager() : null;
if (blockId == null) { if (blockId == null) {
this.block = null; this.block = null;
this.inode = null; this.inode = null;
} else { } else {
this.block = new Block(blockId); this.block = new Block(blockId);
this.inode = ((INode)blockManager.getBlockCollection(block)).asFile(); this.inode = blockManager != null ?
((INode)blockManager.getBlockCollection(block)).asFile() :
null;
} }
} }
@ -1104,7 +1110,9 @@ class NamenodeJspHelper {
} }
doc.startTag("replicas"); doc.startTag("replicas");
for(DatanodeStorageInfo storage : blockManager.getStorages(block)) { for(DatanodeStorageInfo storage : (blockManager != null ?
blockManager.getStorages(block) :
Collections.<DatanodeStorageInfo>emptyList())) {
doc.startTag("replica"); doc.startTag("replica");
DatanodeDescriptor dd = storage.getDatanodeDescriptor(); DatanodeDescriptor dd = storage.getDatanodeDescriptor();
@ -1140,7 +1148,7 @@ class NamenodeJspHelper {
XMLCorruptBlockInfo(FSNamesystem fsn, Configuration conf, XMLCorruptBlockInfo(FSNamesystem fsn, Configuration conf,
int numCorruptBlocks, Long startingBlockId) { int numCorruptBlocks, Long startingBlockId) {
this.blockManager = fsn.getBlockManager(); this.blockManager = fsn != null ? fsn.getBlockManager() : null;
this.conf = conf; this.conf = conf;
this.numCorruptBlocks = numCorruptBlocks; this.numCorruptBlocks = numCorruptBlocks;
this.startingBlockId = startingBlockId; this.startingBlockId = startingBlockId;
@ -1163,16 +1171,19 @@ class NamenodeJspHelper {
doc.endTag(); doc.endTag();
doc.startTag("num_missing_blocks"); doc.startTag("num_missing_blocks");
doc.pcdata(""+blockManager.getMissingBlocksCount()); doc.pcdata("" + (blockManager != null ?
blockManager.getMissingBlocksCount() : 0));
doc.endTag(); doc.endTag();
doc.startTag("num_corrupt_replica_blocks"); doc.startTag("num_corrupt_replica_blocks");
doc.pcdata(""+blockManager.getCorruptReplicaBlocksCount()); doc.pcdata("" + (blockManager != null ?
blockManager.getCorruptReplicaBlocksCount() : 0));
doc.endTag(); doc.endTag();
doc.startTag("corrupt_replica_block_ids"); doc.startTag("corrupt_replica_block_ids");
final long[] corruptBlockIds = blockManager.getCorruptReplicaBlockIds( final long[] corruptBlockIds = blockManager != null ?
numCorruptBlocks, startingBlockId); blockManager.getCorruptReplicaBlockIds(numCorruptBlocks,
startingBlockId) : null;
if (corruptBlockIds != null) { if (corruptBlockIds != null) {
for (Long blockId: corruptBlockIds) { for (Long blockId: corruptBlockIds) {
doc.startTag("block_id"); doc.startTag("block_id");

View File

@ -19,6 +19,7 @@ package org.apache.hadoop.hdfs.server.namenode;
import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.hdfs.protocol.Block; import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoUnderConstruction;
import org.apache.hadoop.hdfs.server.namenode.NameNode.OperationCategory; import org.apache.hadoop.hdfs.server.namenode.NameNode.OperationCategory;
import org.apache.hadoop.hdfs.util.RwLock; import org.apache.hadoop.hdfs.util.RwLock;
import org.apache.hadoop.ipc.StandbyException; import org.apache.hadoop.ipc.StandbyException;
@ -43,4 +44,6 @@ public interface Namesystem extends RwLock, SafeMode {
public void adjustSafeModeBlockTotals(int deltaSafe, int deltaTotal); public void adjustSafeModeBlockTotals(int deltaSafe, int deltaTotal);
public void checkOperation(OperationCategory read) throws StandbyException; public void checkOperation(OperationCategory read) throws StandbyException;
public boolean isInSnapshot(BlockInfoUnderConstruction blockUC);
} }

View File

@ -33,10 +33,7 @@ import org.apache.hadoop.classification.InterfaceStability;
public class SafeModeException extends IOException { public class SafeModeException extends IOException {
private static final long serialVersionUID = 1L; private static final long serialVersionUID = 1L;
public SafeModeException() {}
public SafeModeException(String text, FSNamesystem.SafeModeInfo mode ) { public SafeModeException(String text, FSNamesystem.SafeModeInfo mode ) {
super(text + ". Name node is in safe mode.\n" + mode.getTurnOffTip()); super(text + ". Name node is in safe mode.\n" + mode.getTurnOffTip());
} }
}
}

Some files were not shown because too many files have changed in this diff.