HADOOP-13419. Fix javadoc warnings by JDK8 in hadoop-common package. Contributed by Kai Sasaki.

This commit is contained in:
Masatake Iwasaki 2016-08-16 13:30:40 +09:00
parent 4d4d95fdd5
commit b8a446ba57
5 changed files with 28 additions and 73 deletions

View File

@@ -303,7 +303,7 @@ static void processDeleteOnExit() {
*
* @throws UnsupportedFileSystemException If the file system for
* <code>absOrFqPath</code> is not supported.
* @throws IOExcepton If the file system for <code>absOrFqPath</code> could
* @throws IOException If the file system for <code>absOrFqPath</code> could
* not be instantiated.
*/
protected AbstractFileSystem getFSofPath(final Path absOrFqPath)
@@ -2713,7 +2713,7 @@ public Void next(final AbstractFileSystem fs, final Path p)
/**
* Query the effective storage policy ID for the given file or directory.
*
* @param src file or directory path.
* @param path file or directory path.
* @return storage policy for give file.
* @throws IOException
*/

View File

@@ -15,6 +15,28 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* A mechanism for selectively retrying methods that throw exceptions under
* certain circumstances.
* Typical usage is
* UnreliableImplementation unreliableImpl = new UnreliableImplementation();
* UnreliableInterface unreliable = (UnreliableInterface)
* RetryProxy.create(UnreliableInterface.class, unreliableImpl,
* RetryPolicies.retryUpToMaximumCountWithFixedSleep(4, 10,
* TimeUnit.SECONDS));
* unreliable.call();
*
* This will retry any method called on <code>unreliable</code> four times -
* in this case the <code>call()</code> method - sleeping 10 seconds between
* each retry. There are a number of
* {@link org.apache.hadoop.io.retry.RetryPolicies retry policies}
* available, or you can implement a custom one by implementing
* {@link org.apache.hadoop.io.retry.RetryPolicy}.
* It is also possible to specify retry policies on a
* {@link org.apache.hadoop.io.retry.RetryProxy#create(Class, Object, Map)
* per-method basis}.
*/
@InterfaceAudience.LimitedPrivate({"HBase", "HDFS", "MapReduce"})
@InterfaceStability.Evolving
package org.apache.hadoop.io.retry;

View File

@@ -1,48 +0,0 @@
<html>
<!--
Licensed to the Apache Software Foundation (ASF) under one or more
contributor license agreements. See the NOTICE file distributed with
this work for additional information regarding copyright ownership.
The ASF licenses this file to You under the Apache License, Version 2.0
(the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
-->
<body>
<p>
A mechanism for selectively retrying methods that throw exceptions under certain circumstances.
</p>
<p>
Typical usage is
</p>
<pre>
UnreliableImplementation unreliableImpl = new UnreliableImplementation();
UnreliableInterface unreliable = (UnreliableInterface)
RetryProxy.create(UnreliableInterface.class, unreliableImpl,
RetryPolicies.retryUpToMaximumCountWithFixedSleep(4, 10, TimeUnit.SECONDS));
unreliable.call();
</pre>
<p>
This will retry any method called on <code>unreliable</code> four times - in this case the <code>call()</code>
method - sleeping 10 seconds between
each retry. There are a number of {@link org.apache.hadoop.io.retry.RetryPolicies retry policies}
available, or you can implement a custom one by implementing {@link org.apache.hadoop.io.retry.RetryPolicy}.
It is also possible to specify retry policies on a
{@link org.apache.hadoop.io.retry.RetryProxy#create(Class, Object, Map) per-method basis}.
</p>
</body>
</html>

View File

@@ -15,6 +15,10 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* Tools to help define network clients and servers.
*/
@InterfaceAudience.LimitedPrivate({"HBase", "HDFS", "MapReduce"})
@InterfaceStability.Evolving
package org.apache.hadoop.ipc;

View File

@@ -1,23 +0,0 @@
<html>
<!--
Licensed to the Apache Software Foundation (ASF) under one or more
contributor license agreements. See the NOTICE file distributed with
this work for additional information regarding copyright ownership.
The ASF licenses this file to You under the Apache License, Version 2.0
(the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
-->
<body>
Tools to help define network clients and servers.
</body>
</html>