HDFS-13436. Fix javadoc of package-info.java

(cherry picked from commit 9b0b9f2af2)
Author: Akira Ajisaka
Date:   2018-04-13 13:23:44 +09:00
parent a375fe8263
commit 5531c31f16
6 changed files with 14 additions and 14 deletions
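
Each change below moves the package-level javadoc comment so that it sits immediately before the package annotations and the package declaration in package-info.java, the position the javadoc tool reads the package description from. A minimal sketch of the resulting layout, with a hypothetical package name used for illustration only:

/**
 * The package description comes first, immediately before the
 * annotated package declaration, so javadoc picks it up.
 */
@InterfaceStability.Evolving            // annotation applied to the package
package org.apache.hadoop.hdfs.example; // hypothetical package, not part of this patch

import org.apache.hadoop.classification.InterfaceStability;

As in the patched files, the import may follow the package statement; it still resolves the InterfaceStability name used in the annotation above it.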

File: org/apache/hadoop/hdfs/protocol/datatransfer/package-info.java

@@ -15,10 +15,10 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-@InterfaceStability.Evolving
-package org.apache.hadoop.hdfs.protocol.datatransfer;
-import org.apache.hadoop.classification.InterfaceStability;
 /**
  * This package contains classes related to hdfs data transfer protocol.
  */
+@InterfaceStability.Evolving
+package org.apache.hadoop.hdfs.protocol.datatransfer;
+import org.apache.hadoop.classification.InterfaceStability;

File: org/apache/hadoop/hdfs/server/common/blockaliasmap/package-info.java

@@ -15,13 +15,14 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-@InterfaceAudience.Public
-@InterfaceStability.Unstable
-package org.apache.hadoop.hdfs.server.common.blockaliasmap;
 /**
  * The AliasMap defines mapping of PROVIDED HDFS blocks to data in remote
  * storage systems.
  */
+@InterfaceAudience.Public
+@InterfaceStability.Unstable
+package org.apache.hadoop.hdfs.server.common.blockaliasmap;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;

File: org/apache/hadoop/hdfs/server/diskbalancer/connectors/package-info.java

@@ -15,8 +15,6 @@
  * the License.
  */
-package org.apache.hadoop.hdfs.server.diskbalancer.connectors;
 /**
  * Connectors package is a set of logical connectors that connect
  * to various data sources to read the hadoop cluster information.
@@ -35,3 +33,4 @@ package org.apache.hadoop.hdfs.server.diskbalancer.connectors;
  * we can crate dataNodes on the fly and attach to this connector and
  * ask the diskBalancer Cluster to read data from this source.
  */
+package org.apache.hadoop.hdfs.server.diskbalancer.connectors;

File: org/apache/hadoop/hdfs/server/diskbalancer/datamodel/package-info.java

@@ -16,7 +16,6 @@
  * limitations under the License.
  */
-package org.apache.hadoop.hdfs.server.diskbalancer.datamodel;
 /**
  * Disk Balancer Data Model is the Data Model for the cluster that
  * Disk Balancer is working against. This information is read
@@ -29,3 +28,4 @@ package org.apache.hadoop.hdfs.server.diskbalancer.datamodel;
  * DiskBalancerVolumeSets is a collection of DiskBalancerVolumes
  * DiskBalancerVolumes represents actual volumes on DataNodes.
  */
+package org.apache.hadoop.hdfs.server.diskbalancer.datamodel;

File: org/apache/hadoop/hdfs/server/diskbalancer/package-info.java

@@ -16,7 +16,6 @@
  * limitations under the License.
  */
-package org.apache.hadoop.hdfs.server.diskbalancer;
 /**
  * Disk Balancer connects to a {@link org.apache.hadoop.hdfs.server.datanode
  * .DataNode} and attempts to spread data across all volumes evenly.
@@ -34,3 +33,4 @@ package org.apache.hadoop.hdfs.server.diskbalancer;
  * 3) Disk Balancer operates against data nodes which are live and operational.
  *
  */
+package org.apache.hadoop.hdfs.server.diskbalancer;

File: org/apache/hadoop/hdfs/server/diskbalancer/planner/package-info.java

@@ -16,7 +16,6 @@
  * limitations under the License.
  */
-package org.apache.hadoop.hdfs.server.diskbalancer.planner;
 /**
  * Planner takes a DiskBalancerVolumeSet, threshold and
  * computes a series of steps that lead to an even data
@@ -44,3 +43,4 @@ package org.apache.hadoop.hdfs.server.diskbalancer.planner;
  * //when we are done , return the list of steps
  * return planner;
  */
+package org.apache.hadoop.hdfs.server.diskbalancer.planner;