diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index a6ef0d57cfd..3105bb47c55 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -216,6 +216,9 @@ Release 2.8.0 - UNRELEASED
HDFS-6184. Capture NN's thread dump when it fails over.
(Ming Ma via aajisaka)
+ HDFS-8350. Remove old webhdfs.xml and other outdated documentation stuff.
+ (Brahma Reddy Battula via aajisaka)
+
OPTIMIZATIONS
HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/docs/changes/ChangesFancyStyle.css b/hadoop-hdfs-project/hadoop-hdfs/src/main/docs/changes/ChangesFancyStyle.css
deleted file mode 100644
index 5eef2412b6a..00000000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/docs/changes/ChangesFancyStyle.css
+++ /dev/null
@@ -1,170 +0,0 @@
-/*
-* Licensed to the Apache Software Foundation (ASF) under one or more
-* contributor license agreements. See the NOTICE file distributed with
-* this work for additional information regarding copyright ownership.
-* The ASF licenses this file to You under the Apache License, Version 2.0
-* (the "License"); you may not use this file except in compliance with
-* the License. You may obtain a copy of the License at
-*
-* http://www.apache.org/licenses/LICENSE-2.0
-*
-* Unless required by applicable law or agreed to in writing, software
-* distributed under the License is distributed on an "AS IS" BASIS,
-* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-* See the License for the specific language governing permissions and
-* limitations under the License.
-*/
-/**
- * General
- */
-
-img { border: 0; }
-
-#content table {
- border: 0;
- width: 100%;
-}
-/*Hack to get IE to render the table at 100%*/
-* html #content table { margin-left: -3px; }
-
-#content th,
-#content td {
- margin: 0;
- padding: 0;
- vertical-align: top;
-}
-
-.clearboth {
- clear: both;
-}
-
-.note, .warning, .fixme {
- border: solid black 1px;
- margin: 1em 3em;
-}
-
-.note .label {
- background: #369;
- color: white;
- font-weight: bold;
- padding: 5px 10px;
-}
-.note .content {
- background: #F0F0FF;
- color: black;
- line-height: 120%;
- font-size: 90%;
- padding: 5px 10px;
-}
-.warning .label {
- background: #C00;
- color: white;
- font-weight: bold;
- padding: 5px 10px;
-}
-.warning .content {
- background: #FFF0F0;
- color: black;
- line-height: 120%;
- font-size: 90%;
- padding: 5px 10px;
-}
-.fixme .label {
- background: #C6C600;
- color: black;
- font-weight: bold;
- padding: 5px 10px;
-}
-.fixme .content {
- padding: 5px 10px;
-}
-
-/**
- * Typography
- */
-
-body {
- font-family: verdana, "Trebuchet MS", arial, helvetica, sans-serif;
- font-size: 100%;
-}
-
-#content {
- font-family: Georgia, Palatino, Times, serif;
- font-size: 95%;
-}
-#tabs {
- font-size: 70%;
-}
-#menu {
- font-size: 80%;
-}
-#footer {
- font-size: 70%;
-}
-
-h1, h2, h3, h4, h5, h6 {
- font-family: "Trebuchet MS", verdana, arial, helvetica, sans-serif;
- font-weight: bold;
- margin-top: 1em;
- margin-bottom: .5em;
-}
-
-h1 {
- margin-top: 0;
- margin-bottom: 1em;
- font-size: 1.4em;
- background-color: #73CAFF;
-}
-#content h1 {
- font-size: 160%;
- margin-bottom: .5em;
-}
-#menu h1 {
- margin: 0;
- padding: 10px;
- background: #336699;
- color: white;
-}
-h2 {
- font-size: 120%;
- background-color: #73CAFF;
-}
-h3 { font-size: 100%; }
-h4 { font-size: 90%; }
-h5 { font-size: 80%; }
-h6 { font-size: 75%; }
-
-p {
- line-height: 120%;
- text-align: left;
- margin-top: .5em;
- margin-bottom: 1em;
-}
-
-#content li,
-#content th,
-#content td,
-#content li ul,
-#content li ol{
- margin-top: .5em;
- margin-bottom: .5em;
-}
-
-
-#content li li,
-#minitoc-area li{
- margin-top: 0em;
- margin-bottom: 0em;
-}
-
-#content .attribution {
- text-align: right;
- font-style: italic;
- font-size: 85%;
- margin-top: 1em;
-}
-
-.codefrag {
- font-family: "Courier New", Courier, monospace;
- font-size: 110%;
-}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/docs/changes/ChangesSimpleStyle.css b/hadoop-hdfs-project/hadoop-hdfs/src/main/docs/changes/ChangesSimpleStyle.css
deleted file mode 100644
index 407d0f1cf6d..00000000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/docs/changes/ChangesSimpleStyle.css
+++ /dev/null
@@ -1,49 +0,0 @@
-/*
-* Licensed to the Apache Software Foundation (ASF) under one or more
-* contributor license agreements. See the NOTICE file distributed with
-* this work for additional information regarding copyright ownership.
-* The ASF licenses this file to You under the Apache License, Version 2.0
-* (the "License"); you may not use this file except in compliance with
-* the License. You may obtain a copy of the License at
-*
-* http://www.apache.org/licenses/LICENSE-2.0
-*
-* Unless required by applicable law or agreed to in writing, software
-* distributed under the License is distributed on an "AS IS" BASIS,
-* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-* See the License for the specific language governing permissions and
-* limitations under the License.
-*/
-
-body {
- font-family: Courier New, monospace;
- font-size: 10pt;
-}
-
-h1 {
- font-family: Courier New, monospace;
- font-size: 10pt;
-}
-
-h2 {
- font-family: Courier New, monospace;
- font-size: 10pt;
-}
-
-h3 {
- font-family: Courier New, monospace;
- font-size: 10pt;
-}
-
-a:link {
- color: blue;
-}
-
-a:visited {
- color: purple;
-}
-
-li {
- margin-top: 1em;
- margin-bottom: 1em;
-}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/docs/changes/changes2html.pl b/hadoop-hdfs-project/hadoop-hdfs/src/main/docs/changes/changes2html.pl
deleted file mode 100755
index 67e1826e116..00000000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/docs/changes/changes2html.pl
+++ /dev/null
@@ -1,282 +0,0 @@
-#!/usr/bin/perl
-#
-# Transforms Lucene Java's CHANGES.txt into Changes.html
-#
-# Input is on STDIN, output is to STDOUT
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements. See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-use strict;
-use warnings;
-
-my $jira_url_prefix = 'http://issues.apache.org/jira/browse/';
-my $title = undef;
-my $release = undef;
-my $sections = undef;
-my $items = undef;
-my $first_relid = undef;
-my $second_relid = undef;
-my @releases = ();
-
-my @lines = <>; # Get all input at once
-
-#
-# Parse input and build hierarchical release structure in @releases
-#
-for (my $line_num = 0 ; $line_num <= $#lines ; ++$line_num) {
- $_ = $lines[$line_num];
- next unless (/\S/); # Skip blank lines
-
- unless ($title) {
- if (/\S/) {
- s/^\s+//; # Trim leading whitespace
- s/\s+$//; # Trim trailing whitespace
- }
- $title = $_;
- next;
- }
-
- if (/^(Release)|(Trunk)/) { # Release headings
- $release = $_;
- $sections = [];
- push @releases, [ $release, $sections ];
- ($first_relid = lc($release)) =~ s/\s+/_/g if ($#releases == 0);
- ($second_relid = lc($release)) =~ s/\s+/_/g if ($#releases == 1);
- $items = undef;
- next;
- }
-
- # Section heading: 2 leading spaces, words all capitalized
- if (/^ ([A-Z]+)\s*/) {
- my $heading = $_;
- $items = [];
- push @$sections, [ $heading, $items ];
- next;
- }
-
- # Handle earlier releases without sections - create a headless section
- unless ($items) {
- $items = [];
- push @$sections, [ undef, $items ];
- }
-
- my $type;
- if (@$items) { # A list item has been encountered in this section before
- $type = $items->[0]; # 0th position of items array is list type
- } else {
- $type = get_list_type($_);
- push @$items, $type;
- }
-
- if ($type eq 'numbered') { # The modern items list style
- # List item boundary is another numbered item or an unindented line
- my $line;
- my $item = $_;
- $item =~ s/^(\s{0,2}\d+\.\s*)//; # Trim the leading item number
- my $leading_ws_width = length($1);
- $item =~ s/\s+$//; # Trim trailing whitespace
- $item .= "\n";
-
- while ($line_num < $#lines
- and ($line = $lines[++$line_num]) !~ /^(?:\s{0,2}\d+\.\s*\S|\S)/) {
- $line =~ s/^\s{$leading_ws_width}//; # Trim leading whitespace
- $line =~ s/\s+$//; # Trim trailing whitespace
- $item .= "$line\n";
- }
- $item =~ s/\n+\Z/\n/; # Trim trailing blank lines
- push @$items, $item;
- --$line_num unless ($line_num == $#lines);
- } elsif ($type eq 'paragraph') { # List item boundary is a blank line
- my $line;
- my $item = $_;
- $item =~ s/^(\s+)//;
- my $leading_ws_width = defined($1) ? length($1) : 0;
- $item =~ s/\s+$//; # Trim trailing whitespace
- $item .= "\n";
-
- while ($line_num < $#lines and ($line = $lines[++$line_num]) =~ /\S/) {
- $line =~ s/^\s{$leading_ws_width}//; # Trim leading whitespace
- $line =~ s/\s+$//; # Trim trailing whitespace
- $item .= "$line\n";
- }
- push @$items, $item;
- --$line_num unless ($line_num == $#lines);
- } else { # $type is one of the bulleted types
- # List item boundary is another bullet or a blank line
- my $line;
- my $item = $_;
- $item =~ s/^(\s*$type\s*)//; # Trim the leading bullet
- my $leading_ws_width = length($1);
- $item =~ s/\s+$//; # Trim trailing whitespace
- $item .= "\n";
-
- while ($line_num < $#lines
- and ($line = $lines[++$line_num]) !~ /^\s*(?:$type|\Z)/) {
- $line =~ s/^\s{$leading_ws_width}//; # Trim leading whitespace
- $line =~ s/\s+$//; # Trim trailing whitespace
- $item .= "$line\n";
- }
- push @$items, $item;
- --$line_num unless ($line_num == $#lines);
- }
-}
-
-#
-# Print HTML-ified version to STDOUT
-#
-print<<"__HTML_HEADER__";
-
-
-
- $title
-
-
-
-
-
-
-
-
-$title
-
-__HTML_HEADER__
-
-my $heading;
-my $relcnt = 0;
-my $header = 'h2';
-for my $rel (@releases) {
- if (++$relcnt == 3) {
- $header = 'h3';
- print "\n";
- print "\n"
- }
-
- ($release, $sections) = @$rel;
-
- # The first section heading is undefined for the older sectionless releases
- my $has_release_sections = $sections->[0][0];
-
- (my $relid = lc($release)) =~ s/\s+/_/g;
- print "<$header>";
- print "$release";
- print " $header>\n";
- print "\n"
- if ($has_release_sections);
-
- for my $section (@$sections) {
- ($heading, $items) = @$section;
- (my $sectid = lc($heading)) =~ s/\s+/_/g;
- my $numItemsStr = $#{$items} > 0 ? "($#{$items})" : "(none)";
-
- print " ",
- ($heading || ''), " $numItemsStr\n"
- if ($has_release_sections);
-
- my $list_type = $items->[0] || '';
- my $list = ($has_release_sections || $list_type eq 'numbered' ? 'ol' : 'ul');
- my $listid = $sectid ? "$relid.$sectid" : $relid;
- print " <$list id=\"$listid\">\n";
-
- for my $itemnum (1..$#{$items}) {
- my $item = $items->[$itemnum];
- $item =~ s:&:&amp;:g; # Escape HTML metachars
- $item =~ s:<:&lt;:g;
- $item =~ s:>:&gt;:g;
-
- $item =~ s:\s*(\([^)"]+?\))\s*$: $1:; # Separate attribution
- $item =~ s:\n{2,}:\n<p/>\n:g; # Keep paragraph breaks
- $item =~ s{(?:${jira_url_prefix})?(HADOOP-\d+)} # Link to JIRA
- {<a href="${jira_url_prefix}$1">$1</a>}g;
- print " $item \n";
- }
- print " $list>\n";
- print " \n" if ($has_release_sections);
- }
- print " \n" if ($has_release_sections);
-}
-print " \n" if ($relcnt > 3);
-print "\n\n";
-
-
-#
-# Subroutine: get_list_type
-#
-# Takes one parameter:
-#
-# - The first line of a sub-section/point
-#
-# Returns one scalar:
-#
-# - The list type: 'numbered'; or one of the bulleted types '-', or '.' or
-# 'paragraph'.
-#
-sub get_list_type {
- my $first_list_item_line = shift;
- my $type = 'paragraph'; # Default to paragraph type
-
- if ($first_list_item_line =~ /^\s{0,2}\d+\.\s+\S+/) {
- $type = 'numbered';
- } elsif ($first_list_item_line =~ /^\s*([-.])\s+\S+/) {
- $type = $1;
- }
- return $type;
-}
-
-1;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/docs/releasenotes.html b/hadoop-hdfs-project/hadoop-hdfs/src/main/docs/releasenotes.html
deleted file mode 100644
index 3557e06334c..00000000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/docs/releasenotes.html
+++ /dev/null
@@ -1 +0,0 @@
-THIS IS A PLACEHOLDER. REAL RELEASE NOTES WILL BE ADDED TO THIS FILE IN RELEASE BRANCHES.
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/docs/src/documentation/README.txt b/hadoop-hdfs-project/hadoop-hdfs/src/main/docs/src/documentation/README.txt
deleted file mode 100644
index 9bc261b2f15..00000000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/docs/src/documentation/README.txt
+++ /dev/null
@@ -1,7 +0,0 @@
-This is the base documentation directory.
-
-skinconf.xml # This file customizes Forrest for your project. In it, you
- # tell forrest the project name, logo, copyright info, etc
-
-sitemap.xmap # Optional. This sitemap is consulted before all core sitemaps.
- # See http://forrest.apache.org/docs/project-sitemap.html
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/docs/src/documentation/classes/CatalogManager.properties b/hadoop-hdfs-project/hadoop-hdfs/src/main/docs/src/documentation/classes/CatalogManager.properties
deleted file mode 100644
index b9cb5848fbf..00000000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/docs/src/documentation/classes/CatalogManager.properties
+++ /dev/null
@@ -1,40 +0,0 @@
-#
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements. See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-#=======================================================================
-# CatalogManager.properties
-#
-# This is the default properties file for Apache Forrest.
-# This facilitates local configuration of application-specific catalogs.
-#
-# See the Apache Forrest documentation:
-# http://forrest.apache.org/docs/your-project.html
-# http://forrest.apache.org/docs/validation.html
-
-# verbosity ... level of messages for status/debug
-# See forrest/src/core/context/WEB-INF/cocoon.xconf
-
-# catalogs ... list of additional catalogs to load
-# (Note that Apache Forrest will automatically load its own default catalog
-# from src/core/context/resources/schema/catalog.xcat)
-# use full pathnames
-# pathname separator is always semi-colon (;) regardless of operating system
-# directory separator is always slash (/) regardless of operating system
-#
-#catalogs=/home/me/forrest/my-site/src/documentation/resources/schema/catalog.xcat
-catalogs=
-
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/docs/src/documentation/conf/cli.xconf b/hadoop-hdfs-project/hadoop-hdfs/src/main/docs/src/documentation/conf/cli.xconf
deleted file mode 100644
index 5c6e245688c..00000000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/docs/src/documentation/conf/cli.xconf
+++ /dev/null
@@ -1,327 +0,0 @@
-<!-- cli.xconf (Forrest/Cocoon command-line configuration): XML markup lost in
-     extraction. Recoverable settings: context directory ".", configuration
-     file WEB-INF/cocoon.xconf, work directory ../tmp/cocoon-work, destination
-     directory ../site, default file index.html, URI pattern */*. -->
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/docs/src/documentation/content/xdocs/index.xml b/hadoop-hdfs-project/hadoop-hdfs/src/main/docs/src/documentation/content/xdocs/index.xml
deleted file mode 100644
index ff516c55da3..00000000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/docs/src/documentation/content/xdocs/index.xml
+++ /dev/null
@@ -1,46 +0,0 @@
-<!-- index.xml (HDFS Overview page): XML markup lost in extraction; the
-     surviving body text follows. -->
-
-The HDFS Documentation provides the information you need to get started using
-the Hadoop Distributed File System. Begin with the HDFS Users Guide to obtain
-an overview of the system and then move on to the HDFS Architecture Guide for
-more detailed information.
-
-HDFS commonly works in tandem with a cluster environment and MapReduce
-applications. For information about Hadoop clusters (single or multi node)
-see the Hadoop Common Documentation. For information about MapReduce see the
-MapReduce Documentation.
-
-If you have more questions, you can ask on the HDFS Mailing Lists or browse
-the Mailing List Archives.
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/docs/src/documentation/content/xdocs/site.xml b/hadoop-hdfs-project/hadoop-hdfs/src/main/docs/src/documentation/content/xdocs/site.xml
deleted file mode 100644
index ffb32198333..00000000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/docs/src/documentation/content/xdocs/site.xml
+++ /dev/null
@@ -1,289 +0,0 @@
-<!-- site.xml (Forrest site navigation, 289 lines): the file consisted
-     entirely of XML markup, and the element content was lost in extraction;
-     nothing is recoverable. -->
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/docs/src/documentation/content/xdocs/tabs.xml b/hadoop-hdfs-project/hadoop-hdfs/src/main/docs/src/documentation/content/xdocs/tabs.xml
deleted file mode 100644
index 200370d6522..00000000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/docs/src/documentation/content/xdocs/tabs.xml
+++ /dev/null
@@ -1,37 +0,0 @@
-<!-- tabs.xml (Forrest tab definitions, 37 lines): XML markup lost in
-     extraction; nothing is recoverable. -->
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/docs/src/documentation/content/xdocs/webhdfs.xml b/hadoop-hdfs-project/hadoop-hdfs/src/main/docs/src/documentation/content/xdocs/webhdfs.xml
deleted file mode 100644
index c8e0c627dc1..00000000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/docs/src/documentation/content/xdocs/webhdfs.xml
+++ /dev/null
@@ -1,1577 +0,0 @@
-<!-- webhdfs.xml (WebHDFS REST API documentation): the XML prolog, license
-     header, and document header were lost in extraction; the surviving body
-     text follows. -->
- Document Conventions
-
-   Monospaced     Used for commands, HTTP requests and responses, and code blocks.
-   <Monospaced>   User-entered values.
-   [Monospaced]   Optional values. When a value is not specified, the default value is used.
-   Italics        Important phrases and words.
-
-
-
- Introduction
-
- The HTTP REST API supports the complete FileSystem interface for HDFS.
- The operations and the corresponding FileSystem methods are shown in the next section.
- The Section HTTP Query Parameter Dictionary specifies the parameter details
- such as the defaults and the valid values.
-
-
- Operations
-
-   HTTP GET:    OPEN, GETFILESTATUS, LISTSTATUS, GETCONTENTSUMMARY,
-                GETFILECHECKSUM, GETHOMEDIRECTORY, GETDELEGATIONTOKEN
-   HTTP PUT:    CREATE, MKDIRS, RENAME, SETREPLICATION, SETOWNER,
-                SETPERMISSION, SETTIMES, RENEWDELEGATIONTOKEN,
-                CANCELDELEGATIONTOKEN
-   HTTP POST:   APPEND
-   HTTP DELETE: DELETE
-
-
-
-
-
- FileSystem URIs vs HTTP URLs
-
- The FileSystem scheme of WebHDFS is "webhdfs://
".
- A WebHDFS FileSystem URI has the following format.
-
-
- webhdfs://<HOST>:<HTTP_PORT>/<PATH>
-
-
- The above WebHDFS URI corresponds to the below HDFS URI.
-
-
- hdfs://<HOST>:<RPC_PORT>/<PATH>
-
-
- In the REST API, the prefix "/webhdfs/v1
" is inserted in the path and a query is appended at the end.
- Therefore, the corresponding HTTP URL has the following format.
-
-
- http://<HOST>:<HTTP_PORT>/webhdfs/v1/<PATH>?op=...
-
-
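
As an illustrative sketch (the host nn.example.com:50070 is a placeholder), a
Hadoop client can reach the same namespace over HTTP simply by using the
webhdfs scheme with the standard FileSystem API:

  import java.net.URI;
  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.fs.FileSystem;

  public class WebHdfsUriExample {
    public static void main(String[] args) throws Exception {
      // webhdfs:// routes FileSystem calls over the NameNode's HTTP port
      // instead of the RPC port used by hdfs://.
      FileSystem fs = FileSystem.get(
          URI.create("webhdfs://nn.example.com:50070/"), new Configuration());
      System.out.println(fs.getUri());           // webhdfs://nn.example.com:50070
      System.out.println(fs.getHomeDirectory()); // e.g. .../user/<USER>
    }
  }
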
-
-
- HDFS Configuration Options
-
- Below are the HDFS configuration options for WebHDFS.
-
-
- Property Name                              Description
- dfs.webhdfs.enabled                        Enable/disable WebHDFS in Namenodes and Datanodes.
- dfs.web.authentication.kerberos.principal  The HTTP Kerberos principal used by Hadoop-Auth in the
-                                            HTTP endpoint. The HTTP Kerberos principal MUST start
-                                            with 'HTTP/' per the Kerberos HTTP SPNEGO specification.
- dfs.web.authentication.kerberos.keytab     The Kerberos keytab file with the credentials for the
-                                            HTTP Kerberos principal used by Hadoop-Auth in the
-                                            HTTP endpoint.
-
-
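
These keys are normally configured in hdfs-site.xml on the NameNode and
DataNodes. As a sketch only, the same keys can also be set on a Hadoop
Configuration object; the principal and keytab values below are placeholders:

  import org.apache.hadoop.conf.Configuration;

  public class WebHdfsConfExample {
    public static Configuration webhdfsConf() {
      Configuration conf = new Configuration();
      conf.setBoolean("dfs.webhdfs.enabled", true);
      // Placeholder values; real deployments put these in hdfs-site.xml.
      conf.set("dfs.web.authentication.kerberos.principal",
          "HTTP/_HOST@EXAMPLE.COM");
      conf.set("dfs.web.authentication.kerberos.keytab",
          "/etc/security/keytabs/spnego.service.keytab");
      return conf;
    }
  }
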
-
-
-
-
- Authentication
-
- When security is off, the authenticated user is the username specified in the user.name query parameter.
- If the user.name parameter is not set,
- the server may either set the authenticated user to a default web user, if there is any, or return an error response.
-
-
- When security is on, authentication is performed by either Hadoop delegation token or Kerberos SPNEGO.
- If a token is set in the delegation query parameter, the authenticated user is the user encoded in the token.
- If the delegation parameter is not set, the user is authenticated by Kerberos SPNEGO.
-
-
- Below are examples using the curl command tool.
-
-
-
- Authentication when security is off:
-
-curl -i "http://<HOST>:<PORT>/webhdfs/v1/<PATH>?[user.name=<USER>&]op=..."
-
-
- Authentication using Kerberos SPNEGO when security is on:
-
-curl -i --negotiate -u : "http://<HOST>:<PORT>/webhdfs/v1/<PATH>?op=..."
-
-
- Authentication using Hadoop delegation token when security is on:
-
-curl -i "http://<HOST>:<PORT>/webhdfs/v1/<PATH>?delegation=<TOKEN>&op=..."
-
-
-
-
-
-
- Proxy Users
-
- When the proxy user feature is enabled, a proxy user P may submit a request on behalf of another user U.
- The username of U must be specified in the doas query parameter unless a delegation token is presented in authentication.
- In that case, the information of both users P and U must be encoded in the delegation token.
-
-
-
- A proxy request when security is off:
-
-curl -i "http://<HOST>:<PORT>/webhdfs/v1/<PATH>?[user.name=<USER>&]doas=<USER>&op=..."
-
-
- A proxy request using Kerberos SPNEGO when security is on:
-
-curl -i --negotiate -u : "http://<HOST>:<PORT>/webhdfs/v1/<PATH>?doas=<USER>&op=..."
-
-
- A proxy request using Hadoop delegation token when security is on:
-
-curl -i "http://<HOST>:<PORT>/webhdfs/v1/<PATH>?delegation=<TOKEN>&op=..."
-
-
-
-
-
-
-
- File and Directory Operations
-
- Create and Write to a File
-
- Step 1: Submit an HTTP PUT request without automatically following redirects and without sending the file data.
-
-curl -i -X PUT "http://<HOST>:<PORT>/webhdfs/v1/<PATH>?op=CREATE
- [&overwrite=<true|false>][&blocksize=<LONG>][&replication=<SHORT>]
- [&permission=<OCTAL>][&buffersize=<INT>]"
-
-The request is redirected to a datanode where the file data is to be written:
-
-HTTP/1.1 307 TEMPORARY_REDIRECT
-Location: http://<DATANODE>:<PORT>/webhdfs/v1/<PATH>?op=CREATE...
-Content-Length: 0
-
-
- Step 2: Submit another HTTP PUT request using the URL in the Location header with the file data to be written.
-
-curl -i -X PUT -T <LOCAL_FILE> "http://<DATANODE>:<PORT>/webhdfs/v1/<PATH>?op=CREATE..."
-
-The client receives a 201 Created response with zero content length
-and the WebHDFS URI of the file in the Location header:
-
-HTTP/1.1 201 Created
-Location: webhdfs://<HOST>:<PORT>/<PATH>
-Content-Length: 0
-
-
-
-
- Note that the reason for the two-step create/append is
- to prevent clients from sending data before the redirect.
- This issue is addressed by the "Expect: 100-continue" header in HTTP/1.1;
- see RFC 2616, Section 8.2.3.
- Unfortunately, there are software library bugs (e.g. the Jetty 6 HTTP server and the Java 6 HTTP client)
- which do not correctly implement "Expect: 100-continue".
- The two-step create/append is a temporary workaround for these library bugs.
-
-
- See also: overwrite, blocksize, replication, permission, buffersize, FileSystem.create
-
-
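
As an illustrative sketch, the same two-step exchange can be driven from Java
with HttpURLConnection; the host, path, and user below are placeholders, and
error handling is omitted:

  import java.io.OutputStream;
  import java.net.HttpURLConnection;
  import java.net.URL;

  public class WebHdfsCreateExample {
    public static void main(String[] args) throws Exception {
      // Step 1: ask the NameNode where to write; do not follow the redirect.
      URL nn = new URL("http://nn.example.com:50070/webhdfs/v1/tmp/demo.txt"
          + "?op=CREATE&user.name=webuser&overwrite=true");
      HttpURLConnection c1 = (HttpURLConnection) nn.openConnection();
      c1.setRequestMethod("PUT");
      c1.setInstanceFollowRedirects(false);
      String datanodeUrl = c1.getHeaderField("Location"); // 307 redirect target
      c1.disconnect();

      // Step 2: send the file data to the DataNode URL from the Location header.
      HttpURLConnection c2 =
          (HttpURLConnection) new URL(datanodeUrl).openConnection();
      c2.setRequestMethod("PUT");
      c2.setDoOutput(true);
      try (OutputStream out = c2.getOutputStream()) {
        out.write("Hello, webhdfs user!\n".getBytes("UTF-8"));
      }
      System.out.println(c2.getResponseCode()); // expect 201 Created
    }
  }
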
-
-
- Append to a File
-
- Step 1: Submit an HTTP POST request without automatically following redirects and without sending the file data.
-
-curl -i -X POST "http://<HOST>:<PORT>/webhdfs/v1/<PATH>?op=APPEND[&buffersize=<INT>]"
-
-The request is redirected to a datanode where the file data is to be appended:
-
-HTTP/1.1 307 TEMPORARY_REDIRECT
-Location: http://<DATANODE>:<PORT>/webhdfs/v1/<PATH>?op=APPEND...
-Content-Length: 0
-
-
- Step 2: Submit another HTTP POST request using the URL in the Location header with the file data to be appended.
-
-curl -i -X POST -T <LOCAL_FILE> "http://<DATANODE>:<PORT>/webhdfs/v1/<PATH>?op=APPEND..."
-
-The client receives a response with zero content length:
-
-HTTP/1.1 200 OK
-Content-Length: 0
-
-
-
-
- See the note in the previous section for the description of why this operation requires two steps.
-
-
- See also: buffersize, FileSystem.append
-
-
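
Through the FileSystem API the two HTTP steps are hidden behind a single
append call; a sketch with placeholder host and path:

  import java.net.URI;
  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.fs.FSDataOutputStream;
  import org.apache.hadoop.fs.FileSystem;
  import org.apache.hadoop.fs.Path;

  public class WebHdfsAppendExample {
    public static void main(String[] args) throws Exception {
      FileSystem fs = FileSystem.get(
          URI.create("webhdfs://nn.example.com:50070/"), new Configuration());
      // op=APPEND: the client library performs both HTTP requests internally.
      try (FSDataOutputStream out = fs.append(new Path("/tmp/demo.txt"))) {
        out.write("appended line\n".getBytes("UTF-8"));
      }
    }
  }
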
-
-
- Open and Read a File
-
- Submit an HTTP GET request with automatically following redirects.
-
-curl -i -L "http://<HOST>:<PORT>/webhdfs/v1/<PATH>?op=OPEN
- [&offset=<LONG>][&length=<LONG>][&buffersize=<INT>]"
-
-The request is redirected to a datanode where the file data can be read:
-
-HTTP/1.1 307 TEMPORARY_REDIRECT
-Location: http://<DATANODE>:<PORT>/webhdfs/v1/<PATH>?op=OPEN...
-Content-Length: 0
-
-The client follows the redirect to the datanode and receives the file data:
-
-HTTP/1.1 200 OK
-Content-Type: application/octet-stream
-Content-Length: 22
-
-Hello, webhdfs user!
-
-
-
-
- See also: offset, length, buffersize, FileSystem.open
-
-
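
When the FileSystem API is used instead of raw HTTP, the offset and length
parameters correspond to a seek plus a bounded read; a sketch with placeholder
host and path:

  import java.net.URI;
  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.fs.FSDataInputStream;
  import org.apache.hadoop.fs.FileSystem;
  import org.apache.hadoop.fs.Path;

  public class WebHdfsReadExample {
    public static void main(String[] args) throws Exception {
      FileSystem fs = FileSystem.get(
          URI.create("webhdfs://nn.example.com:50070/"), new Configuration());
      byte[] buf = new byte[16];                      // like length=16
      try (FSDataInputStream in = fs.open(new Path("/tmp/demo.txt"))) {
        in.seek(7);                                   // like offset=7
        int n = in.read(buf, 0, buf.length);
        System.out.println(new String(buf, 0, n, "UTF-8"));
      }
    }
  }
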
-
-
- Make a Directory
-
- Submit an HTTP PUT request.
-
- curl -i -X PUT "http://<HOST>:<PORT>/webhdfs/v1/<PATH>?op=MKDIRS[&permission=<OCTAL>]"
-
-The client receives a response with a boolean JSON object:
-
-HTTP/1.1 200 OK
-Content-Type: application/json
-Transfer-Encoding: chunked
-
-{"boolean": true}
-
-
-
-
- See also: permission, FileSystem.mkdirs
-
-
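
A sketch of the equivalent FileSystem call; the octal permission query
parameter maps to an FsPermission argument (host and path are placeholders):

  import java.net.URI;
  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.fs.FileSystem;
  import org.apache.hadoop.fs.Path;
  import org.apache.hadoop.fs.permission.FsPermission;

  public class WebHdfsMkdirsExample {
    public static void main(String[] args) throws Exception {
      FileSystem fs = FileSystem.get(
          URI.create("webhdfs://nn.example.com:50070/"), new Configuration());
      // op=MKDIRS&permission=711 corresponds to:
      boolean created = fs.mkdirs(new Path("/user/webuser/bar"),
                                  new FsPermission((short) 0711));
      System.out.println(created); // the "boolean" in the JSON response
    }
  }
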
-
-
- Rename a File/Directory
-
- Submit an HTTP PUT request.
-
-curl -i -X PUT "http://<HOST>:<PORT>/webhdfs/v1/<PATH>?op=RENAME&destination=<PATH>"
-
-The client receives a response with a boolean JSON object:
-
-HTTP/1.1 200 OK
-Content-Type: application/json
-Transfer-Encoding: chunked
-
-{"boolean": true}
-
-
-
-
- See also: destination, FileSystem.rename
-
-
-
-
- Delete a File/Directory
-
- Submit an HTTP DELETE request.
-
-curl -i -X DELETE "http://<HOST>:<PORT>/webhdfs/v1/<PATH>?op=DELETE
-                              [&recursive=<true|false>]"
-
-The client receives a response with a boolean JSON object:
-
-HTTP/1.1 200 OK
-Content-Type: application/json
-Transfer-Encoding: chunked
-
-{"boolean": true}
-
-
-
-
- See also: recursive, FileSystem.delete
-
-
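
A sketch of RENAME and DELETE through the FileSystem API; each call returns
the boolean carried in the JSON response (host and paths are placeholders):

  import java.net.URI;
  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.fs.FileSystem;
  import org.apache.hadoop.fs.Path;

  public class WebHdfsRenameDeleteExample {
    public static void main(String[] args) throws Exception {
      FileSystem fs = FileSystem.get(
          URI.create("webhdfs://nn.example.com:50070/"), new Configuration());
      // op=RENAME&destination=/tmp/b
      boolean renamed = fs.rename(new Path("/tmp/a"), new Path("/tmp/b"));
      // op=DELETE&recursive=true
      boolean deleted = fs.delete(new Path("/tmp/b"), true);
      System.out.println(renamed + " " + deleted);
    }
  }
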
-
-
- Status of a File/Directory
-
- Submit an HTTP GET request.
-
-curl -i "http://<HOST>:<PORT>/webhdfs/v1/<PATH>?op=GETFILESTATUS"
-
-The client receives a response with a FileStatus JSON object:
-
-HTTP/1.1 200 OK
-Content-Type: application/json
-Transfer-Encoding: chunked
-
-{
- "FileStatus":
- {
- "accessTime" : 0,
- "blockSize" : 0,
- "group" : "supergroup",
- "length" : 0, //in bytes, zero for directories
- "modificationTime": 1320173277227,
- "owner" : "webuser",
- "pathSuffix" : "",
- "permission" : "777",
- "replication" : 0,
- "type" : "DIRECTORY" //enum {FILE, DIRECTORY, SYMLINK}
- }
-}
-
-
-
-
- See also:
- FileSystem.getFileStatus
-
-
-
-
- List a Directory
-
- Submit an HTTP GET request.
-
-curl -i "http://<HOST>:<PORT>/webhdfs/v1/<PATH>?op=LISTSTATUS"
-
-The client receives a response with a FileStatuses JSON object:
-
-HTTP/1.1 200 OK
-Content-Type: application/json
-Content-Length: 427
-
-{
- "FileStatuses":
- {
- "FileStatus":
- [
- {
- "accessTime" : 1320171722771,
- "blockSize" : 33554432,
- "group" : "supergroup",
- "length" : 24930,
- "modificationTime": 1320171722771,
- "owner" : "webuser",
- "pathSuffix" : "a.patch",
- "permission" : "644",
- "replication" : 1,
- "type" : "FILE"
- },
- {
- "accessTime" : 0,
- "blockSize" : 0,
- "group" : "supergroup",
- "length" : 0,
- "modificationTime": 1320895981256,
- "owner" : "szetszwo",
- "pathSuffix" : "bar",
- "permission" : "711",
- "replication" : 0,
- "type" : "DIRECTORY"
- },
- ...
- ]
- }
-}
-
-
-
-
- See also:
- FileSystem.listStatus
-
-
-
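
The fields of each FileStatus JSON object above correspond to getters on the
FileStatus class; a sketch that prints the same information (host and path are
placeholders):

  import java.net.URI;
  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.fs.FileStatus;
  import org.apache.hadoop.fs.FileSystem;
  import org.apache.hadoop.fs.Path;

  public class WebHdfsListExample {
    public static void main(String[] args) throws Exception {
      FileSystem fs = FileSystem.get(
          URI.create("webhdfs://nn.example.com:50070/"), new Configuration());
      for (FileStatus st : fs.listStatus(new Path("/user/webuser"))) {
        System.out.printf("%s type=%s length=%d owner=%s perm=%s repl=%d%n",
            st.getPath().getName(),                  // pathSuffix
            st.isDirectory() ? "DIRECTORY" : "FILE", // type
            st.getLen(),                             // length
            st.getOwner(),                           // owner
            st.getPermission(),                      // permission
            st.getReplication());                    // replication
      }
    }
  }
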
-
-
-
- Other File System Operations
-
- Get Content Summary of a Directory
-
- Submit an HTTP GET request.
-
-curl -i "http://<HOST>:<PORT>/webhdfs/v1/<PATH>?op=GETCONTENTSUMMARY"
-
-The client receives a response with a ContentSummary JSON object:
-
-HTTP/1.1 200 OK
-Content-Type: application/json
-Transfer-Encoding: chunked
-
-{
- "ContentSummary":
- {
- "directoryCount": 2,
- "fileCount" : 1,
- "length" : 24930,
- "quota" : -1,
- "spaceConsumed" : 24930,
- "spaceQuota" : -1
- }
-}
-
-
-
-
- See also:
- FileSystem.getContentSummary
-
-
-
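
A sketch of the same query through FileSystem.getContentSummary; the getters
mirror the JSON fields (host and path are placeholders):

  import java.net.URI;
  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.fs.ContentSummary;
  import org.apache.hadoop.fs.FileSystem;
  import org.apache.hadoop.fs.Path;

  public class WebHdfsSummaryExample {
    public static void main(String[] args) throws Exception {
      FileSystem fs = FileSystem.get(
          URI.create("webhdfs://nn.example.com:50070/"), new Configuration());
      ContentSummary cs = fs.getContentSummary(new Path("/user/webuser"));
      System.out.println("directoryCount=" + cs.getDirectoryCount());
      System.out.println("fileCount="      + cs.getFileCount());
      System.out.println("length="         + cs.getLength());
      System.out.println("quota="          + cs.getQuota());
      System.out.println("spaceConsumed="  + cs.getSpaceConsumed());
      System.out.println("spaceQuota="     + cs.getSpaceQuota());
    }
  }
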
-
- Get File Checksum
-
- Submit an HTTP GET request.
-
-curl -i "http://<HOST>:<PORT>/webhdfs/v1/<PATH>?op=GETFILECHECKSUM"
-
-The request is redirected to a datanode:
-
-HTTP/1.1 307 TEMPORARY_REDIRECT
-Location: http://<DATANODE>:<PORT>/webhdfs/v1/<PATH>?op=GETFILECHECKSUM...
-Content-Length: 0
-
-The client follows the redirect to the datanode and receives a FileChecksum JSON object:
-
-HTTP/1.1 200 OK
-Content-Type: application/json
-Transfer-Encoding: chunked
-
-{
- "FileChecksum":
- {
- "algorithm": "MD5-of-1MD5-of-512CRC32",
- "bytes" : "eadb10de24aa315748930df6e185c0d ...",
- "length" : 28
- }
-}
-
-
-
-
- See also:
- FileSystem.getFileChecksum
-
-
-
-
- Get Home Directory
-
- Submit an HTTP GET request.
-
-curl -i "http://<HOST>:<PORT>/webhdfs/v1/?op=GETHOMEDIRECTORY"
-
-The client receives a response with a Path JSON object:
-
-HTTP/1.1 200 OK
-Content-Type: application/json
-Transfer-Encoding: chunked
-
-{"Path": "/user/szetszwo"}
-
-
-
-
- See also:
- FileSystem.getHomeDirectory
-
-
-
-
- Set Permission
-
- Submit an HTTP PUT request.
-
-curl -i -X PUT "http://<HOST>:<PORT>/webhdfs/v1/<PATH>?op=SETPERMISSION
- [&permission=<OCTAL>]"
-
-The client receives a response with zero content length:
-
-HTTP/1.1 200 OK
-Content-Length: 0
-
-
-
-
- See also: permission, FileSystem.setPermission
-
-
-
-
- Set Owner
-
- Submit an HTTP PUT request.
-
-curl -i -X PUT "http://<HOST>:<PORT>/webhdfs/v1/<PATH>?op=SETOWNER
- [&owner=<USER>][&group=<GROUP>]"
-
-The client receives a response with zero content length:
-
-HTTP/1.1 200 OK
-Content-Length: 0
-
-
-
-
- See also: owner, group, FileSystem.setOwner
-
-
-
-
- Set Replication Factor
-
- Submit an HTTP PUT request.
-
-curl -i -X PUT "http://<HOST>:<PORT>/webhdfs/v1/<PATH>?op=SETREPLICATION
- [&replication=<SHORT>]"
-
-The client receives a response with a boolean JSON object:
-
-HTTP/1.1 200 OK
-Content-Type: application/json
-Transfer-Encoding: chunked
-
-{"boolean": true}
-
-
-
-
- See also: replication, FileSystem.setReplication
-
-
-
-
- Set Access or Modification Time
-
- Submit an HTTP PUT request.
-
-curl -i -X PUT "http://<HOST>:<PORT>/webhdfs/v1/<PATH>?op=SETTIMES
- [&modificationtime=<TIME>][&accesstime=<TIME>]"
-
-The client receives a response with zero content length:
-
-HTTP/1.1 200 OK
-Content-Length: 0
-
-
-
-
- See also: modificationtime, accesstime, FileSystem.setTimes
-
-
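
Each of the four metadata operations above is a single FileSystem call; a
combined sketch with placeholder host, path, owner, and group:

  import java.net.URI;
  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.fs.FileSystem;
  import org.apache.hadoop.fs.Path;
  import org.apache.hadoop.fs.permission.FsPermission;

  public class WebHdfsMetadataExample {
    public static void main(String[] args) throws Exception {
      FileSystem fs = FileSystem.get(
          URI.create("webhdfs://nn.example.com:50070/"), new Configuration());
      Path p = new Path("/tmp/demo.txt");
      fs.setPermission(p, new FsPermission((short) 0644)); // op=SETPERMISSION
      fs.setOwner(p, "webuser", "supergroup");             // op=SETOWNER
      fs.setReplication(p, (short) 3);                     // op=SETREPLICATION
      // op=SETTIMES; -1 keeps the access time unchanged, per the dictionary.
      fs.setTimes(p, System.currentTimeMillis(), -1);
    }
  }
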
-
-
-
-
- Delegation Token Operations
-
- Get Delegation Token
-
- Submit an HTTP GET request.
-
-curl -i "http://<HOST>:<PORT>/webhdfs/v1/?op=GETDELEGATIONTOKEN&renewer=<USER>"
-
-The client receives a response with a Token JSON object:
-
-HTTP/1.1 200 OK
-Content-Type: application/json
-Transfer-Encoding: chunked
-
-{
- "Token":
- {
- "urlString": "JQAIaG9y..."
- }
-}
-
-
-
-
- See also: renewer, FileSystem.getDelegationToken
-
-
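
The urlString in the response is the URL-safe encoding referenced in the
Delegation parameter section below; a sketch with placeholder host and
renewer:

  import java.net.URI;
  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.fs.FileSystem;
  import org.apache.hadoop.security.token.Token;

  public class WebHdfsTokenExample {
    public static void main(String[] args) throws Exception {
      FileSystem fs = FileSystem.get(
          URI.create("webhdfs://nn.example.com:50070/"), new Configuration());
      Token<?> token = fs.getDelegationToken("webuser");
      // The URL-safe form that the delegation= query parameter expects.
      System.out.println(token.encodeToUrlString());
    }
  }
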
-
-
- Renew Delegation Token
-
- Submit an HTTP PUT request.
-
-curl -i -X PUT "http://<HOST>:<PORT>/webhdfs/v1/?op=RENEWDELEGATIONTOKEN&token=<TOKEN>"
-
-The client receives a response with a long JSON object:
-
-HTTP/1.1 200 OK
-Content-Type: application/json
-Transfer-Encoding: chunked
-
-{"long": 1320962673997} //the new expiration time
-
-
-
-
- See also: token, DistributedFileSystem.renewDelegationToken
-
-
-
-
- Cancel Delegation Token
-
- Submit an HTTP PUT request.
-
-curl -i -X PUT "http://<HOST>:<PORT>/webhdfs/v1/?op=CANCELDELEGATIONTOKEN&token=<TOKEN>"
-
-The client receives a response with zero content length:
-
-HTTP/1.1 200 OK
-Content-Length: 0
-
-
-
-
- See also: token, DistributedFileSystem.cancelDelegationToken
-
-
-
-
-
-
- Error Responses
-
- When an operation fails, the server may throw an exception.
- The JSON schema of error responses is defined in the RemoteException JSON schema.
- The table below shows the mapping from exceptions to HTTP response codes.
-
-
- HTTP Response Codes
-
- Exceptions                     HTTP Response Codes
- IllegalArgumentException       400 Bad Request
- UnsupportedOperationException  400 Bad Request
- SecurityException              401 Unauthorized
- IOException                    403 Forbidden
- FileNotFoundException          404 Not Found
- RuntimeException               500 Internal Server Error
-
-
- Below are examples of exception responses.
-
-
-
- Illegal Argument Exception
-
-HTTP/1.1 400 Bad Request
-Content-Type: application/json
-Transfer-Encoding: chunked
-
-{
- "RemoteException":
- {
- "exception" : "IllegalArgumentException",
- "javaClassName": "java.lang.IllegalArgumentException",
- "message" : "Invalid value for webhdfs parameter \"permission\": ..."
- }
-}
-
-
-
-
- Security Exception
-
-HTTP/1.1 401 Unauthorized
-Content-Type: application/json
-Transfer-Encoding: chunked
-
-{
- "RemoteException":
- {
- "exception" : "SecurityException",
- "javaClassName": "java.lang.SecurityException",
- "message" : "Failed to obtain user group information: ..."
- }
-}
-
-
-
-
- Access Control Exception
-
-HTTP/1.1 403 Forbidden
-Content-Type: application/json
-Transfer-Encoding: chunked
-
-{
- "RemoteException":
- {
- "exception" : "AccessControlException",
- "javaClassName": "org.apache.hadoop.security.AccessControlException",
- "message" : "Permission denied: ..."
- }
-}
-
-
-
-
- File Not Found Exception
-
-HTTP/1.1 404 Not Found
-Content-Type: application/json
-Transfer-Encoding: chunked
-
-{
- "RemoteException":
- {
- "exception" : "FileNotFoundException",
- "javaClassName": "java.io.FileNotFoundException",
- "message" : "File does not exist: /foo/a.patch"
- }
-}
-
-
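
When driving the REST API directly, a RemoteException body like the ones above
arrives on the HTTP error stream; a sketch of reading it (host, path, and user
are placeholders):

  import java.io.BufferedReader;
  import java.io.InputStreamReader;
  import java.net.HttpURLConnection;
  import java.net.URL;

  public class WebHdfsErrorExample {
    public static void main(String[] args) throws Exception {
      URL url = new URL("http://nn.example.com:50070/webhdfs/v1/no/such/file"
          + "?op=GETFILESTATUS&user.name=webuser");
      HttpURLConnection conn = (HttpURLConnection) url.openConnection();
      int code = conn.getResponseCode(); // e.g. 404 for FileNotFoundException
      if (code >= 400) {
        try (BufferedReader err = new BufferedReader(
            new InputStreamReader(conn.getErrorStream(), "UTF-8"))) {
          String line;
          while ((line = err.readLine()) != null) {
            System.out.println(line);    // the RemoteException JSON body
          }
        }
      }
    }
  }
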
-
-
-
-
-
- JSON Schemas
-
-All operations, except for OPEN, either return a zero-length response or
-a JSON response. For OPEN, the response is an octet-stream.
-The JSON schemas are shown below.
-See draft-zyp-json-schema-03 for the syntax definitions of the JSON schemas.
-
-
- Boolean JSON Schema
-
-{
- "name" : "boolean",
- "properties":
- {
- "boolean":
- {
- "description": "A boolean value",
- "type" : "boolean",
- "required" : true
- }
- }
-}
-
-
- See also: MKDIRS, RENAME, DELETE, SETREPLICATION
-
-
-
-
- ContentSummary JSON Schema
-
-{
- "name" : "ContentSummary",
- "properties":
- {
- "ContentSummary":
- {
- "type" : "object",
- "properties":
- {
- "directoryCount":
- {
- "description": "The number of directories.",
- "type" : "integer",
- "required" : true
- },
- "fileCount":
- {
- "description": "The number of files.",
- "type" : "integer",
- "required" : true
- },
- "length":
- {
- "description": "The number of bytes used by the content.",
- "type" : "integer",
- "required" : true
- },
- "quota":
- {
- "description": "The namespace quota of this directory.",
- "type" : "integer",
- "required" : true
- },
- "spaceConsumed":
- {
- "description": "The disk space consumed by the content.",
- "type" : "integer",
- "required" : true
- },
- "spaceQuota":
- {
- "description": "The disk space quota.",
- "type" : "integer",
- "required" : true
- }
- }
- }
- }
-}
-
-
- See also:
- GETCONTENTSUMMARY
-
-
-
-
- FileChecksum JSON Schema
-
-{
- "name" : "FileChecksum",
- "properties":
- {
- "FileChecksum":
- {
- "type" : "object",
- "properties":
- {
- "algorithm":
- {
- "description": "The name of the checksum algorithm.",
- "type" : "string",
- "required" : true
- },
- "bytes":
- {
- "description": "The byte sequence of the checksum in hexadecimal.",
- "type" : "string",
- "required" : true
- },
- "length":
- {
- "description": "The length of the bytes (not the length of the string).",
- "type" : "integer",
- "required" : true
- }
- }
- }
- }
-}
-
-
- See also:
- GETFILECHECKSUM
-
-
-
-
- FileStatus JSON Schema
-
-{
- "name" : "FileStatus",
- "properties":
- {
- "FileStatus": fileStatusProperties //See FileStatus Properties
- }
-}
-
-
- See also: GETFILESTATUS, FileStatus
-
-
- FileStatus Properties
-
- JavaScript syntax is used to define fileStatusProperties
- so that it can be referred to in both the FileStatus and FileStatuses JSON schemas.
-
-
-var fileStatusProperties =
-{
- "type" : "object",
- "properties":
- {
- "accessTime":
- {
- "description": "The access time.",
- "type" : "integer",
- "required" : true
- },
- "blockSize":
- {
- "description": "The block size of a file.",
- "type" : "integer",
- "required" : true
- },
- "group":
- {
- "description": "The group owner.",
- "type" : "string",
- "required" : true
- },
- "length":
- {
- "description": "The number of bytes in a file.",
- "type" : "integer",
- "required" : true
- },
- "modificationTime":
- {
- "description": "The modification time.",
- "type" : "integer",
- "required" : true
- },
- "owner":
- {
- "description": "The user who is the owner.",
- "type" : "string",
- "required" : true
- },
- "pathSuffix":
- {
- "description": "The path suffix.",
- "type" : "string",
- "required" : true
- },
- "permission":
- {
- "description": "The permission represented as a octal string.",
- "type" : "string",
- "required" : true
- },
- "replication":
- {
- "description": "The number of replication of a file.",
- "type" : "integer",
- "required" : true
- },
- "symlink": //an optional property
- {
- "description": "The link target of a symlink.",
- "type" : "string"
- },
- "type":
- {
- "description": "The type of the path object.",
- "enum" : ["FILE", "DIRECTORY", "SYMLINK"],
- "required" : true
- }
- }
-};
-
-
-
-
-
- FileStatuses JSON Schema
-
- A FileStatuses JSON object represents an array of FileStatus JSON objects.
-
-
-{
- "name" : "FileStatuses",
- "properties":
- {
- "FileStatuses":
- {
- "type" : "object",
- "properties":
- {
- "FileStatus":
- {
- "description": "An array of FileStatus",
- "type" : "array",
- "items" : fileStatusProperties //See FileStatus Properties
- }
- }
- }
- }
-}
-
-
- See also: LISTSTATUS, FileStatus
-
-
-
-
- Long JSON Schema
-
-{
- "name" : "long",
- "properties":
- {
- "long":
- {
- "description": "A long integer value",
- "type" : "integer",
- "required" : true
- }
- }
-}
-
-
- See also: RENEWDELEGATIONTOKEN
-
-
-
-
- Path JSON Schema
-
-{
- "name" : "Path",
- "properties":
- {
- "Path":
- {
- "description": "The string representation a Path.",
- "type" : "string",
- "required" : true
- }
- }
-}
-
-
- See also: GETHOMEDIRECTORY, Path
-
-
-
-
- RemoteException JSON Schema
-
-{
- "name" : "RemoteException",
- "properties":
- {
- "RemoteException":
- {
- "type" : "object",
- "properties":
- {
- "exception":
- {
- "description": "Name of the exception",
- "type" : "string",
- "required" : true
- },
- "message":
- {
- "description": "Exception message",
- "type" : "string",
- "required" : true
- },
- "javaClassName": //an optional property
- {
- "description": "Java class name of the exception",
- "type" : "string",
- }
- }
- }
- }
-}
-
-
-
-
- Token JSON Schema
-
-{
- "name" : "Token",
- "properties":
- {
- "Token":
- {
- "type" : "object",
- "properties":
- {
- "urlString":
- {
- "description": "A delegation token encoded as a URL safe string.",
- "type" : "string",
- "required" : true
- }
- }
- }
- }
-}
-
-
- See also: GETDELEGATIONTOKEN, the note in Delegation.
-
-
-
-
-
-
- HTTP Query Parameter Dictionary
-
- Access Time
-
- Name accesstime
- Description The access time of a file/directory.
- Type long
- Default Value -1 (means keeping it unchanged)
- Valid Values -1 or a timestamp
- Syntax Any integer.
-
-
- See also:
- SETTIMES
-
-
-
-
- Block Size
-
- Name blocksize
- Description The block size of a file.
- Type long
- Default Value Specified in the configuration.
- Valid Values > 0
- Syntax Any integer.
-
-
- See also:
- CREATE
-
-
-
-
- Buffer Size
-
- Name buffersize
- Description The size of the buffer used in transferring data.
- Type int
- Default Value Specified in the configuration.
- Valid Values > 0
- Syntax Any integer.
-
-
- See also: CREATE, APPEND, OPEN
-
-
-
-
- Delegation
-
- Name delegation
- Description The delegation token used for authentication.
- Type String
- Default Value <empty>
- Valid Values An encoded token.
- Syntax See the note below.
-
-
- Note that delegation tokens are encoded as a URL safe string;
- see encodeToUrlString() and decodeFromUrlString(String)
- in org.apache.hadoop.security.token.Token for the details of the encoding.
-
-
- See also:
- Authentication
-
-
-
-
- Destination
-
- Name destination
- Description The destination path used in RENAME.
- Type Path
- Default Value <empty> (an invalid path)
- Valid Values An absolute FileSystem path without scheme and authority.
- Syntax Any path.
-
-
- See also:
- RENAME
-
-
-
-
- Do As
-
- Name doas
- Description Allows a proxy user to act on behalf of another user.
- Type String
- Default Value null
- Valid Values Any valid username.
- Syntax Any string.
-
-
- See also:
- Proxy Users
-
-
-
-
- Group
-
- Name group
- Description The name of a group.
- Type String
- Default Value <empty> (means keeping it unchanged)
- Valid Values Any valid group name.
- Syntax Any string.
-
-
- See also:
- SETOWNER
-
-
-
-
- Length
-
- Name length
- Description The number of bytes to be processed.
- Type long
- Default Value null (means the entire file)
- Valid Values >= 0 or null
- Syntax Any integer.
-
-
- See also:
- OPEN
-
-
-
-
- Modification Time
-
- Name modificationtime
- Description The modification time of a file/directory.
- Type long
- Default Value -1 (means keeping it unchanged)
- Valid Values -1 or a timestamp
- Syntax Any integer.
-
-
- See also:
- SETTIMES
-
-
-
-
- Offset
-
- Name offset
- Description The starting byte position.
- Type long
- Default Value 0
- Valid Values >= 0
- Syntax Any integer.
-
-
- See also:
- OPEN
-
-
-
-
- Op
-
- Name op
- Description The name of the operation to be executed.
- Type enum
- Default Value null (an invalid value)
- Valid Values Any valid operation name.
- Syntax Any string.
-
-
- See also:
- Operations
-
-
-
-
- Overwrite
-
- Name overwrite
- Description If a file already exists, should it be overwritten?
- Type boolean
- Default Value false
- Valid Values true | false
- Syntax true | false
-
-
- See also:
- CREATE
-
-
-
-
- Owner
-
- Name owner
- Description The username who is the owner of a file/directory.
- Type String
- Default Value <empty> (means keeping it unchanged)
- Valid Values Any valid username.
- Syntax Any string.
-
-
- See also:
- SETOWNER
-
-
-
-
- Permission
-
- Name permission
- Description The permission of a file/directory.
- Type Octal
- Default Value 755
- Valid Values 0 - 1777
- Syntax Any radix-8 integer (leading zeros may be omitted.)
-
-
- See also: CREATE, MKDIRS, SETPERMISSION
-
-
-
-
- Recursive
-
- Name recursive
- Description Should the operation act on the content in the subdirectories?
- Type boolean
- Default Value false
- Valid Values true | false
- Syntax true | false
-
-
- See also: DELETE
-
-
-
-
- Renewer
-
- Name renewer
- Description The username of the renewer of a delegation token.
- Type String
- Default Value <empty> (means the current user)
- Valid Values Any valid username.
- Syntax Any string.
-
-
- See also:
- GETDELEGATIONTOKEN
-
-
-
-
- Replication
-
- Name replication
- Description The number of replications of a file.
- Type short
- Default Value Specified in the configuration.
- Valid Values > 0
- Syntax Any integer.
-
-
- See also: CREATE, SETREPLICATION
-
-
-
-
-
-
- Username
-
- Name user.name
- Description The authenticated user; see Authentication.
- Type String
- Default Value null
- Valid Values Any valid username.
- Syntax Any string.
-
-
- See also:
- Authentication
-
-
-
-
-
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/docs/src/documentation/resources/images/FI-framework.gif b/hadoop-hdfs-project/hadoop-hdfs/src/main/docs/src/documentation/resources/images/FI-framework.gif
deleted file mode 100644
index 94ccd83c399..00000000000
Binary files a/hadoop-hdfs-project/hadoop-hdfs/src/main/docs/src/documentation/resources/images/FI-framework.gif and /dev/null differ
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/docs/src/documentation/resources/images/FI-framework.odg b/hadoop-hdfs-project/hadoop-hdfs/src/main/docs/src/documentation/resources/images/FI-framework.odg
deleted file mode 100644
index 7a5ba85189f..00000000000
Binary files a/hadoop-hdfs-project/hadoop-hdfs/src/main/docs/src/documentation/resources/images/FI-framework.odg and /dev/null differ
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/docs/src/documentation/resources/images/architecture.gif b/hadoop-hdfs-project/hadoop-hdfs/src/main/docs/src/documentation/resources/images/architecture.gif
deleted file mode 100644
index 8d84a23b07a..00000000000
Binary files a/hadoop-hdfs-project/hadoop-hdfs/src/main/docs/src/documentation/resources/images/architecture.gif and /dev/null differ
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/docs/src/documentation/resources/images/core-logo.gif b/hadoop-hdfs-project/hadoop-hdfs/src/main/docs/src/documentation/resources/images/core-logo.gif
deleted file mode 100644
index 57879bb6dd1..00000000000
Binary files a/hadoop-hdfs-project/hadoop-hdfs/src/main/docs/src/documentation/resources/images/core-logo.gif and /dev/null differ
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/docs/src/documentation/resources/images/favicon.ico b/hadoop-hdfs-project/hadoop-hdfs/src/main/docs/src/documentation/resources/images/favicon.ico
deleted file mode 100644
index 161bcf7841c..00000000000
Binary files a/hadoop-hdfs-project/hadoop-hdfs/src/main/docs/src/documentation/resources/images/favicon.ico and /dev/null differ
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/docs/src/documentation/resources/images/hadoop-logo-big.jpg b/hadoop-hdfs-project/hadoop-hdfs/src/main/docs/src/documentation/resources/images/hadoop-logo-big.jpg
deleted file mode 100644
index 0c6996cdcb0..00000000000
Binary files a/hadoop-hdfs-project/hadoop-hdfs/src/main/docs/src/documentation/resources/images/hadoop-logo-big.jpg and /dev/null differ
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/docs/src/documentation/resources/images/hadoop-logo.jpg b/hadoop-hdfs-project/hadoop-hdfs/src/main/docs/src/documentation/resources/images/hadoop-logo.jpg
deleted file mode 100644
index 809525d9f15..00000000000
Binary files a/hadoop-hdfs-project/hadoop-hdfs/src/main/docs/src/documentation/resources/images/hadoop-logo.jpg and /dev/null differ
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/docs/src/documentation/resources/images/request-identify.jpg b/hadoop-hdfs-project/hadoop-hdfs/src/main/docs/src/documentation/resources/images/request-identify.jpg
deleted file mode 100644
index 504cbaf0c79..00000000000
Binary files a/hadoop-hdfs-project/hadoop-hdfs/src/main/docs/src/documentation/resources/images/request-identify.jpg and /dev/null differ
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/docs/src/documentation/skinconf.xml b/hadoop-hdfs-project/hadoop-hdfs/src/main/docs/src/documentation/skinconf.xml
deleted file mode 100644
index cfb2010ae5e..00000000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/docs/src/documentation/skinconf.xml
+++ /dev/null
@@ -1,366 +0,0 @@
-<!-- skinconf.xml (Forrest skin configuration): the XML element names were
-     lost in extraction; the surviving values are summarized below. -->
-
-  Boolean feature toggles (element names lost):
-    true, false, true, true, true, true, true, false, true
-
-  Project:   Hadoop - Scalable Computing Platform
-             http://hadoop.apache.org/hdfs/  (logo: images/hdfs-logo.jpg)
-  Group:     Hadoop - Apache Hadoop
-             http://hadoop.apache.org/  (logo: images/hadoop-logo.jpg)
-  Favicon:   images/favicon.ico
-  Copyright: 2010 The Apache Software Foundation.
-             http://www.apache.org/licenses/
-
-  Extra CSS:
-    p.quote {
-      margin-left: 2em;
-      padding: .5em;
-      background-color: #f0f0f0;
-      font-family: monospace;
-    }
-    #content h1 {
-      margin-bottom: .5em;
-      font-size: 185%; color: black;
-      font-family: arial;
-    }
-    h2, .h3 { font-size: 175%; color: black; font-family: arial; }
-    h3, .h4 { font-size: 135%; color: black; font-family: arial; margin-bottom: 0.5em; }
-    h4, .h5 { font-size: 125%; color: black; font-style: italic; font-weight: bold; font-family: arial; }
-    h5, h6 { font-size: 110%; color: #363636; font-weight: bold; }
-    pre.code {
-      margin-left: 0em;
-      padding: 0.5em;
-      background-color: rgb(241,239,231);
-      font-family: monospace;
-    }
-
-  PDF output: page margins top 1in, bottom 1in, inner 1.25in, outer 1in;
-              two boolean settings (names lost): false, false
-
-  Credit:    Built with Apache Forrest - http://forrest.apache.org/
-             (images/built-with-forrest-button.png, 88x31)
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/docs/status.xml b/hadoop-hdfs-project/hadoop-hdfs/src/main/docs/status.xml
deleted file mode 100644
index a8207267e5a..00000000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/docs/status.xml
+++ /dev/null
@@ -1,75 +0,0 @@
-<!-- status.xml (Forrest project status): XML markup lost in extraction;
-     the surviving text is summarized below. -->
-
-  Changes: Initial Import
-
-  Todo:
-    * Customize this template project with your project's details. This
-      TODO list is generated from 'status.xml'.
-    * Add lots of content. XML content goes in src/documentation/content/xdocs,
-      or wherever the ${project.xdocs-dir} property (set in forrest.properties)
-      points.
-    * Mail forrest-dev@xml.apache.org with feedback.