Merge branch 'trunk' into HDFS-7240

Anu Engineer 2017-04-04 12:50:22 -07:00
commit adc651044c
512 changed files with 92407 additions and 6300 deletions

View File

@ -2659,4 +2659,24 @@ available under the Creative Commons By Attribution 3.0 License.
available upon request from time to time. For the avoidance of doubt,
this trademark restriction does not form part of this License.
Creative Commons may be contacted at https://creativecommons.org/.
--------------------------------------------------------------------------------
For: hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs
/server/datanode/checker/AbstractFuture.java and
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs
/server/datanode/checker/TimeoutFuture.java
Copyright (C) 2007 The Guava Authors
Licensed under the Apache License, Version 2.0 (the "License"); you may not
use this file except in compliance with the License. You may obtain a copy of
the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
License for the specific language governing permissions and limitations under
the License.

File diff suppressed because one or more lines are too long

View File

@ -45,11 +45,6 @@
<artifactId>hadoop-annotations</artifactId>
<scope>compile</scope>
</dependency>
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-minikdc</artifactId>
<scope>test</scope>
</dependency>
<dependency>
<groupId>com.google.guava</groupId>
<artifactId>guava</artifactId>
@ -318,6 +313,16 @@
<groupId>com.fasterxml.jackson.core</groupId>
<artifactId>jackson-databind</artifactId>
</dependency>
<dependency>
<groupId>org.codehaus.woodstox</groupId>
<artifactId>stax2-api</artifactId>
<scope>compile</scope>
</dependency>
<dependency>
<groupId>com.fasterxml</groupId>
<artifactId>aalto-xml</artifactId>
<scope>compile</scope>
</dependency>
</dependencies>
<build>

View File

@ -322,6 +322,16 @@ log4j.appender.EWMA.cleanupInterval=${yarn.ewma.cleanupInterval}
log4j.appender.EWMA.messageAgeLimitSeconds=${yarn.ewma.messageAgeLimitSeconds}
log4j.appender.EWMA.maxUniqueMessages=${yarn.ewma.maxUniqueMessages}
# Fair scheduler requests log on state dump
log4j.logger.org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.FairScheduler.statedump=DEBUG,FSLOGGER
log4j.additivity.org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.FairScheduler.statedump=false
log4j.appender.FSLOGGER=org.apache.log4j.RollingFileAppender
log4j.appender.FSLOGGER.File=${hadoop.log.dir}/fairscheduler-statedump.log
log4j.appender.FSLOGGER.layout=org.apache.log4j.PatternLayout
log4j.appender.FSLOGGER.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
log4j.appender.FSLOGGER.MaxFileSize=${hadoop.log.maxfilesize}
log4j.appender.FSLOGGER.MaxBackupIndex=${hadoop.log.maxbackupindex}
#
# Add a logger for ozone that is separate from the Datanode.
#

View File

@ -18,6 +18,7 @@
package org.apache.hadoop.conf;
import com.fasterxml.aalto.stax.InputFactoryImpl;
import com.fasterxml.jackson.core.JsonFactory;
import com.fasterxml.jackson.core.JsonGenerator;
import com.google.common.annotations.VisibleForTesting;
@ -65,9 +66,11 @@ import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicReference;
import javax.xml.parsers.DocumentBuilder;
import javax.xml.parsers.DocumentBuilderFactory;
import javax.xml.parsers.ParserConfigurationException;
import javax.xml.stream.XMLStreamConstants;
import javax.xml.stream.XMLStreamException;
import javax.xml.stream.XMLStreamReader;
import javax.xml.transform.Transformer;
import javax.xml.transform.TransformerException;
import javax.xml.transform.TransformerFactory;
@ -93,14 +96,10 @@ import org.apache.hadoop.security.alias.CredentialProviderFactory;
import org.apache.hadoop.util.ReflectionUtils;
import org.apache.hadoop.util.StringInterner;
import org.apache.hadoop.util.StringUtils;
import org.w3c.dom.Attr;
import org.w3c.dom.DOMException;
import org.codehaus.stax2.XMLInputFactory2;
import org.codehaus.stax2.XMLStreamReader2;
import org.w3c.dom.Document;
import org.w3c.dom.Element;
import org.w3c.dom.Node;
import org.w3c.dom.NodeList;
import org.w3c.dom.Text;
import org.xml.sax.SAXException;
import com.google.common.base.Preconditions;
import com.google.common.base.Strings;
@ -280,7 +279,13 @@ public class Configuration implements Iterable<Map.Entry<String,String>>,
* the key most recently
*/
private Map<String, String[]> updatingResource;
/**
* Specify exact input factory to avoid time finding correct one.
* Factory is reusable across un-synchronized threads once initialized
*/
private static final XMLInputFactory2 factory = new InputFactoryImpl();
/**
* Class to keep the information about the keys which replace the deprecated
* ones.
@ -619,42 +624,44 @@ public class Configuration implements Iterable<Map.Entry<String,String>>,
* deprecated key, the value of the deprecated key is set as the value for
* the provided property name.
*
* @param deprecations deprecation context
* @param name the property name
* @return the first property in the list of properties mapping
* the <code>name</code> or the <code>name</code> itself.
*/
private String[] handleDeprecation(DeprecationContext deprecations,
String name) {
String name) {
if (null != name) {
name = name.trim();
}
ArrayList<String > names = new ArrayList<String>();
if (isDeprecated(name)) {
DeprecatedKeyInfo keyInfo = deprecations.getDeprecatedKeyMap().get(name);
if (keyInfo != null) {
if (!keyInfo.getAndSetAccessed()) {
logDeprecation(keyInfo.getWarningMessage(name));
}
for (String newKey : keyInfo.newKeys) {
if (newKey != null) {
names.add(newKey);
}
// Initialize the return value with requested name
String[] names = new String[]{name};
// Deprecated keys are logged once and an updated names are returned
DeprecatedKeyInfo keyInfo = deprecations.getDeprecatedKeyMap().get(name);
if (keyInfo != null) {
if (!keyInfo.getAndSetAccessed()) {
logDeprecation(keyInfo.getWarningMessage(name));
}
// Override return value for deprecated keys
names = keyInfo.newKeys;
}
// If there are no overlay values we can return early
Properties overlayProperties = getOverlay();
if (overlayProperties.isEmpty()) {
return names;
}
// Update properties and overlays with reverse lookup values
for (String n : names) {
String deprecatedKey = deprecations.getReverseDeprecatedKeyMap().get(n);
if (deprecatedKey != null && !overlayProperties.containsKey(n)) {
String deprecatedValue = overlayProperties.getProperty(deprecatedKey);
if (deprecatedValue != null) {
getProps().setProperty(n, deprecatedValue);
overlayProperties.setProperty(n, deprecatedValue);
}
}
}
if(names.size() == 0) {
names.add(name);
}
for(String n : names) {
String deprecatedKey = deprecations.getReverseDeprecatedKeyMap().get(n);
if (deprecatedKey != null && !getOverlay().containsKey(n) &&
getOverlay().containsKey(deprecatedKey)) {
getProps().setProperty(n, getOverlay().getProperty(deprecatedKey));
getOverlay().setProperty(n, getOverlay().getProperty(deprecatedKey));
}
}
return names.toArray(new String[names.size()]);
return names;
}
private void handleDeprecation() {
@ -668,23 +675,26 @@ public class Configuration implements Iterable<Map.Entry<String,String>>,
}
}
static{
//print deprecation warning if hadoop-site.xml is found in classpath
static {
// Add default resources
addDefaultResource("core-default.xml");
addDefaultResource("core-site.xml");
// print deprecation warning if hadoop-site.xml is found in classpath
ClassLoader cL = Thread.currentThread().getContextClassLoader();
if (cL == null) {
cL = Configuration.class.getClassLoader();
}
if(cL.getResource("hadoop-site.xml")!=null) {
if (cL.getResource("hadoop-site.xml") != null) {
LOG.warn("DEPRECATED: hadoop-site.xml found in the classpath. " +
"Usage of hadoop-site.xml is deprecated. Instead use core-site.xml, "
+ "mapred-site.xml and hdfs-site.xml to override properties of " +
"core-default.xml, mapred-default.xml and hdfs-default.xml " +
"respectively");
addDefaultResource("hadoop-site.xml");
}
addDefaultResource("core-default.xml");
addDefaultResource("core-site.xml");
}
private Properties properties;
private Properties overlay;
private ClassLoader classLoader;
@ -746,7 +756,20 @@ public class Configuration implements Iterable<Map.Entry<String,String>>,
this.loadDefaults = other.loadDefaults;
setQuietMode(other.getQuietMode());
}
/**
* Reload existing configuration instances.
*/
public static synchronized void reloadExistingConfigurations() {
if (LOG.isDebugEnabled()) {
LOG.debug("Reloading " + REGISTRY.keySet().size()
+ " existing configurations");
}
for (Configuration conf : REGISTRY.keySet()) {
conf.reloadConfiguration();
}
}
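
Editorial note: below is a minimal usage sketch of the new reloadExistingConfigurations() hook added above. It is not part of this commit and assumes core-site.xml is edited on disk between the two lookups.

// Editorial sketch (assumption: core-site.xml changes out of band).
import org.apache.hadoop.conf.Configuration;

public class ReloadSketch {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    System.out.println(conf.get("io.file.buffer.size"));
    // ... core-site.xml is modified on disk here (assumed) ...
    Configuration.reloadExistingConfigurations();
    // The registered instance re-reads its resources on the next lookup.
    System.out.println(conf.get("io.file.buffer.size"));
  }
}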
/**
* Add a default resource. Resources are loaded in the order of the resources
* added.
@ -1205,7 +1228,7 @@ public class Configuration implements Iterable<Map.Entry<String,String>>,
"Property name must not be null");
Preconditions.checkArgument(
value != null,
"The value of property " + name + " must not be null");
"The value of property %s must not be null", name);
name = name.trim();
DeprecationContext deprecations = deprecationContext.get();
if (deprecations.getDeprecatedKeyMap().isEmpty()) {
@ -2595,8 +2618,8 @@ public class Configuration implements Iterable<Map.Entry<String,String>>,
return configMap;
}
private Document parse(DocumentBuilder builder, URL url)
throws IOException, SAXException {
private XMLStreamReader parse(URL url)
throws IOException, XMLStreamException {
if (!quietmode) {
if (LOG.isDebugEnabled()) {
LOG.debug("parsing URL " + url);
@ -2612,23 +2635,18 @@ public class Configuration implements Iterable<Map.Entry<String,String>>,
// with other users.
connection.setUseCaches(false);
}
return parse(builder, connection.getInputStream(), url.toString());
return parse(connection.getInputStream(), url.toString());
}
private Document parse(DocumentBuilder builder, InputStream is,
String systemId) throws IOException, SAXException {
private XMLStreamReader parse(InputStream is,
String systemId) throws IOException, XMLStreamException {
if (!quietmode) {
LOG.debug("parsing input stream " + is);
}
if (is == null) {
return null;
}
try {
return (systemId == null) ? builder.parse(is) : builder.parse(is,
systemId);
} finally {
is.close();
}
return factory.createXMLStreamReader(systemId, is);
}
private void loadResources(Properties properties,
@ -2638,11 +2656,6 @@ public class Configuration implements Iterable<Map.Entry<String,String>>,
for (String resource : defaultResources) {
loadResource(properties, new Resource(resource), quiet);
}
//support the hadoop-site.xml as a deprecated case
if(getResource("hadoop-site.xml")!=null) {
loadResource(properties, new Resource("hadoop-site.xml"), quiet);
}
}
for (int i = 0; i < resources.size(); i++) {
@ -2653,37 +2666,20 @@ public class Configuration implements Iterable<Map.Entry<String,String>>,
}
}
private Resource loadResource(Properties properties, Resource wrapper, boolean quiet) {
private Resource loadResource(Properties properties,
Resource wrapper, boolean quiet) {
String name = UNKNOWN_RESOURCE;
try {
Object resource = wrapper.getResource();
name = wrapper.getName();
DocumentBuilderFactory docBuilderFactory
= DocumentBuilderFactory.newInstance();
//ignore all comments inside the xml file
docBuilderFactory.setIgnoringComments(true);
//allow includes in the xml file
docBuilderFactory.setNamespaceAware(true);
try {
docBuilderFactory.setXIncludeAware(true);
} catch (UnsupportedOperationException e) {
LOG.error("Failed to set setXIncludeAware(true) for parser "
+ docBuilderFactory
+ ":" + e,
e);
}
DocumentBuilder builder = docBuilderFactory.newDocumentBuilder();
Document doc = null;
Element root = null;
XMLStreamReader2 reader = null;
boolean returnCachedProperties = false;
if (resource instanceof URL) { // an URL resource
doc = parse(builder, (URL)resource);
reader = (XMLStreamReader2)parse((URL)resource);
} else if (resource instanceof String) { // a CLASSPATH resource
URL url = getResource((String)resource);
doc = parse(builder, url);
reader = (XMLStreamReader2)parse(url);
} else if (resource instanceof Path) { // a file resource
// Can't use FileSystem API or we get an infinite loop
// since FileSystem uses Configuration API. Use java.io.File instead.
@ -2693,104 +2689,179 @@ public class Configuration implements Iterable<Map.Entry<String,String>>,
if (!quiet) {
LOG.debug("parsing File " + file);
}
doc = parse(builder, new BufferedInputStream(
reader = (XMLStreamReader2)parse(new BufferedInputStream(
new FileInputStream(file)), ((Path)resource).toString());
}
} else if (resource instanceof InputStream) {
doc = parse(builder, (InputStream) resource, null);
reader = (XMLStreamReader2)parse((InputStream)resource, null);
returnCachedProperties = true;
} else if (resource instanceof Properties) {
overlay(properties, (Properties)resource);
} else if (resource instanceof Element) {
root = (Element)resource;
}
if (root == null) {
if (doc == null) {
if (quiet) {
return null;
}
throw new RuntimeException(resource + " not found");
if (reader == null) {
if (quiet) {
return null;
}
root = doc.getDocumentElement();
throw new RuntimeException(resource + " not found");
}
Properties toAddTo = properties;
if(returnCachedProperties) {
toAddTo = new Properties();
}
if (!"configuration".equals(root.getTagName()))
LOG.fatal("bad conf file: top-level element not <configuration>");
NodeList props = root.getChildNodes();
DeprecationContext deprecations = deprecationContext.get();
for (int i = 0; i < props.getLength(); i++) {
Node propNode = props.item(i);
if (!(propNode instanceof Element))
continue;
Element prop = (Element)propNode;
if ("configuration".equals(prop.getTagName())) {
loadResource(toAddTo, new Resource(prop, name), quiet);
continue;
}
if (!"property".equals(prop.getTagName()))
LOG.warn("bad conf file: element not <property>");
String attr = null;
String value = null;
boolean finalParameter = false;
LinkedList<String> source = new LinkedList<String>();
StringBuilder token = new StringBuilder();
String confName = null;
String confValue = null;
boolean confFinal = false;
boolean fallbackAllowed = false;
boolean fallbackEntered = false;
boolean parseToken = false;
LinkedList<String> confSource = new LinkedList<String>();
Attr propAttr = prop.getAttributeNode("name");
if (propAttr != null)
attr = StringInterner.weakIntern(propAttr.getValue());
propAttr = prop.getAttributeNode("value");
if (propAttr != null)
value = StringInterner.weakIntern(propAttr.getValue());
propAttr = prop.getAttributeNode("final");
if (propAttr != null)
finalParameter = "true".equals(propAttr.getValue());
propAttr = prop.getAttributeNode("source");
if (propAttr != null)
source.add(StringInterner.weakIntern(propAttr.getValue()));
while (reader.hasNext()) {
switch (reader.next()) {
case XMLStreamConstants.START_ELEMENT:
switch (reader.getLocalName()) {
case "property":
confName = null;
confValue = null;
confFinal = false;
confSource.clear();
NodeList fields = prop.getChildNodes();
for (int j = 0; j < fields.getLength(); j++) {
Node fieldNode = fields.item(j);
if (!(fieldNode instanceof Element))
continue;
Element field = (Element)fieldNode;
if ("name".equals(field.getTagName()) && field.hasChildNodes())
attr = StringInterner.weakIntern(
((Text)field.getFirstChild()).getData().trim());
if ("value".equals(field.getTagName()) && field.hasChildNodes())
value = StringInterner.weakIntern(
((Text)field.getFirstChild()).getData());
if ("final".equals(field.getTagName()) && field.hasChildNodes())
finalParameter = "true".equals(((Text)field.getFirstChild()).getData());
if ("source".equals(field.getTagName()) && field.hasChildNodes())
source.add(StringInterner.weakIntern(
((Text)field.getFirstChild()).getData()));
}
source.add(name);
// Ignore this parameter if it has already been marked as 'final'
if (attr != null) {
if (deprecations.getDeprecatedKeyMap().containsKey(attr)) {
DeprecatedKeyInfo keyInfo =
deprecations.getDeprecatedKeyMap().get(attr);
keyInfo.clearAccessed();
for (String key:keyInfo.newKeys) {
// update new keys with deprecated key's value
loadProperty(toAddTo, name, key, value, finalParameter,
source.toArray(new String[source.size()]));
// First test for short format configuration
int attrCount = reader.getAttributeCount();
for (int i = 0; i < attrCount; i++) {
String propertyAttr = reader.getAttributeLocalName(i);
if ("name".equals(propertyAttr)) {
confName = StringInterner.weakIntern(
reader.getAttributeValue(i));
} else if ("value".equals(propertyAttr)) {
confValue = StringInterner.weakIntern(
reader.getAttributeValue(i));
} else if ("final".equals(propertyAttr)) {
confFinal = "true".equals(reader.getAttributeValue(i));
} else if ("source".equals(propertyAttr)) {
confSource.add(StringInterner.weakIntern(
reader.getAttributeValue(i)));
}
}
break;
case "name":
case "value":
case "final":
case "source":
parseToken = true;
token.setLength(0);
break;
case "include":
// Determine href for xi:include
String confInclude = null;
attrCount = reader.getAttributeCount();
for (int i = 0; i < attrCount; i++) {
String attrName = reader.getAttributeLocalName(i);
if ("href".equals(attrName)) {
confInclude = reader.getAttributeValue(i);
}
}
if (confInclude == null) {
break;
}
// Determine if the included resource is a classpath resource
// otherwise fallback to a file resource
// xi:include are treated as inline and retain current source
URL include = getResource(confInclude);
if (include != null) {
Resource classpathResource = new Resource(include, name);
loadResource(properties, classpathResource, quiet);
} else {
File href = new File(confInclude);
if (!href.isAbsolute()) {
// Included resources are relative to the current resource
File baseFile = new File(name).getParentFile();
href = new File(baseFile, href.getPath());
}
if (!href.exists()) {
// Resource errors are non-fatal iff there is 1 xi:fallback
fallbackAllowed = true;
break;
}
Resource uriResource = new Resource(href.toURI().toURL(), name);
loadResource(properties, uriResource, quiet);
}
break;
case "fallback":
fallbackEntered = true;
break;
case "configuration":
break;
default:
break;
}
else {
loadProperty(toAddTo, name, attr, value, finalParameter,
source.toArray(new String[source.size()]));
break;
case XMLStreamConstants.CHARACTERS:
if (parseToken) {
char[] text = reader.getTextCharacters();
token.append(text, reader.getTextStart(), reader.getTextLength());
}
break;
case XMLStreamConstants.END_ELEMENT:
switch (reader.getLocalName()) {
case "name":
if (token.length() > 0) {
confName = StringInterner.weakIntern(token.toString().trim());
}
break;
case "value":
if (token.length() > 0) {
confValue = StringInterner.weakIntern(token.toString());
}
break;
case "final":
confFinal = "true".equals(token.toString());
break;
case "source":
confSource.add(StringInterner.weakIntern(token.toString()));
break;
case "include":
if (fallbackAllowed && !fallbackEntered) {
throw new IOException("Fetch fail on include with no "
+ "fallback while loading '" + name + "'");
}
fallbackAllowed = false;
fallbackEntered = false;
break;
case "property":
if (confName == null || (!fallbackAllowed && fallbackEntered)) {
break;
}
confSource.add(name);
DeprecatedKeyInfo keyInfo =
deprecations.getDeprecatedKeyMap().get(confName);
if (keyInfo != null) {
keyInfo.clearAccessed();
for (String key : keyInfo.newKeys) {
// update new keys with deprecated key's value
loadProperty(toAddTo, name, key, confValue, confFinal,
confSource.toArray(new String[confSource.size()]));
}
} else {
loadProperty(toAddTo, name, confName, confValue, confFinal,
confSource.toArray(new String[confSource.size()]));
}
break;
default:
break;
}
default:
break;
}
}
reader.close();
if (returnCachedProperties) {
overlay(properties, toAddTo);
return new Resource(toAddTo, name);
@ -2799,15 +2870,9 @@ public class Configuration implements Iterable<Map.Entry<String,String>>,
} catch (IOException e) {
LOG.fatal("error parsing conf " + name, e);
throw new RuntimeException(e);
} catch (DOMException e) {
} catch (XMLStreamException e) {
LOG.fatal("error parsing conf " + name, e);
throw new RuntimeException(e);
} catch (SAXException e) {
LOG.fatal("error parsing conf " + name, e);
throw new RuntimeException(e);
} catch (ParserConfigurationException e) {
LOG.fatal("error parsing conf " + name , e);
throw new RuntimeException(e);
}
}
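
Editorial note: the hunk above replaces the DOM DocumentBuilder path with a shared StAX XMLInputFactory2. The following stand-alone sketch shows the same pull-parsing pattern against an in-memory document rather than a real configuration resource; it is an illustration only, not code from this commit.

// Editorial sketch of StAX pull parsing with the Aalto factory.
import com.fasterxml.aalto.stax.InputFactoryImpl;
import javax.xml.stream.XMLStreamConstants;
import javax.xml.stream.XMLStreamReader;
import java.io.ByteArrayInputStream;
import java.nio.charset.StandardCharsets;

public class StaxConfigSketch {
  // The factory is reusable across threads once initialized.
  private static final InputFactoryImpl FACTORY = new InputFactoryImpl();

  public static void main(String[] args) throws Exception {
    String xml = "<configuration><property>"
        + "<name>io.file.buffer.size</name><value>8192</value>"
        + "</property></configuration>";
    XMLStreamReader reader = FACTORY.createXMLStreamReader(
        new ByteArrayInputStream(xml.getBytes(StandardCharsets.UTF_8)));
    String current = null, name = null, value = null;
    while (reader.hasNext()) {
      int event = reader.next();
      if (event == XMLStreamConstants.START_ELEMENT) {
        current = reader.getLocalName();
      } else if (event == XMLStreamConstants.CHARACTERS) {
        if ("name".equals(current)) {
          name = reader.getText().trim();
        } else if ("value".equals(current)) {
          value = reader.getText();
        }
      } else if (event == XMLStreamConstants.END_ELEMENT) {
        current = null;
      }
    }
    reader.close();
    System.out.println(name + " = " + value);
  }
}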

View File

@ -450,9 +450,21 @@ public abstract class AbstractFileSystem {
* @return server default configuration values
*
* @throws IOException an I/O error occurred
* @deprecated use {@link #getServerDefaults(Path)} instead
*/
@Deprecated
public abstract FsServerDefaults getServerDefaults() throws IOException;
/**
* Return a set of server default configuration values based on path.
* @param f path to fetch server defaults
* @return server default configuration values for path
* @throws IOException an I/O error occurred
*/
public FsServerDefaults getServerDefaults(final Path f) throws IOException {
return getServerDefaults();
}
/**
* Return the fully-qualified path of path f resolving the path
* through any internal symlinks or mount point
@ -548,7 +560,7 @@ public abstract class AbstractFileSystem {
}
FsServerDefaults ssDef = getServerDefaults();
FsServerDefaults ssDef = getServerDefaults(f);
if (ssDef.getBlockSize() % ssDef.getBytesPerChecksum() != 0) {
throw new IOException("Internal error: default blockSize is" +
" not a multiple of default bytesPerChecksum ");
@ -626,7 +638,7 @@ public abstract class AbstractFileSystem {
*/
public FSDataInputStream open(final Path f) throws AccessControlException,
FileNotFoundException, UnresolvedLinkException, IOException {
return open(f, getServerDefaults().getFileBufferSize());
return open(f, getServerDefaults(f).getFileBufferSize());
}
/**

View File

@ -57,7 +57,7 @@ public abstract class ChecksumFs extends FilterFs {
throws IOException, URISyntaxException {
super(theFs);
defaultBytesPerChecksum =
getMyFs().getServerDefaults().getBytesPerChecksum();
getMyFs().getServerDefaults(new Path("/")).getBytesPerChecksum();
}
/**
@ -96,9 +96,10 @@ public abstract class ChecksumFs extends FilterFs {
return defaultBytesPerChecksum;
}
private int getSumBufferSize(int bytesPerSum, int bufferSize)
private int getSumBufferSize(int bytesPerSum, int bufferSize, Path file)
throws IOException {
int defaultBufferSize = getMyFs().getServerDefaults().getFileBufferSize();
int defaultBufferSize = getMyFs().getServerDefaults(file)
.getFileBufferSize();
int proportionalBufferSize = bufferSize / bytesPerSum;
return Math.max(bytesPerSum,
Math.max(proportionalBufferSize, defaultBufferSize));
@ -121,7 +122,7 @@ public abstract class ChecksumFs extends FilterFs {
public ChecksumFSInputChecker(ChecksumFs fs, Path file)
throws IOException, UnresolvedLinkException {
this(fs, file, fs.getServerDefaults().getFileBufferSize());
this(fs, file, fs.getServerDefaults(file).getFileBufferSize());
}
public ChecksumFSInputChecker(ChecksumFs fs, Path file, int bufferSize)
@ -132,7 +133,7 @@ public abstract class ChecksumFs extends FilterFs {
Path sumFile = fs.getChecksumFile(file);
try {
int sumBufferSize = fs.getSumBufferSize(fs.getBytesPerSum(),
bufferSize);
bufferSize, file);
sums = fs.getRawFs().open(sumFile, sumBufferSize);
byte[] version = new byte[CHECKSUM_VERSION.length];
@ -353,7 +354,7 @@ public abstract class ChecksumFs extends FilterFs {
// Now create the chekcsumfile; adjust the buffsize
int bytesPerSum = fs.getBytesPerSum();
int sumBufferSize = fs.getSumBufferSize(bytesPerSum, bufferSize);
int sumBufferSize = fs.getSumBufferSize(bytesPerSum, bufferSize, file);
this.sums = fs.getRawFs().createInternal(fs.getChecksumFile(file),
EnumSet.of(CreateFlag.CREATE, CreateFlag.OVERWRITE),
absolutePermission, sumBufferSize, replication, blockSize, progress,

View File

@ -149,10 +149,16 @@ public abstract class DelegateToFileSystem extends AbstractFileSystem {
}
@Override
@Deprecated
public FsServerDefaults getServerDefaults() throws IOException {
return fsImpl.getServerDefaults();
}
@Override
public FsServerDefaults getServerDefaults(final Path f) throws IOException {
return fsImpl.getServerDefaults(f);
}
@Override
public Path getHomeDirectory() {
return fsImpl.getHomeDirectory();

View File

@ -0,0 +1,142 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs;
import com.google.common.base.Preconditions;
import org.apache.hadoop.fs.Options.ChecksumOpt;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.util.Progressable;
import java.io.IOException;
import java.util.EnumSet;
import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_DEFAULT;
import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_KEY;
/** Base of specific file system FSDataOutputStreamBuilder. */
public class FSDataOutputStreamBuilder{
private Path path = null;
private FsPermission permission = null;
private Integer bufferSize;
private Short replication;
private Long blockSize;
private Progressable progress = null;
private EnumSet<CreateFlag> flags = null;
private ChecksumOpt checksumOpt = null;
private final FileSystem fs;
public FSDataOutputStreamBuilder(FileSystem fileSystem, Path p) {
fs = fileSystem;
path = p;
}
protected Path getPath() {
return path;
}
protected FsPermission getPermission() {
if (permission == null) {
return FsPermission.getFileDefault();
}
return permission;
}
public FSDataOutputStreamBuilder setPermission(final FsPermission perm) {
Preconditions.checkNotNull(perm);
permission = perm;
return this;
}
protected int getBufferSize() {
if (bufferSize == null) {
return fs.getConf().getInt(IO_FILE_BUFFER_SIZE_KEY,
IO_FILE_BUFFER_SIZE_DEFAULT);
}
return bufferSize;
}
public FSDataOutputStreamBuilder setBufferSize(int bufSize) {
bufferSize = bufSize;
return this;
}
protected short getReplication() {
if (replication == null) {
return fs.getDefaultReplication(getPath());
}
return replication;
}
public FSDataOutputStreamBuilder setReplication(short replica) {
replication = replica;
return this;
}
protected long getBlockSize() {
if (blockSize == null) {
return fs.getDefaultBlockSize(getPath());
}
return blockSize;
}
public FSDataOutputStreamBuilder setBlockSize(long blkSize) {
blockSize = blkSize;
return this;
}
protected Progressable getProgress() {
return progress;
}
public FSDataOutputStreamBuilder setProgress(final Progressable prog) {
Preconditions.checkNotNull(prog);
progress = prog;
return this;
}
protected EnumSet<CreateFlag> getFlags() {
if (flags == null) {
return EnumSet.of(CreateFlag.CREATE, CreateFlag.OVERWRITE);
}
return flags;
}
public FSDataOutputStreamBuilder setFlags(
final EnumSet<CreateFlag> enumFlags) {
Preconditions.checkNotNull(enumFlags);
flags = enumFlags;
return this;
}
protected ChecksumOpt getChecksumOpt() {
return checksumOpt;
}
public FSDataOutputStreamBuilder setChecksumOpt(
final ChecksumOpt chksumOpt) {
Preconditions.checkNotNull(chksumOpt);
checksumOpt = chksumOpt;
return this;
}
public FSDataOutputStream build() throws IOException {
return fs.create(getPath(), getPermission(), getFlags(), getBufferSize(),
getReplication(), getBlockSize(), getProgress(), getChecksumOpt());
}
}
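
Editorial note: the builder above is exposed through FileSystem#newFSDataOutputStreamBuilder (added later in this diff). A minimal usage sketch follows; the output path is hypothetical and a configured default filesystem is assumed.

// Editorial sketch: create a file through the new builder API.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;

public class BuilderSketch {
  public static void main(String[] args) throws Exception {
    FileSystem fs = FileSystem.get(new Configuration());
    Path out = new Path("/tmp/builder-example.txt");  // hypothetical path
    try (FSDataOutputStream stream = fs.newFSDataOutputStreamBuilder(out)
        .setPermission(FsPermission.getFileDefault())
        .setReplication((short) 2)
        .build()) {
      stream.writeBytes("hello\n");
    }
  }
}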

View File

@ -207,6 +207,15 @@ public class FileStatus implements Writable, Comparable<FileStatus>,
return permission;
}
/**
* Tell whether the underlying file or directory has ACLs set.
*
* @return true if the underlying file or directory has ACLs set.
*/
public boolean hasAcl() {
return permission.getAclBit();
}
/**
* Tell whether the underlying file or directory is encrypted or not.
*
@ -215,7 +224,16 @@ public class FileStatus implements Writable, Comparable<FileStatus>,
public boolean isEncrypted() {
return permission.getEncryptedBit();
}
/**
* Tell whether the underlying file or directory is erasure coded or not.
*
* @return true if the underlying file or directory is erasure coded.
*/
public boolean isErasureCoded() {
return permission.getErasureCodedBit();
}
/**
* Get the owner of the file.
* @return owner of the file. The string could be empty if there is no
@ -390,6 +408,9 @@ public class FileStatus implements Writable, Comparable<FileStatus>,
if(isSymlink()) {
sb.append("; symlink=" + symlink);
}
sb.append("; hasAcl=" + hasAcl());
sb.append("; isEncrypted=" + isEncrypted());
sb.append("; isErasureCoded=" + isErasureCoded());
sb.append("}");
return sb.toString();
}

View File

@ -4138,4 +4138,13 @@ public abstract class FileSystem extends Configured implements Closeable {
public static GlobalStorageStatistics getGlobalStorageStatistics() {
return GlobalStorageStatistics.INSTANCE;
}
/**
* Create a new FSDataOutputStreamBuilder for the file with path.
* @param path file path
* @return a FSDataOutputStreamBuilder object to build the file
*/
public FSDataOutputStreamBuilder newFSDataOutputStreamBuilder(Path path) {
return new FSDataOutputStreamBuilder(this, path);
}
}

View File

@ -665,4 +665,9 @@ public class FilterFileSystem extends FileSystem {
public Collection<FileStatus> getTrashRoots(boolean allUsers) {
return fs.getTrashRoots(allUsers);
}
@Override
public FSDataOutputStreamBuilder newFSDataOutputStreamBuilder(Path path) {
return fs.newFSDataOutputStreamBuilder(path);
}
}

View File

@ -57,8 +57,7 @@ public abstract class FilterFs extends AbstractFileSystem {
}
protected FilterFs(AbstractFileSystem fs) throws URISyntaxException {
super(fs.getUri(), fs.getUri().getScheme(),
fs.getUri().getAuthority() != null, fs.getUriDefaultPort());
super(fs.getUri(), fs.getUri().getScheme(), false, fs.getUriDefaultPort());
myFs = fs;
}
@ -147,10 +146,15 @@ public abstract class FilterFs extends AbstractFileSystem {
}
@Override
@Deprecated
public FsServerDefaults getServerDefaults() throws IOException {
return myFs.getServerDefaults();
}
@Override
public FsServerDefaults getServerDefaults(final Path f) throws IOException {
return myFs.getServerDefaults(f);
}
@Override
public Path resolvePath(final Path p) throws FileNotFoundException,

View File

@ -1268,4 +1268,9 @@ public class HarFileSystem extends FileSystem {
public short getDefaultReplication(Path f) {
return fs.getDefaultReplication(f);
}
@Override
public FSDataOutputStreamBuilder newFSDataOutputStreamBuilder(Path path) {
return fs.newFSDataOutputStreamBuilder(path);
}
}

View File

@ -29,6 +29,7 @@ import org.apache.hadoop.fs.AbstractFileSystem;
import org.apache.hadoop.fs.DelegateToFileSystem;
import org.apache.hadoop.fs.FsConstants;
import org.apache.hadoop.fs.FsServerDefaults;
import org.apache.hadoop.fs.Path;
/**
* The FtpFs implementation of AbstractFileSystem.
@ -57,7 +58,13 @@ public class FtpFs extends DelegateToFileSystem {
}
@Override
@Deprecated
public FsServerDefaults getServerDefaults() throws IOException {
return FtpConfigKeys.getServerDefaults();
}
@Override
public FsServerDefaults getServerDefaults(final Path f) throws IOException {
return FtpConfigKeys.getServerDefaults();
}
}

View File

@ -28,6 +28,7 @@ import org.apache.hadoop.fs.AbstractFileSystem;
import org.apache.hadoop.fs.DelegateToFileSystem;
import org.apache.hadoop.fs.FsConstants;
import org.apache.hadoop.fs.FsServerDefaults;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.RawLocalFileSystem;
/**
@ -63,6 +64,13 @@ public class RawLocalFs extends DelegateToFileSystem {
}
@Override
public FsServerDefaults getServerDefaults(final Path f)
throws IOException {
return LocalConfigKeys.getServerDefaults();
}
@Override
@Deprecated
public FsServerDefaults getServerDefaults() throws IOException {
return LocalConfigKeys.getServerDefaults();
}

View File

@ -312,6 +312,13 @@ public class FsPermission implements Writable, Serializable,
return false;
}
/**
* Returns true if the file or directory is erasure coded.
*/
public boolean getErasureCodedBit() {
return false;
}
/** Set the user file creation mask (umask) */
public static void setUMask(Configuration conf, FsPermission umask) {
conf.set(UMASK_LABEL, String.format("%1$03o", umask.toShort()));

View File

@ -43,10 +43,6 @@ public class PermissionStatus implements Writable {
public static PermissionStatus createImmutable(
String user, String group, FsPermission permission) {
return new PermissionStatus(user, group, permission) {
@Override
public PermissionStatus applyUMask(FsPermission umask) {
throw new UnsupportedOperationException();
}
@Override
public void readFields(DataInput in) throws IOException {
throw new UnsupportedOperationException();
@ -76,15 +72,6 @@ public class PermissionStatus implements Writable {
/** Return permission */
public FsPermission getPermission() {return permission;}
/**
* Apply umask.
* @see FsPermission#applyUMask(FsPermission)
*/
public PermissionStatus applyUMask(FsPermission umask) {
permission = permission.applyUMask(umask);
return this;
}
@Override
public void readFields(DataInput in) throws IOException {
username = Text.readString(in, Text.DEFAULT_MAX_LEN);

View File

@ -41,12 +41,13 @@ class SetReplication extends FsCommand {
public static final String NAME = "setrep";
public static final String USAGE = "[-R] [-w] <rep> <path> ...";
public static final String DESCRIPTION =
"Set the replication level of a file. If <path> is a directory " +
"then the command recursively changes the replication factor of " +
"all files under the directory tree rooted at <path>.\n" +
"-w: It requests that the command waits for the replication " +
"to complete. This can potentially take a very long time.\n" +
"-R: It is accepted for backwards compatibility. It has no effect.";
"Set the replication level of a file. If <path> is a directory " +
"then the command recursively changes the replication factor of " +
"all files under the directory tree rooted at <path>. " +
"The EC files will be ignored here.\n" +
"-w: It requests that the command waits for the replication " +
"to complete. This can potentially take a very long time.\n" +
"-R: It is accepted for backwards compatibility. It has no effect.";
protected short newRep = 0;
protected List<PathData> waitList = new LinkedList<PathData>();
@ -84,11 +85,20 @@ class SetReplication extends FsCommand {
}
if (item.stat.isFile()) {
if (!item.fs.setReplication(item.path, newRep)) {
throw new IOException("Could not set replication for: " + item);
// Do the checking if the file is erasure coded since
// replication factor for an EC file is meaningless.
if (!item.stat.isErasureCoded()) {
if (!item.fs.setReplication(item.path, newRep)) {
throw new IOException("Could not set replication for: " + item);
}
out.println("Replication " + newRep + " set: " + item);
if (waitOpt) {
waitList.add(item);
}
} else {
out.println("Did not set replication for: " + item
+ ", because it's an erasure coded file.");
}
out.println("Replication " + newRep + " set: " + item);
if (waitOpt) waitList.add(item);
}
}

View File

@ -67,7 +67,7 @@ class XAttrCommands extends FsCommand {
"0x and 0s, respectively.\n" +
"<path>: The file or directory.\n";
private final static Function<String, XAttrCodec> enValueOfFunc =
Enums.valueOfFunction(XAttrCodec.class);
Enums.stringConverter(XAttrCodec.class);
private String name = null;
private boolean dump = false;

View File

@ -37,8 +37,10 @@ import org.apache.hadoop.fs.FileChecksum;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FsServerDefaults;
import org.apache.hadoop.fs.FsStatus;
import org.apache.hadoop.fs.LocatedFileStatus;
import org.apache.hadoop.fs.Options.ChecksumOpt;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.RemoteIterator;
import org.apache.hadoop.fs.UnresolvedLinkException;
import org.apache.hadoop.fs.XAttrSetFlag;
import org.apache.hadoop.fs.permission.AclEntry;
@ -99,8 +101,7 @@ class ChRootedFs extends AbstractFileSystem {
public ChRootedFs(final AbstractFileSystem fs, final Path theRoot)
throws URISyntaxException {
super(fs.getUri(), fs.getUri().getScheme(),
fs.getUri().getAuthority() != null, fs.getUriDefaultPort());
super(fs.getUri(), fs.getUri().getScheme(), false, fs.getUriDefaultPort());
myFs = fs;
myFs.checkPath(theRoot);
chRootPathPart = new Path(myFs.getUriPath(theRoot));
@ -221,10 +222,16 @@ class ChRootedFs extends AbstractFileSystem {
}
@Override
@Deprecated
public FsServerDefaults getServerDefaults() throws IOException {
return myFs.getServerDefaults();
}
@Override
public FsServerDefaults getServerDefaults(final Path f) throws IOException {
return myFs.getServerDefaults(fullPath(f));
}
@Override
public int getUriDefaultPort() {
return myFs.getUriDefaultPort();
@ -236,6 +243,18 @@ class ChRootedFs extends AbstractFileSystem {
return myFs.listStatus(fullPath(f));
}
@Override
public RemoteIterator<FileStatus> listStatusIterator(final Path f)
throws IOException, UnresolvedLinkException {
return myFs.listStatusIterator(fullPath(f));
}
@Override
public RemoteIterator<LocatedFileStatus> listLocatedStatus(final Path f)
throws IOException, UnresolvedLinkException {
return myFs.listLocatedStatus(fullPath(f));
}
@Override
public void mkdir(final Path dir, final FsPermission permission,
final boolean createParent) throws IOException, UnresolvedLinkException {

View File

@ -44,6 +44,7 @@ import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FsConstants;
import org.apache.hadoop.fs.FsServerDefaults;
import org.apache.hadoop.fs.FsStatus;
import org.apache.hadoop.fs.LocatedFileStatus;
import org.apache.hadoop.fs.Options.ChecksumOpt;
import org.apache.hadoop.fs.ParentNotDirectoryException;
import org.apache.hadoop.fs.Path;
@ -239,10 +240,22 @@ public class ViewFs extends AbstractFileSystem {
}
@Override
@Deprecated
public FsServerDefaults getServerDefaults() throws IOException {
return LocalConfigKeys.getServerDefaults();
}
@Override
public FsServerDefaults getServerDefaults(final Path f) throws IOException {
InodeTree.ResolveResult<AbstractFileSystem> res;
try {
res = fsState.resolve(getUriPath(f), true);
} catch (FileNotFoundException fnfe) {
return LocalConfigKeys.getServerDefaults();
}
return res.targetFileSystem.getServerDefaults(res.remainingPath);
}
@Override
public int getUriDefaultPort() {
return -1;
@ -388,26 +401,32 @@ public class ViewFs extends AbstractFileSystem {
if (res.isInternalDir()) {
return fsIter;
}
return new RemoteIterator<FileStatus>() {
final RemoteIterator<FileStatus> myIter;
final ChRootedFs targetFs;
{ // Init
myIter = fsIter;
targetFs = (ChRootedFs) res.targetFileSystem;
}
return new WrappingRemoteIterator<FileStatus>(res, fsIter, f) {
@Override
public boolean hasNext() throws IOException {
return myIter.hasNext();
public FileStatus getViewFsFileStatus(FileStatus stat, Path newPath) {
return new ViewFsFileStatus(stat, newPath);
}
};
}
@Override
public RemoteIterator<LocatedFileStatus> listLocatedStatus(final Path f)
throws AccessControlException, FileNotFoundException,
UnresolvedLinkException, IOException {
final InodeTree.ResolveResult<AbstractFileSystem> res =
fsState.resolve(getUriPath(f), true);
final RemoteIterator<LocatedFileStatus> fsIter =
res.targetFileSystem.listLocatedStatus(res.remainingPath);
if (res.isInternalDir()) {
return fsIter;
}
return new WrappingRemoteIterator<LocatedFileStatus>(res, fsIter, f) {
@Override
public FileStatus next() throws IOException {
FileStatus status = myIter.next();
String suffix = targetFs.stripOutRoot(status.getPath());
return new ViewFsFileStatus(status, makeQualified(
suffix.length() == 0 ? f : new Path(res.resolvedPath, suffix)));
public LocatedFileStatus getViewFsFileStatus(LocatedFileStatus stat,
Path newPath) {
return new ViewFsLocatedFileStatus(stat, newPath);
}
};
}
@ -773,6 +792,42 @@ public class ViewFs extends AbstractFileSystem {
return res.targetFileSystem.getStoragePolicy(res.remainingPath);
}
/**
* Helper class to perform some transformation on results returned
* from a RemoteIterator.
*/
private abstract class WrappingRemoteIterator<T extends FileStatus>
implements RemoteIterator<T> {
private final String resolvedPath;
private final ChRootedFs targetFs;
private final RemoteIterator<T> innerIter;
private final Path originalPath;
WrappingRemoteIterator(InodeTree.ResolveResult<AbstractFileSystem> res,
RemoteIterator<T> innerIter, Path originalPath) {
this.resolvedPath = res.resolvedPath;
this.targetFs = (ChRootedFs)res.targetFileSystem;
this.innerIter = innerIter;
this.originalPath = originalPath;
}
@Override
public boolean hasNext() throws IOException {
return innerIter.hasNext();
}
@Override
public T next() throws IOException {
T status = innerIter.next();
String suffix = targetFs.stripOutRoot(status.getPath());
Path newPath = makeQualified(suffix.length() == 0 ? originalPath
: new Path(resolvedPath, suffix));
return getViewFsFileStatus(status, newPath);
}
protected abstract T getViewFsFileStatus(T status, Path newPath);
}
/*
* An instance of this class represents an internal dir of the viewFs
* ie internal dir of the mount table.
@ -884,8 +939,14 @@ public class ViewFs extends AbstractFileSystem {
}
@Override
@Deprecated
public FsServerDefaults getServerDefaults() throws IOException {
throw new IOException("FsServerDefaults not implemented yet");
return LocalConfigKeys.getServerDefaults();
}
@Override
public FsServerDefaults getServerDefaults(final Path f) throws IOException {
return LocalConfigKeys.getServerDefaults();
}
@Override

View File

@ -55,7 +55,6 @@ import org.apache.zookeeper.data.ACL;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Preconditions;
import com.google.common.base.Throwables;
import com.google.common.util.concurrent.ThreadFactoryBuilder;
@InterfaceAudience.LimitedPrivate("HDFS")
@ -511,7 +510,7 @@ public abstract class ZKFailoverController {
doFence(target);
} catch (Throwable t) {
recordActiveAttempt(new ActiveAttemptRecord(false, "Unable to fence old active: " + StringUtils.stringifyException(t)));
Throwables.propagate(t);
throw t;
}
}

View File

@ -240,12 +240,15 @@ public class RetryInvocationHandler<T> implements RpcInvocationHandler {
private final long delay;
private final RetryAction action;
private final long expectedFailoverCount;
private final Exception failException;
RetryInfo(long delay, RetryAction action, long expectedFailoverCount) {
RetryInfo(long delay, RetryAction action, long expectedFailoverCount,
Exception failException) {
this.delay = delay;
this.retryTime = Time.monotonicNow() + delay;
this.action = action;
this.expectedFailoverCount = expectedFailoverCount;
this.failException = failException;
}
boolean isFailover() {
@ -258,11 +261,16 @@ public class RetryInvocationHandler<T> implements RpcInvocationHandler {
&& action.action == RetryAction.RetryDecision.FAIL;
}
Exception getFailException() {
return failException;
}
static RetryInfo newRetryInfo(RetryPolicy policy, Exception e,
Counters counters, boolean idempotentOrAtMostOnce,
long expectedFailoverCount) throws Exception {
RetryAction max = null;
long maxRetryDelay = 0;
Exception ex = null;
final Iterable<Exception> exceptions = e instanceof MultiException ?
((MultiException) e).getExceptions().values()
@ -279,10 +287,13 @@ public class RetryInvocationHandler<T> implements RpcInvocationHandler {
if (max == null || max.action.compareTo(a.action) < 0) {
max = a;
if (a.action == RetryAction.RetryDecision.FAIL) {
ex = exception;
}
}
}
return new RetryInfo(maxRetryDelay, max, expectedFailoverCount);
return new RetryInfo(maxRetryDelay, max, expectedFailoverCount, ex);
}
}
@ -359,7 +370,7 @@ public class RetryInvocationHandler<T> implements RpcInvocationHandler {
+ ". Not retrying because " + retryInfo.action.reason, e);
}
}
throw e;
throw retryInfo.getFailException();
}
log(method, retryInfo.isFailover(), counters.failovers, retryInfo.delay, e);

View File

@ -1768,7 +1768,9 @@ public class Client implements AutoCloseable {
}
void setSaslClient(SaslRpcClient client) throws IOException {
setInputStream(client.getInputStream(in));
// Wrap the input stream in a BufferedInputStream to fill the buffer
// before reading its length (HADOOP-14062).
setInputStream(new BufferedInputStream(client.getInputStream(in)));
setOutputStream(client.getOutputStream(out));
}

View File

@ -18,6 +18,7 @@
package org.apache.hadoop.metrics2;
import com.google.common.base.MoreObjects;
import com.google.common.base.Objects;
import static com.google.common.base.Preconditions.*;
@ -84,7 +85,7 @@ public abstract class AbstractMetric implements MetricsInfo {
}
@Override public String toString() {
return Objects.toStringHelper(this)
return MoreObjects.toStringHelper(this)
.add("info", info)
.add("value", value())
.toString();

View File

@ -18,6 +18,7 @@
package org.apache.hadoop.metrics2;
import com.google.common.base.MoreObjects;
import com.google.common.base.Objects;
import static com.google.common.base.Preconditions.*;
@ -80,7 +81,7 @@ public class MetricsTag implements MetricsInfo {
}
@Override public String toString() {
return Objects.toStringHelper(this)
return MoreObjects.toStringHelper(this)
.add("info", info)
.add("value", value())
.toString();

View File

@ -18,6 +18,7 @@
package org.apache.hadoop.metrics2.impl;
import com.google.common.base.MoreObjects;
import com.google.common.base.Objects;
import com.google.common.collect.Iterables;
@ -43,7 +44,7 @@ abstract class AbstractMetricsRecord implements MetricsRecord {
}
@Override public String toString() {
return Objects.toStringHelper(this)
return MoreObjects.toStringHelper(this)
.add("timestamp", timestamp())
.add("name", name())
.add("description", description())

View File

@ -18,7 +18,7 @@
package org.apache.hadoop.metrics2.impl;
import com.google.common.base.Objects;
import com.google.common.base.MoreObjects;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.metrics2.MetricsInfo;
@ -48,7 +48,7 @@ public enum MsInfo implements MetricsInfo {
}
@Override public String toString() {
return Objects.toStringHelper(this)
return MoreObjects.toStringHelper(this)
.add("name", name()).add("description", desc)
.toString();
}

View File

@ -18,6 +18,7 @@
package org.apache.hadoop.metrics2.lib;
import com.google.common.base.MoreObjects;
import com.google.common.base.Objects;
import static com.google.common.base.Preconditions.*;
import org.apache.hadoop.metrics2.MetricsInfo;
@ -55,7 +56,7 @@ class MetricsInfoImpl implements MetricsInfo {
}
@Override public String toString() {
return Objects.toStringHelper(this)
return MoreObjects.toStringHelper(this)
.add("name", name).add("description", description)
.toString();
}

View File

@ -22,7 +22,7 @@ import java.util.Collection;
import java.util.Map;
import com.google.common.collect.Maps;
import com.google.common.base.Objects;
import com.google.common.base.MoreObjects;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
@ -416,7 +416,7 @@ public class MetricsRegistry {
}
@Override public String toString() {
return Objects.toStringHelper(this)
return MoreObjects.toStringHelper(this)
.add("info", metricsInfo).add("tags", tags()).add("metrics", metrics())
.toString();
}

View File

@ -18,11 +18,11 @@
package org.apache.hadoop.metrics2.source;
import com.google.common.base.Objects;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.metrics2.MetricsInfo;
import com.google.common.base.MoreObjects;
/**
* JVM and logging related metrics info instances
*/
@ -60,7 +60,7 @@ public enum JvmMetricsInfo implements MetricsInfo {
@Override public String description() { return desc; }
@Override public String toString() {
return Objects.toStringHelper(this)
return MoreObjects.toStringHelper(this)
.add("name", name()).add("description", desc)
.toString();
}

View File

@ -31,7 +31,7 @@ import org.apache.hadoop.metrics2.AbstractMetric;
import org.apache.hadoop.metrics2.MetricsRecord;
import org.apache.hadoop.metrics2.MetricsTag;
import com.google.common.base.Objects;
import com.google.common.base.MoreObjects;
import com.google.common.collect.Maps;
/**
@ -127,7 +127,7 @@ public class MetricsCache {
}
@Override public String toString() {
return Objects.toStringHelper(this)
return MoreObjects.toStringHelper(this)
.add("tags", tags).add("metrics", metrics)
.toString();
}

View File

@ -63,7 +63,7 @@ public class InnerNodeImpl extends NodeBase implements InnerNode {
/** Judge if this node represents a rack
* @return true if it has no child or its children are not InnerNodes
*/
boolean isRack() {
public boolean isRack() {
if (children.isEmpty()) {
return true;
}
@ -81,7 +81,7 @@ public class InnerNodeImpl extends NodeBase implements InnerNode {
* @param n a node
* @return true if this node is an ancestor of <i>n</i>
*/
protected boolean isAncestor(Node n) {
public boolean isAncestor(Node n) {
return getPath(this).equals(NodeBase.PATH_SEPARATOR_STR) ||
(n.getNetworkLocation()+NodeBase.PATH_SEPARATOR_STR).
startsWith(getPath(this)+NodeBase.PATH_SEPARATOR_STR);
@ -92,12 +92,12 @@ public class InnerNodeImpl extends NodeBase implements InnerNode {
* @param n a node
* @return true if this node is the parent of <i>n</i>
*/
protected boolean isParent(Node n) {
public boolean isParent(Node n) {
return n.getNetworkLocation().equals(getPath(this));
}
/* Return a child name of this node who is an ancestor of node <i>n</i> */
protected String getNextAncestorName(Node n) {
public String getNextAncestorName(Node n) {
if (!isAncestor(n)) {
throw new IllegalArgumentException(
this + "is not an ancestor of " + n);

View File

@ -496,7 +496,7 @@ public class NetworkTopology {
}
}
private Node chooseRandom(final String scope, String excludedScope,
protected Node chooseRandom(final String scope, String excludedScope,
final Collection<Node> excludedNodes) {
if (excludedScope != null) {
if (scope.startsWith(excludedScope)) {

View File

@ -308,7 +308,7 @@ public class NetworkTopologyWithNodeGroup extends NetworkTopology {
}
@Override
boolean isRack() {
public boolean isRack() {
// it is node group
if (getChildren().isEmpty()) {
return false;

View File

@ -45,7 +45,7 @@ public class NodeBase implements Node {
/** Construct a node from its path
* @param path
* a concatenation of this node's location, the path seperator, and its name
* a concatenation of this node's location, the path separator, and its name
*/
public NodeBase(String path) {
path = normalize(path);

View File

@ -321,11 +321,7 @@ public final class DomainSocketWatcher implements Closeable {
toAdd.add(entry);
kick();
while (true) {
try {
processedCond.await();
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
}
processedCond.awaitUninterruptibly();
if (!toAdd.contains(entry)) {
break;
}
@ -347,11 +343,7 @@ public final class DomainSocketWatcher implements Closeable {
toRemove.put(sock.fd, sock);
kick();
while (true) {
try {
processedCond.await();
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
}
processedCond.awaitUninterruptibly();
if (!toRemove.containsKey(sock.fd)) {
break;
}

View File

@ -56,12 +56,16 @@ public abstract class CredentialProviderFactory {
try {
URI uri = new URI(path);
boolean found = false;
for(CredentialProviderFactory factory: serviceLoader) {
CredentialProvider kp = factory.createProvider(uri, conf);
if (kp != null) {
result.add(kp);
found = true;
break;
// Iterate serviceLoader in a synchronized block since
// serviceLoader iterator is not thread-safe.
synchronized (serviceLoader) {
for (CredentialProviderFactory factory : serviceLoader) {
CredentialProvider kp = factory.createProvider(uri, conf);
if (kp != null) {
result.add(kp);
found = true;
break;
}
}
}
if (!found) {

View File

@ -269,7 +269,7 @@ public final class ConfTest {
} else {
String confDirName = System.getenv(HADOOP_CONF_DIR);
if (confDirName == null) {
terminate(1, HADOOP_CONF_DIR + " does not defined");
terminate(1, HADOOP_CONF_DIR + " is not defined");
}
File confDir = new File(confDirName);
if (!confDir.isDirectory()) {

View File

@ -28,7 +28,7 @@ import org.apache.hadoop.classification.InterfaceStability;
* to explicitly report progress to the Hadoop framework. This is especially
* important for operations which take significant amount of time since,
* in-lieu of the reported progress, the framework has to assume that an error
* has occured and time-out the operation.</p>
* has occurred and time-out the operation.</p>
*/
@InterfaceAudience.Public
@InterfaceStability.Stable

View File

@ -372,8 +372,8 @@ public class StringUtils {
/**
* Returns an arraylist of strings.
* @param str the comma seperated string values
* @return the arraylist of the comma seperated string values
* @param str the comma separated string values
* @return the arraylist of the comma separated string values
*/
public static String[] getStrings(String str){
String delim = ",";
@ -384,7 +384,7 @@ public class StringUtils {
* Returns an arraylist of strings.
* @param str the string values
* @param delim delimiter to separate the values
* @return the arraylist of the seperated string values
* @return the arraylist of the separated string values
*/
public static String[] getStrings(String str, String delim){
Collection<String> values = getStringCollection(str, delim);
@ -396,7 +396,7 @@ public class StringUtils {
/**
* Returns a collection of strings.
* @param str comma seperated string values
* @param str comma separated string values
* @return an <code>ArrayList</code> of string values
*/
public static Collection<String> getStringCollection(String str){

View File

@ -30,7 +30,7 @@ public class UTF8ByteArrayUtils {
* @param start starting offset
* @param end ending position
* @param b the byte to find
* @return position that first byte occures otherwise -1
* @return position that first byte occurs, otherwise -1
*/
public static int findByte(byte [] utf, int start, int end, byte b) {
for(int i=start; i<end; i++) {
@ -47,7 +47,7 @@ public class UTF8ByteArrayUtils {
* @param start starting offset
* @param end ending position
* @param b the bytes to find
* @return position that first byte occures otherwise -1
* @return position that first byte occurs, otherwise -1
*/
public static int findBytes(byte [] utf, int start, int end, byte[] b) {
int matchEnd = end - b.length;

View File

@ -19,7 +19,6 @@
package org.apache.hadoop.util.curator;
import com.google.common.base.Preconditions;
import com.google.common.collect.Sets;
import org.apache.curator.framework.recipes.locks.Reaper;
import org.apache.curator.utils.CloseableUtils;
import org.apache.curator.framework.CuratorFramework;
@ -34,6 +33,7 @@ import org.slf4j.LoggerFactory;
import java.io.Closeable;
import java.io.IOException;
import java.util.Collection;
import java.util.Collections;
import java.util.List;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
@ -82,7 +82,7 @@ public class ChildReaper implements Closeable
* @since 15.0
*/
public static <E> Set<E> newConcurrentHashSet() {
return Sets.newSetFromMap(new ConcurrentHashMap<E, Boolean>());
return Collections.newSetFromMap(new ConcurrentHashMap<E, Boolean>());
}
private enum State

View File

@ -8298,7 +8298,7 @@ FilePath FilePath::RemoveExtension(const char* extension) const {
return *this;
}
// Returns a pointer to the last occurence of a valid path separator in
// Returns a pointer to the last occurrence of a valid path separator in
// the FilePath. On Windows, for example, both '/' and '\' are valid path
// separators. Returns NULL if no path separator was found.
const char* FilePath::FindLastPathSeparator() const {

View File

@ -4457,7 +4457,7 @@ class GTEST_API_ FilePath {
void Normalize();
// Returns a pointer to the last occurence of a valid path separator in
// Returns a pointer to the last occurrence of a valid path separator in
// the FilePath. On Windows, for example, both '/' and '\' are valid path
// separators. Returns NULL if no path separator was found.
const char* FindLastPathSeparator() const;

View File

@ -849,6 +849,12 @@
</description>
</property>
<property>
<name>fs.swift.impl</name>
<value>org.apache.hadoop.fs.swift.snative.SwiftNativeFileSystem</value>
<description>The implementation class of the OpenStack Swift Filesystem</description>
</property>
<property>
<name>fs.automatic.close</name>
<value>true</value>

View File

@ -647,7 +647,7 @@ setrep
Usage: `hadoop fs -setrep [-R] [-w] <numReplicas> <path> `
Changes the replication factor of a file. If *path* is a directory then the command recursively changes the replication factor of all files under the directory tree rooted at *path*.
Changes the replication factor of a file. If *path* is a directory then the command recursively changes the replication factor of all files under the directory tree rooted at *path*. The EC files will be ignored when executing this command.
Options:

View File

@ -86,11 +86,15 @@ Get the status of a path
stat.length = 0
stat.isdir = False
stat.symlink = FS.Symlinks[p]
if inEncryptionZone(FS, p) :
stat.isEncrypted = True
else
stat.isEncrypted = False
stat.hasAcl = hasACL(FS, p)
stat.isEncrypted = inEncryptionZone(FS, p)
stat.isErasureCoded = isErasureCoded(FS, p)
The `FileStatus` returned for the path additionally carries details on
ACL, encryption and erasure coding. `getFileStatus(Path p).hasAcl()`
can be queried to find whether the path has an ACL. `getFileStatus(Path p).isEncrypted()`
can be queried to find whether the path is encrypted. `getFileStatus(Path p).isErasureCoded()`
will tell whether the path is erasure coded.
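A minimal sketch of querying these flags, assuming an already configured default
`FileSystem` and an illustrative path (not taken from this specification):

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class FileStatusFlags {
  public static void main(String[] args) throws Exception {
    // Uses whatever default filesystem the local configuration points at.
    FileSystem fs = FileSystem.get(new Configuration());
    FileStatus status = fs.getFileStatus(new Path("/user/example/data.txt"));
    System.out.println("hasAcl         = " + status.hasAcl());
    System.out.println("isEncrypted    = " + status.isEncrypted());
    System.out.println("isErasureCoded = " + status.isErasureCoded());
  }
}
```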
### `Path getHomeDirectory()`

View File

@ -392,3 +392,88 @@ Object stores with these characteristics, can not be used as a direct replacemen
for HDFS. In terms of this specification, their implementations of the
specified operations do not match those required. They are considered supported
by the Hadoop development community, but not to the same extent as HDFS.
#### Timestamps
`FileStatus` entries have a modification time and an access time.
1. The exact behavior as to when these timestamps are set and whether or not they are valid
varies between filesystems, and potentially between individual installations of a filesystem.
1. The granularity of the timestamps is, again, specific to both a filesystem
and potentially to individual installations.
The HDFS filesystem does not update the modification time while it is being written to.
Specifically
* `FileSystem.create()` creation: a zero-byte file is listed; the modification time is
set to the current time as seen on the NameNode.
* Writes to a file via the output stream returned in the `create()` call: the modification
time *does not change*.
* When `OutputStream.close()` is called, all remaining data is written, the file closed and
the NameNode updated with the final size of the file. The modification time is set to
the time the file was closed.
* Opening a file for appends via an `append()` operation does not change the modification
time of the file until the `close()` call is made on the output stream.
* `FileSystem.setTimes()` can be used to explicitly set the time on a file.
* When a file is renamed, its modification time is not changed, but the source
and destination directories have their modification times updated.
* The rarely used operations: `FileSystem.concat()`, `createSnapshot()`,
`createSymlink()` and `truncate()` all update the modification time.
* The access time granularity is set in milliseconds via `dfs.namenode.access.time.precision`;
the default granularity is 1 hour. If the precision is set to zero, access times
are not recorded.
* If a modification or access time is not set, the value of that `FileStatus`
field is 0.
Other filesystems may have different behaviors. In particular,
* Access times may or may not be supported; even if the underlying FS supports access times,
the option is often disabled for performance reasons.
* The granularity of the timestamps is an implementation-specific detail.
Object stores have an even vaguer view of time, which can be summarized as
"it varies".
* The timestamp granularity is likely to be 1 second, that being the granularity
of timestamps returned in HTTP HEAD and GET requests.
* Access times are likely to be unset. That is, `FileStatus.getAccessTime() == 0`.
* The modification timestamp for a newly created file MAY be that of the
`create()` call, or the actual time at which the PUT request was initiated.
This may be in the `FileSystem.create()` call, the final
`OutputStream.close()` operation, or some period in between.
* The modification time may not be updated in the `close()` call.
* The timestamp is likely to be in UTC or the TZ of the object store. If the
client is in a different timezone, the timestamp of objects may be ahead or
behind that of the client.
* Object stores with cached metadata databases (for example: AWS S3 with
an in-memory or a DynamoDB metadata store) may have timestamps generated
from the local system clock, rather than that of the service.
This is an optimization to avoid round-trip calls to the object stores.
* A file's modification time is often the same as its creation time.
* The `FileSystem.setTimes()` operation to set file timestamps *may* be ignored.
* `FileSystem.chmod()` may update modification times (example: Azure `wasb://`).
* If `FileSystem.append()` is supported, the changes and modification time
are likely to only become visible after the output stream is closed.
* Out-of-band operations to data in object stores (that is: direct requests
to object stores which bypass the Hadoop FileSystem APIs), may result
in different timestamps being stored and/or returned.
* As the notion of a directory structure is often simulated, the timestamps
of directories *may* be artificially generated, perhaps using the current
system time.
* As `rename()` operations are often implemented as a COPY + DELETE, the
timestamps of renamed objects may become that of the time the rename of an
object was started, rather than the timestamp of the source object.
* The exact timestamp behavior may vary between different object store installations,
even with the same object store client.
Finally, note that the Apache Hadoop project cannot make any guarantees about
whether the timestamp behavior of a remote object store will remain consistent
over time: they are third-party services, usually accessed via third-party libraries.
The best strategy here is "experiment with the exact endpoint you intend to work with".
Furthermore, if you intend to use any caching/consistency layer, test with that
feature enabled. Retest after updates to Hadoop releases, and endpoint object
store updates.
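One way to run such an experiment is a small probe along the following lines;
the filesystem URI and path are placeholders, and the timestamps printed (or
the visibility of the file before `close()`) depend entirely on the store
under test:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

import java.io.IOException;
import java.net.URI;

public class TimestampProbe {

  // Print modification and access times, or note that the path is not yet
  // visible -- some object stores do not list a file until close().
  private static void report(FileSystem fs, Path p, String stage) {
    try {
      FileStatus st = fs.getFileStatus(p);
      System.out.printf("%-13s mtime=%d atime=%d%n",
          stage, st.getModificationTime(), st.getAccessTime());
    } catch (IOException e) {
      System.out.printf("%-13s not visible yet (%s)%n", stage, e);
    }
  }

  public static void main(String[] args) throws Exception {
    // Placeholder endpoint; point this at the filesystem under test.
    FileSystem fs = FileSystem.get(URI.create("hdfs://namenode:8020/"),
        new Configuration());
    Path p = new Path("/tmp/timestamp-probe.txt");

    FSDataOutputStream out = fs.create(p, true);
    report(fs, p, "after create:");
    out.writeBytes("probe data");
    report(fs, p, "after write:");
    out.close();
    report(fs, p, "after close:");

    fs.delete(p, false);
  }
}
```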

View File

@ -298,6 +298,11 @@ public class CLITestHelper {
return compareOutput;
}
private boolean compareTextExitCode(ComparatorData compdata,
Result cmdResult) {
return compdata.getExitCode() == cmdResult.getExitCode();
}
/***********************************
************* TESTS RUNNER
@ -330,10 +335,17 @@ public class CLITestHelper {
final String comptype = cd.getComparatorType();
boolean compareOutput = false;
boolean compareExitCode = false;
if (! comptype.equalsIgnoreCase("none")) {
compareOutput = compareTestOutput(cd, cmdResult);
overallTCResult &= compareOutput;
if (cd.getExitCode() == -1) {
// No need to check exit code if not specified
compareExitCode = true;
} else {
compareExitCode = compareTextExitCode(cd, cmdResult);
}
overallTCResult &= (compareOutput & compareExitCode);
}
cd.setExitCode(cmdResult.getExitCode());
@ -391,6 +403,7 @@ public class CLITestHelper {
testComparators = new ArrayList<ComparatorData>();
} else if (qName.equals("comparator")) {
comparatorData = new ComparatorData();
comparatorData.setExitCode(-1);
}
charString = "";
}
@ -422,6 +435,8 @@ public class CLITestHelper {
comparatorData.setComparatorType(charString);
} else if (qName.equals("expected-output")) {
comparatorData.setExpectedOutput(charString);
} else if (qName.equals("expected-exit-code")) {
comparatorData.setExitCode(Integer.valueOf(charString));
} else if (qName.equals("test")) {
if (!Shell.WINDOWS || runOnWindows) {
testsFromConfigFile.add(td);

View File

@ -75,7 +75,7 @@ public abstract class CommandExecutor {
System.setErr(new PrintStream(bao));
try {
execute(cmd);
exitCode = execute(cmd);
} catch (Exception e) {
e.printStackTrace();
lastException = e;
@ -87,7 +87,7 @@ public abstract class CommandExecutor {
return new Result(bao.toString(), exitCode, lastException, cmd);
}
protected abstract void execute(final String cmd) throws Exception;
protected abstract int execute(String cmd) throws Exception;
public static class Result {
final String commandOutput;

View File

@ -30,8 +30,8 @@ public class FSCmdExecutor extends CommandExecutor {
}
@Override
protected void execute(final String cmd) throws Exception{
protected int execute(final String cmd) throws Exception{
String[] args = getCommandAsArgs(cmd, "NAMENODE", this.namenode);
ToolRunner.run(shell, args);
return ToolRunner.run(shell, args);
}
}

View File

@ -99,10 +99,22 @@ public class TestConfiguration extends TestCase {
out.close();
}
private void addInclude(String filename) throws IOException{
out.write("<xi:include href=\"" + filename + "\" xmlns:xi=\"http://www.w3.org/2001/XInclude\" />\n ");
private void startInclude(String filename) throws IOException {
out.write("<xi:include href=\"" + filename + "\" xmlns:xi=\"http://www.w3.org/2001/XInclude\" >\n ");
}
private void endInclude() throws IOException{
out.write("</xi:include>\n ");
}
private void startFallback() throws IOException {
out.write("<xi:fallback>\n ");
}
private void endFallback() throws IOException {
out.write("</xi:fallback>\n ");
}
public void testInputStreamResource() throws Exception {
StringWriter writer = new StringWriter();
out = new BufferedWriter(writer);
@ -507,7 +519,8 @@ public class TestConfiguration extends TestCase {
out=new BufferedWriter(new FileWriter(CONFIG));
startConfig();
addInclude(CONFIG2);
startInclude(CONFIG2);
endInclude();
appendProperty("e","f");
appendProperty("g","h");
endConfig();
@ -522,6 +535,44 @@ public class TestConfiguration extends TestCase {
tearDown();
}
public void testIncludesWithFallback() throws Exception {
tearDown();
out=new BufferedWriter(new FileWriter(CONFIG2));
startConfig();
appendProperty("a","b");
appendProperty("c","d");
endConfig();
out=new BufferedWriter(new FileWriter(CONFIG));
startConfig();
startInclude(CONFIG2);
startFallback();
appendProperty("a", "b.fallback");
appendProperty("c", "d.fallback", true);
endFallback();
endInclude();
appendProperty("e","f");
appendProperty("g","h");
startInclude("MissingConfig.xml");
startFallback();
appendProperty("i", "j.fallback");
appendProperty("k", "l.fallback", true);
endFallback();
endInclude();
endConfig();
// verify that the includes file contains all properties
Path fileResource = new Path(CONFIG);
conf.addResource(fileResource);
assertEquals("b", conf.get("a"));
assertEquals("d", conf.get("c"));
assertEquals("f", conf.get("e"));
assertEquals("h", conf.get("g"));
assertEquals("j.fallback", conf.get("i"));
assertEquals("l.fallback", conf.get("k"));
tearDown();
}
public void testRelativeIncludes() throws Exception {
tearDown();
String relConfig = new File("./tmp/test-config.xml").getAbsolutePath();
@ -536,7 +587,8 @@ public class TestConfiguration extends TestCase {
out = new BufferedWriter(new FileWriter(relConfig));
startConfig();
// Add the relative path instead of the absolute one.
addInclude(new File(relConfig2).getName());
startInclude(new File(relConfig2).getName());
endInclude();
appendProperty("c", "d");
endConfig();

View File

@ -36,6 +36,8 @@ import org.junit.Assert;
import static org.junit.Assert.*;
import org.junit.Before;
import org.junit.Test;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import static org.apache.hadoop.fs.FileContextTestHelper.*;
import static org.apache.hadoop.fs.CreateFlag.*;
@ -60,6 +62,9 @@ import static org.apache.hadoop.fs.CreateFlag.*;
* </p>
*/
public abstract class FileContextMainOperationsBaseTest {
protected static final Logger LOG =
LoggerFactory.getLogger(FileContextMainOperationsBaseTest.class);
private static String TEST_DIR_AAA2 = "test/hadoop2/aaa";
private static String TEST_DIR_AAA = "test/hadoop/aaa";
@ -111,9 +116,19 @@ public abstract class FileContextMainOperationsBaseTest {
@After
public void tearDown() throws Exception {
if (fc != null) {
boolean del = fc.delete(new Path(fileContextTestHelper.getAbsoluteTestRootPath(fc), new Path("test")), true);
assertTrue(del);
fc.delete(localFsRootPath, true);
final Path testRoot = fileContextTestHelper.getAbsoluteTestRootPath(fc);
LOG.info("Deleting test root path {}", testRoot);
try {
fc.delete(testRoot, true);
} catch (Exception e) {
LOG.error("Error when deleting test root path " + testRoot, e);
}
try {
fc.delete(localFsRootPath, true);
} catch (Exception e) {
LOG.error("Error when deleting localFsRootPath " + localFsRootPath, e);
}
}
}

View File

@ -24,8 +24,9 @@ import java.util.ArrayList;
import junit.framework.TestCase;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.security.AccessControlException;
@ -45,8 +46,8 @@ import org.apache.hadoop.util.StringUtils;
* </p>
*/
public abstract class FileSystemContractBaseTest extends TestCase {
private static final Log LOG =
LogFactory.getLog(FileSystemContractBaseTest.class);
private static final Logger LOG =
LoggerFactory.getLogger(FileSystemContractBaseTest.class);
protected final static String TEST_UMASK = "062";
protected FileSystem fs;
@ -54,15 +55,46 @@ public abstract class FileSystemContractBaseTest extends TestCase {
@Override
protected void tearDown() throws Exception {
try {
if (fs != null) {
fs.delete(path("/test"), true);
if (fs != null) {
// some cases use this absolute path
if (rootDirTestEnabled()) {
cleanupDir(path("/FileSystemContractBaseTest"));
}
// others use this relative path against test base directory
cleanupDir(getTestBaseDir());
}
super.tearDown();
}
private void cleanupDir(Path p) {
try {
LOG.info("Deleting " + p);
fs.delete(p, true);
} catch (IOException e) {
LOG.error("Error deleting /test: " + e, e);
LOG.error("Error deleting test dir: " + p, e);
}
}
/**
* Test base directory for resolving relative test paths.
*
* The default value is /user/$USER/FileSystemContractBaseTest. Subclasses may
* set a specific test base directory.
*/
protected Path getTestBaseDir() {
return new Path(fs.getWorkingDirectory(), "FileSystemContractBaseTest");
}
/**
* For absolute path return the fully qualified path while for relative path
* return the fully qualified path against {@link #getTestBaseDir()}.
*/
protected final Path path(String pathString) {
Path p = new Path(pathString).makeQualified(fs.getUri(), getTestBaseDir());
LOG.info("Resolving {} -> {}", pathString, p);
return p;
}
protected int getBlockSize() {
return 1024;
}
@ -80,6 +112,17 @@ public abstract class FileSystemContractBaseTest extends TestCase {
return true;
}
/**
* Override this if the filesystem does not support tests against the root
* directory.
*
* If this returns true, the tests will create and delete test directories and
* files under the root directory, which may have side effects, e.g. failing
* tests with PermissionDenied exceptions.
*/
protected boolean rootDirTestEnabled() {
return true;
}
/**
* Override this if the filesystem is not case sensitive
* @return true if the case detection/preservation tests should run
@ -102,24 +145,24 @@ public abstract class FileSystemContractBaseTest extends TestCase {
Path workDir = path(getDefaultWorkingDirectory());
assertEquals(workDir, fs.getWorkingDirectory());
fs.setWorkingDirectory(path("."));
fs.setWorkingDirectory(fs.makeQualified(new Path(".")));
assertEquals(workDir, fs.getWorkingDirectory());
fs.setWorkingDirectory(path(".."));
fs.setWorkingDirectory(fs.makeQualified(new Path("..")));
assertEquals(workDir.getParent(), fs.getWorkingDirectory());
Path relativeDir = path("hadoop");
Path relativeDir = fs.makeQualified(new Path("testWorkingDirectory"));
fs.setWorkingDirectory(relativeDir);
assertEquals(relativeDir, fs.getWorkingDirectory());
Path absoluteDir = path("/test/hadoop");
Path absoluteDir = path("/FileSystemContractBaseTest/testWorkingDirectory");
fs.setWorkingDirectory(absoluteDir);
assertEquals(absoluteDir, fs.getWorkingDirectory());
}
public void testMkdirs() throws Exception {
Path testDir = path("/test/hadoop");
Path testDir = path("testMkdirs");
assertFalse(fs.exists(testDir));
assertFalse(fs.isFile(testDir));
@ -145,14 +188,15 @@ public abstract class FileSystemContractBaseTest extends TestCase {
}
public void testMkdirsFailsForSubdirectoryOfExistingFile() throws Exception {
Path testDir = path("/test/hadoop");
Path testDir = path("testMkdirsFailsForSubdirectoryOfExistingFile");
assertFalse(fs.exists(testDir));
assertTrue(fs.mkdirs(testDir));
assertTrue(fs.exists(testDir));
createFile(path("/test/hadoop/file"));
createFile(path("testMkdirsFailsForSubdirectoryOfExistingFile/file"));
Path testSubDir = path("/test/hadoop/file/subdir");
Path testSubDir = path(
"testMkdirsFailsForSubdirectoryOfExistingFile/file/subdir");
try {
fs.mkdirs(testSubDir);
fail("Should throw IOException.");
@ -167,7 +211,8 @@ public abstract class FileSystemContractBaseTest extends TestCase {
// file missing execute permission.
}
Path testDeepSubDir = path("/test/hadoop/file/deep/sub/dir");
Path testDeepSubDir = path(
"testMkdirsFailsForSubdirectoryOfExistingFile/file/deep/sub/dir");
try {
fs.mkdirs(testDeepSubDir);
fail("Should throw IOException.");
@ -190,7 +235,7 @@ public abstract class FileSystemContractBaseTest extends TestCase {
String oldUmask = conf.get(CommonConfigurationKeys.FS_PERMISSIONS_UMASK_KEY);
try {
conf.set(CommonConfigurationKeys.FS_PERMISSIONS_UMASK_KEY, TEST_UMASK);
final Path dir = path("/test/newDir");
final Path dir = path("newDir");
assertTrue(fs.mkdirs(dir, new FsPermission((short) 0777)));
FileStatus status = fs.getFileStatus(dir);
assertTrue(status.isDirectory());
@ -223,7 +268,8 @@ public abstract class FileSystemContractBaseTest extends TestCase {
public void testGetFileStatusThrowsExceptionForNonExistentFile()
throws Exception {
try {
fs.getFileStatus(path("/test/hadoop/file"));
fs.getFileStatus(
path("testGetFileStatusThrowsExceptionForNonExistentFile/file"));
fail("Should throw FileNotFoundException");
} catch (FileNotFoundException e) {
// expected
@ -232,7 +278,8 @@ public abstract class FileSystemContractBaseTest extends TestCase {
public void testListStatusThrowsExceptionForNonExistentFile() throws Exception {
try {
fs.listStatus(path("/test/hadoop/file"));
fs.listStatus(
path("testListStatusThrowsExceptionForNonExistentFile/file"));
fail("Should throw FileNotFoundException");
} catch (FileNotFoundException fnfe) {
// expected
@ -240,30 +287,32 @@ public abstract class FileSystemContractBaseTest extends TestCase {
}
public void testListStatus() throws Exception {
Path[] testDirs = { path("/test/hadoop/a"),
path("/test/hadoop/b"),
path("/test/hadoop/c/1"), };
final Path[] testDirs = {
path("testListStatus/a"),
path("testListStatus/b"),
path("testListStatus/c/1")
};
assertFalse(fs.exists(testDirs[0]));
for (Path path : testDirs) {
assertTrue(fs.mkdirs(path));
}
FileStatus[] paths = fs.listStatus(path("/test"));
FileStatus[] paths = fs.listStatus(path("."));
assertEquals(1, paths.length);
assertEquals(path("/test/hadoop"), paths[0].getPath());
assertEquals(path("testListStatus"), paths[0].getPath());
paths = fs.listStatus(path("/test/hadoop"));
paths = fs.listStatus(path("testListStatus"));
assertEquals(3, paths.length);
ArrayList<Path> list = new ArrayList<Path>();
for (FileStatus fileState : paths) {
list.add(fileState.getPath());
}
assertTrue(list.contains(path("/test/hadoop/a")));
assertTrue(list.contains(path("/test/hadoop/b")));
assertTrue(list.contains(path("/test/hadoop/c")));
assertTrue(list.contains(path("testListStatus/a")));
assertTrue(list.contains(path("testListStatus/b")));
assertTrue(list.contains(path("testListStatus/c")));
paths = fs.listStatus(path("/test/hadoop/a"));
paths = fs.listStatus(path("testListStatus/a"));
assertEquals(0, paths.length);
}
@ -294,12 +343,12 @@ public abstract class FileSystemContractBaseTest extends TestCase {
* @throws IOException on IO failures
*/
protected void writeReadAndDelete(int len) throws IOException {
Path path = path("/test/hadoop/file");
Path path = path("writeReadAndDelete/file");
writeAndRead(path, data, len, false, true);
}
public void testOverwrite() throws IOException {
Path path = path("/test/hadoop/file");
Path path = path("testOverwrite/file");
fs.mkdirs(path.getParent());
@ -325,7 +374,7 @@ public abstract class FileSystemContractBaseTest extends TestCase {
}
public void testWriteInNonExistentDirectory() throws IOException {
Path path = path("/test/hadoop/file");
Path path = path("testWriteInNonExistentDirectory/file");
assertFalse("Parent exists", fs.exists(path.getParent()));
createFile(path);
@ -335,15 +384,15 @@ public abstract class FileSystemContractBaseTest extends TestCase {
}
public void testDeleteNonExistentFile() throws IOException {
Path path = path("/test/hadoop/file");
Path path = path("testDeleteNonExistentFile/file");
assertFalse("Path exists: " + path, fs.exists(path));
assertFalse("No deletion", fs.delete(path, true));
}
public void testDeleteRecursively() throws IOException {
Path dir = path("/test/hadoop");
Path file = path("/test/hadoop/file");
Path subdir = path("/test/hadoop/subdir");
Path dir = path("testDeleteRecursively");
Path file = path("testDeleteRecursively/file");
Path subdir = path("testDeleteRecursively/subdir");
createFile(file);
assertTrue("Created subdir", fs.mkdirs(subdir));
@ -369,7 +418,7 @@ public abstract class FileSystemContractBaseTest extends TestCase {
}
public void testDeleteEmptyDirectory() throws IOException {
Path dir = path("/test/hadoop");
Path dir = path("testDeleteEmptyDirectory");
assertTrue(fs.mkdirs(dir));
assertTrue("Dir exists", fs.exists(dir));
assertTrue("Deleted", fs.delete(dir, false));
@ -379,26 +428,26 @@ public abstract class FileSystemContractBaseTest extends TestCase {
public void testRenameNonExistentPath() throws Exception {
if (!renameSupported()) return;
Path src = path("/test/hadoop/path");
Path dst = path("/test/new/newpath");
Path src = path("testRenameNonExistentPath/path");
Path dst = path("testRenameNonExistentPathNew/newpath");
rename(src, dst, false, false, false);
}
public void testRenameFileMoveToNonExistentDirectory() throws Exception {
if (!renameSupported()) return;
Path src = path("/test/hadoop/file");
Path src = path("testRenameFileMoveToNonExistentDirectory/file");
createFile(src);
Path dst = path("/test/new/newfile");
Path dst = path("testRenameFileMoveToNonExistentDirectoryNew/newfile");
rename(src, dst, false, true, false);
}
public void testRenameFileMoveToExistingDirectory() throws Exception {
if (!renameSupported()) return;
Path src = path("/test/hadoop/file");
Path src = path("testRenameFileMoveToExistingDirectory/file");
createFile(src);
Path dst = path("/test/new/newfile");
Path dst = path("testRenameFileMoveToExistingDirectoryNew/newfile");
fs.mkdirs(dst.getParent());
rename(src, dst, true, false, true);
}
@ -406,9 +455,9 @@ public abstract class FileSystemContractBaseTest extends TestCase {
public void testRenameFileAsExistingFile() throws Exception {
if (!renameSupported()) return;
Path src = path("/test/hadoop/file");
Path src = path("testRenameFileAsExistingFile/file");
createFile(src);
Path dst = path("/test/new/newfile");
Path dst = path("testRenameFileAsExistingFileNew/newfile");
createFile(dst);
rename(src, dst, false, true, true);
}
@ -416,83 +465,81 @@ public abstract class FileSystemContractBaseTest extends TestCase {
public void testRenameFileAsExistingDirectory() throws Exception {
if (!renameSupported()) return;
Path src = path("/test/hadoop/file");
Path src = path("testRenameFileAsExistingDirectory/file");
createFile(src);
Path dst = path("/test/new/newdir");
Path dst = path("testRenameFileAsExistingDirectoryNew/newdir");
fs.mkdirs(dst);
rename(src, dst, true, false, true);
assertIsFile(path("/test/new/newdir/file"));
assertIsFile(path("testRenameFileAsExistingDirectoryNew/newdir/file"));
}
public void testRenameDirectoryMoveToNonExistentDirectory()
throws Exception {
if (!renameSupported()) return;
Path src = path("/test/hadoop/dir");
Path src = path("testRenameDirectoryMoveToNonExistentDirectory/dir");
fs.mkdirs(src);
Path dst = path("/test/new/newdir");
Path dst = path("testRenameDirectoryMoveToNonExistentDirectoryNew/newdir");
rename(src, dst, false, true, false);
}
public void testRenameDirectoryMoveToExistingDirectory() throws Exception {
if (!renameSupported()) return;
Path src = path("/test/hadoop/dir");
Path src = path("testRenameDirectoryMoveToExistingDirectory/dir");
fs.mkdirs(src);
createFile(path("/test/hadoop/dir/file1"));
createFile(path("/test/hadoop/dir/subdir/file2"));
createFile(path(src + "/file1"));
createFile(path(src + "/subdir/file2"));
Path dst = path("/test/new/newdir");
Path dst = path("testRenameDirectoryMoveToExistingDirectoryNew/newdir");
fs.mkdirs(dst.getParent());
rename(src, dst, true, false, true);
assertFalse("Nested file1 exists",
fs.exists(path("/test/hadoop/dir/file1")));
fs.exists(path(src + "/file1")));
assertFalse("Nested file2 exists",
fs.exists(path("/test/hadoop/dir/subdir/file2")));
fs.exists(path(src + "/subdir/file2")));
assertTrue("Renamed nested file1 exists",
fs.exists(path("/test/new/newdir/file1")));
fs.exists(path(dst + "/file1")));
assertTrue("Renamed nested exists",
fs.exists(path("/test/new/newdir/subdir/file2")));
fs.exists(path(dst + "/subdir/file2")));
}
public void testRenameDirectoryAsExistingFile() throws Exception {
if (!renameSupported()) return;
Path src = path("/test/hadoop/dir");
Path src = path("testRenameDirectoryAsExistingFile/dir");
fs.mkdirs(src);
Path dst = path("/test/new/newfile");
Path dst = path("testRenameDirectoryAsExistingFileNew/newfile");
createFile(dst);
rename(src, dst, false, true, true);
}
public void testRenameDirectoryAsExistingDirectory() throws Exception {
if (!renameSupported()) return;
Path src = path("/test/hadoop/dir");
final Path src = path("testRenameDirectoryAsExistingDirectory/dir");
fs.mkdirs(src);
createFile(path("/test/hadoop/dir/file1"));
createFile(path("/test/hadoop/dir/subdir/file2"));
Path dst = path("/test/new/newdir");
createFile(path(src + "/file1"));
createFile(path(src + "/subdir/file2"));
final Path dst = path("testRenameDirectoryAsExistingDirectoryNew/newdir");
fs.mkdirs(dst);
rename(src, dst, true, false, true);
assertTrue("Destination changed",
fs.exists(path("/test/new/newdir/dir")));
fs.exists(path(dst + "/dir")));
assertFalse("Nested file1 exists",
fs.exists(path("/test/hadoop/dir/file1")));
fs.exists(path(src + "/file1")));
assertFalse("Nested file2 exists",
fs.exists(path("/test/hadoop/dir/subdir/file2")));
fs.exists(path(src + "/dir/subdir/file2")));
assertTrue("Renamed nested file1 exists",
fs.exists(path("/test/new/newdir/dir/file1")));
fs.exists(path(dst + "/dir/file1")));
assertTrue("Renamed nested exists",
fs.exists(path("/test/new/newdir/dir/subdir/file2")));
fs.exists(path(dst + "/dir/subdir/file2")));
}
public void testInputStreamClosedTwice() throws IOException {
//HADOOP-4760 according to Closeable#close() closing already-closed
//streams should have no effect.
Path src = path("/test/hadoop/file");
Path src = path("testInputStreamClosedTwice/file");
createFile(src);
FSDataInputStream in = fs.open(src);
in.close();
@ -502,18 +549,13 @@ public abstract class FileSystemContractBaseTest extends TestCase {
public void testOutputStreamClosedTwice() throws IOException {
//HADOOP-4760 according to Closeable#close() closing already-closed
//streams should have no effect.
Path src = path("/test/hadoop/file");
Path src = path("testOutputStreamClosedTwice/file");
FSDataOutputStream out = fs.create(src);
out.writeChar('H'); //write some data
out.close();
out.close();
}
protected Path path(String pathString) {
return new Path(pathString).makeQualified(fs.getUri(),
fs.getWorkingDirectory());
}
protected void createFile(Path path) throws IOException {
FSDataOutputStream out = fs.create(path);
out.write(data, 0, data.length);
@ -541,7 +583,7 @@ public abstract class FileSystemContractBaseTest extends TestCase {
byte[] filedata1 = dataset(blockSize * 2, 'A', 26);
byte[] filedata2 = dataset(blockSize * 2, 'a', 26);
Path path = path("/test/hadoop/file-overwrite");
Path path = path("testOverWriteAndRead/file-overwrite");
writeAndRead(path, filedata1, blockSize, true, false);
writeAndRead(path, filedata2, blockSize, true, false);
writeAndRead(path, filedata1, blockSize * 2, true, false);
@ -561,7 +603,7 @@ public abstract class FileSystemContractBaseTest extends TestCase {
LOG.info("Skipping test");
return;
}
String mixedCaseFilename = "/test/UPPER.TXT";
String mixedCaseFilename = "testFilesystemIsCaseSensitive";
Path upper = path(mixedCaseFilename);
Path lower = path(StringUtils.toLowerCase(mixedCaseFilename));
assertFalse("File exists" + upper, fs.exists(upper));
@ -592,7 +634,7 @@ public abstract class FileSystemContractBaseTest extends TestCase {
* @throws Exception on failures
*/
public void testZeroByteFilesAreFiles() throws Exception {
Path src = path("/test/testZeroByteFilesAreFiles");
Path src = path("testZeroByteFilesAreFiles");
//create a zero byte file
FSDataOutputStream out = fs.create(src);
out.close();
@ -605,7 +647,7 @@ public abstract class FileSystemContractBaseTest extends TestCase {
* @throws Exception on failures
*/
public void testMultiByteFilesAreFiles() throws Exception {
Path src = path("/test/testMultiByteFilesAreFiles");
Path src = path("testMultiByteFilesAreFiles");
FSDataOutputStream out = fs.create(src);
out.writeUTF("testMultiByteFilesAreFiles");
out.close();
@ -629,10 +671,14 @@ public abstract class FileSystemContractBaseTest extends TestCase {
* @throws Exception on failures
*/
public void testRenameRootDirForbidden() throws Exception {
if (!rootDirTestEnabled()) {
return;
}
if (!renameSupported()) return;
rename(path("/"),
path("/test/newRootDir"),
path("testRenameRootDirForbidden"),
false, true, false);
}
@ -644,7 +690,7 @@ public abstract class FileSystemContractBaseTest extends TestCase {
public void testRenameChildDirForbidden() throws Exception {
if (!renameSupported()) return;
LOG.info("testRenameChildDirForbidden");
Path parentdir = path("/test/parentdir");
Path parentdir = path("testRenameChildDirForbidden");
fs.mkdirs(parentdir);
Path childFile = new Path(parentdir, "childfile");
createFile(childFile);
@ -663,9 +709,9 @@ public abstract class FileSystemContractBaseTest extends TestCase {
*/
public void testRenameToDirWithSamePrefixAllowed() throws Throwable {
if (!renameSupported()) return;
Path parentdir = path("test/parentdir");
final Path parentdir = path("testRenameToDirWithSamePrefixAllowed");
fs.mkdirs(parentdir);
Path dest = path("test/parentdirdest");
final Path dest = path("testRenameToDirWithSamePrefixAllowedDest");
rename(parentdir, dest, true, false, true);
}
@ -677,7 +723,7 @@ public abstract class FileSystemContractBaseTest extends TestCase {
if (!renameSupported()) {
return;
}
Path parentdir = path("test/parentdir");
Path parentdir = path("testRenameDirToSelf");
fs.mkdirs(parentdir);
Path child = new Path(parentdir, "child");
createFile(child);
@ -696,7 +742,7 @@ public abstract class FileSystemContractBaseTest extends TestCase {
if (!renameSupported()) {
return;
}
Path testdir = path("test/dir");
Path testdir = path("testMoveDirUnderParent");
fs.mkdirs(testdir);
Path parent = testdir.getParent();
//the outcome here is ambiguous, so is not checked
@ -711,7 +757,7 @@ public abstract class FileSystemContractBaseTest extends TestCase {
*/
public void testRenameFileToSelf() throws Throwable {
if (!renameSupported()) return;
Path filepath = path("test/file");
Path filepath = path("testRenameFileToSelf");
createFile(filepath);
//HDFS expects rename src, src -> true
rename(filepath, filepath, true, true, true);
@ -725,7 +771,7 @@ public abstract class FileSystemContractBaseTest extends TestCase {
*/
public void testMoveFileUnderParent() throws Throwable {
if (!renameSupported()) return;
Path filepath = path("test/file");
Path filepath = path("testMoveFileUnderParent");
createFile(filepath);
//HDFS expects rename src, src -> true
rename(filepath, filepath, true, true, true);
@ -734,15 +780,23 @@ public abstract class FileSystemContractBaseTest extends TestCase {
}
public void testLSRootDir() throws Throwable {
if (!rootDirTestEnabled()) {
return;
}
Path dir = path("/");
Path child = path("/test");
Path child = path("/FileSystemContractBaseTest");
createFile(child);
assertListFilesFinds(dir, child);
}
public void testListStatusRootDir() throws Throwable {
if (!rootDirTestEnabled()) {
return;
}
Path dir = path("/");
Path child = path("/test");
Path child = path("/FileSystemContractBaseTest");
createFile(child);
assertListStatusFinds(dir, child);
}

View File

@ -117,6 +117,7 @@ public class TestAfsCheckPath {
}
@Override
@Deprecated
public FsServerDefaults getServerDefaults() throws IOException {
// deliberately empty
return null;

View File

@ -295,11 +295,15 @@ public class TestFileStatus {
expected.append("permission=").append(fileStatus.getPermission()).append("; ");
if(fileStatus.isSymlink()) {
expected.append("isSymlink=").append(true).append("; ");
expected.append("symlink=").append(fileStatus.getSymlink()).append("}");
expected.append("symlink=").append(fileStatus.getSymlink()).append("; ");
} else {
expected.append("isSymlink=").append(false).append("}");
expected.append("isSymlink=").append(false).append("; ");
}
expected.append("hasAcl=").append(fileStatus.hasAcl()).append("; ");
expected.append("isEncrypted=").append(
fileStatus.isEncrypted()).append("; ");
expected.append("isErasureCoded=").append(
fileStatus.isErasureCoded()).append("}");
assertEquals(expected.toString(), fileStatus.toString());
}
}

View File

@ -25,6 +25,8 @@ import java.util.Iterator;
import junit.framework.TestCase;
import org.apache.commons.logging.Log;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.viewfs.ConfigUtil;
public class TestFilterFs extends TestCase {
@ -65,4 +67,14 @@ public class TestFilterFs extends TestCase {
}
}
// Test that FilterFs will accept an AbstractFileSystem to be filtered which
// has an optional authority, such as ViewFs
public void testFilteringWithNonrequiredAuthority() throws Exception {
Configuration conf = new Configuration();
ConfigUtil.addLink(conf, "custom", "/mnt", URI.create("file:///"));
FileContext fc =
FileContext.getFileContext(URI.create("viewfs://custom/"), conf);
new FilterFs(fc.getDefaultFileSystem()) {};
}
}

View File

@ -19,10 +19,13 @@ package org.apache.hadoop.fs;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem.Statistics;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.hadoop.util.StringUtils;
import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_DEFAULT;
import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_KEY;
import static org.apache.hadoop.fs.FileSystemTestHelper.*;
import java.io.*;
@ -636,4 +639,55 @@ public class TestLocalFileSystem {
FileStatus[] stats = fs.listStatus(path);
assertTrue(stats != null && stats.length == 1 && stats[0] == stat);
}
@Test
public void testFSOutputStreamBuilder() throws Exception {
Path path = new Path(TEST_ROOT_DIR, "testBuilder");
try {
FSDataOutputStreamBuilder builder =
fileSys.newFSDataOutputStreamBuilder(path);
FSDataOutputStream out = builder.build();
String content = "Create with a generic type of createBuilder!";
byte[] contentOrigin = content.getBytes("UTF8");
out.write(contentOrigin);
out.close();
FSDataInputStream input = fileSys.open(path);
byte[] buffer =
new byte[(int) (fileSys.getFileStatus(path).getLen())];
input.readFully(0, buffer);
input.close();
Assert.assertArrayEquals("The data read should equal the data written.",
contentOrigin, buffer);
} catch (IOException e) {
throw e;
}
// Test value not being set for replication, block size, buffer size
// and permission
FSDataOutputStreamBuilder builder =
fileSys.newFSDataOutputStreamBuilder(path);
builder.build();
Assert.assertEquals("Should be default block size",
builder.getBlockSize(), fileSys.getDefaultBlockSize());
Assert.assertEquals("Should be default replication factor",
builder.getReplication(), fileSys.getDefaultReplication());
Assert.assertEquals("Should be default buffer size",
builder.getBufferSize(),
fileSys.getConf().getInt(IO_FILE_BUFFER_SIZE_KEY,
IO_FILE_BUFFER_SIZE_DEFAULT));
Assert.assertEquals("Should be default permission",
builder.getPermission(), FsPermission.getFileDefault());
// Test set 0 to replication, block size and buffer size
builder = fileSys.newFSDataOutputStreamBuilder(path);
builder.setBufferSize(0).setBlockSize(0).setReplication((short) 0);
Assert.assertEquals("Block size should be 0",
builder.getBlockSize(), 0);
Assert.assertEquals("Replication factor should be 0",
builder.getReplication(), 0);
Assert.assertEquals("Buffer size should be 0",
builder.getBufferSize(), 0);
}
}

View File

@ -33,6 +33,8 @@ public class TestRawLocalFileSystemContract extends FileSystemContractBaseTest {
private static final Logger LOG =
LoggerFactory.getLogger(TestRawLocalFileSystemContract.class);
private final static Path TEST_BASE_DIR =
new Path(GenericTestUtils.getTempPath(""));
@Before
public void setUp() throws Exception {
@ -51,21 +53,25 @@ public class TestRawLocalFileSystemContract extends FileSystemContractBaseTest {
return false;
}
/**
* Disables tests against the root directory.
*
* Writing to the root directory on the local file system may hit a
* permission denied exception, or even worse, accidentally delete or
* overwrite files.
*/
@Override
protected boolean rootDirTestEnabled() {
return false;
}
@Override
public String getDefaultWorkingDirectory() {
return fs.getWorkingDirectory().toUri().getPath();
}
@Override
protected Path path(String pathString) {
// For testWorkingDirectory
if (pathString.equals(getDefaultWorkingDirectory()) ||
pathString.equals(".") || pathString.equals("..")) {
return super.path(pathString);
}
return new Path(GenericTestUtils.getTempPath(pathString)).
makeQualified(fs.getUri(), fs.getWorkingDirectory());
protected Path getTestBaseDir() {
return TEST_BASE_DIR;
}
@Override

View File

@ -519,6 +519,33 @@ public class ContractTestUtils extends Assert {
fileStatus.isDirectory());
}
/**
* Assert that a path is Erasure Coded.
*
* @param fs filesystem
* @param path path of the file or directory
* @throws IOException on File IO problems
*/
public static void assertErasureCoded(final FileSystem fs, final Path path)
throws IOException {
FileStatus fileStatus = fs.getFileStatus(path);
assertTrue(path + " must be erasure coded!", fileStatus.isErasureCoded());
}
/**
* Assert that a path is not Erasure Coded.
*
* @param fs filesystem
* @param path path of the file or directory
* @throws IOException on File IO problems
*/
public static void assertNotErasureCoded(final FileSystem fs,
final Path path) throws IOException {
FileStatus fileStatus = fs.getFileStatus(path);
assertFalse(path + " should not be erasure coded!",
fileStatus.isErasureCoded());
}
/**
* Write the text to a file, returning the converted byte array
* for use in validating the round trip.

View File

@ -28,6 +28,7 @@ import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.fs.FsConstants;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.contract.ContractTestUtils;
import org.apache.hadoop.io.DataInputBuffer;
import org.apache.hadoop.io.DataOutputBuffer;
import org.apache.hadoop.test.GenericTestUtils;
@ -70,8 +71,14 @@ public class TestViewfsFileStatus {
ConfigUtil.addLink(conf, "/foo/bar/baz", TEST_DIR.toURI());
FileSystem vfs = FileSystem.get(FsConstants.VIEWFS_URI, conf);
assertEquals(ViewFileSystem.class, vfs.getClass());
FileStatus stat = vfs.getFileStatus(new Path("/foo/bar/baz", testfilename));
Path path = new Path("/foo/bar/baz", testfilename);
FileStatus stat = vfs.getFileStatus(path);
assertEquals(content.length, stat.getLen());
ContractTestUtils.assertNotErasureCoded(vfs, path);
assertTrue(path + " should have erasure coding unset in " +
"FileStatus#toString(): " + stat,
stat.toString().contains("isErasureCoded=false"));
// check serialization/deserialization
DataOutputBuffer dob = new DataOutputBuffer();
stat.write(dob);
@ -80,6 +87,7 @@ public class TestViewfsFileStatus {
FileStatus deSer = new FileStatus();
deSer.readFields(dib);
assertEquals(content.length, deSer.getLen());
assertFalse(deSer.isErasureCoded());
}
// Tests that ViewFileSystem.getFileChecksum calls res.targetFileSystem

View File

@ -25,6 +25,13 @@ import static org.apache.hadoop.fs.FileContextTestHelper.isFile;
import static org.apache.hadoop.fs.viewfs.Constants.PERMISSION_555;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.mockito.Matchers.any;
import static org.mockito.Matchers.anyBoolean;
import static org.mockito.Matchers.anyString;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.never;
import static org.mockito.Mockito.verify;
import static org.mockito.Mockito.when;
import java.io.FileNotFoundException;
import java.io.IOException;
@ -32,19 +39,26 @@ import java.net.URI;
import java.net.URISyntaxException;
import java.security.PrivilegedExceptionAction;
import java.util.ArrayList;
import java.util.EnumSet;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.AbstractFileSystem;
import org.apache.hadoop.fs.BlockLocation;
import org.apache.hadoop.fs.CreateFlag;
import org.apache.hadoop.fs.FileContext;
import org.apache.hadoop.fs.FileContextTestHelper;
import org.apache.hadoop.fs.FsServerDefaults;
import org.apache.hadoop.fs.LocatedFileStatus;
import org.apache.hadoop.fs.RemoteIterator;
import org.apache.hadoop.fs.FileContextTestHelper.fileType;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FsConstants;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.UnresolvedLinkException;
import org.apache.hadoop.fs.local.LocalConfigKeys;
import org.apache.hadoop.fs.permission.AclEntry;
import org.apache.hadoop.fs.permission.AclStatus;
import org.apache.hadoop.fs.permission.AclUtil;
@ -56,7 +70,6 @@ import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
import org.mockito.Mockito;
/**
@ -319,6 +332,16 @@ abstract public class ViewFsBaseTest {
}
}
Assert.assertTrue(dirFooPresent);
RemoteIterator<LocatedFileStatus> dirLocatedContents =
fcView.listLocatedStatus(new Path("/targetRoot/"));
dirFooPresent = false;
while (dirLocatedContents.hasNext()) {
FileStatus fileStatus = dirLocatedContents.next();
if (fileStatus.getPath().getName().equals("dirFoo")) {
dirFooPresent = true;
}
}
Assert.assertTrue(dirFooPresent);
}
// rename across mount points that point to same target also fail
@ -450,24 +473,23 @@ abstract public class ViewFsBaseTest {
}
@Test
public void testGetFileChecksum() throws AccessControlException
, UnresolvedLinkException, IOException {
AbstractFileSystem mockAFS = Mockito.mock(AbstractFileSystem.class);
public void testGetFileChecksum() throws AccessControlException,
UnresolvedLinkException, IOException {
AbstractFileSystem mockAFS = mock(AbstractFileSystem.class);
InodeTree.ResolveResult<AbstractFileSystem> res =
new InodeTree.ResolveResult<AbstractFileSystem>(null, mockAFS , null,
new Path("someFile"));
@SuppressWarnings("unchecked")
InodeTree<AbstractFileSystem> fsState = Mockito.mock(InodeTree.class);
Mockito.when(fsState.resolve(Mockito.anyString()
, Mockito.anyBoolean())).thenReturn(res);
ViewFs vfs = Mockito.mock(ViewFs.class);
InodeTree<AbstractFileSystem> fsState = mock(InodeTree.class);
when(fsState.resolve(anyString(), anyBoolean())).thenReturn(res);
ViewFs vfs = mock(ViewFs.class);
vfs.fsState = fsState;
Mockito.when(vfs.getFileChecksum(new Path("/tmp/someFile")))
when(vfs.getFileChecksum(new Path("/tmp/someFile")))
.thenCallRealMethod();
vfs.getFileChecksum(new Path("/tmp/someFile"));
Mockito.verify(mockAFS).getFileChecksum(new Path("someFile"));
verify(mockAFS).getFileChecksum(new Path("someFile"));
}
@Test(expected=FileNotFoundException.class)
@ -820,4 +842,103 @@ abstract public class ViewFsBaseTest {
}
});
}
@Test
public void testRespectsServerDefaults() throws Exception {
FsServerDefaults targetDefs =
fcTarget.getDefaultFileSystem().getServerDefaults(new Path("/"));
FsServerDefaults viewDefs =
fcView.getDefaultFileSystem().getServerDefaults(new Path("/data"));
assertEquals(targetDefs.getReplication(), viewDefs.getReplication());
assertEquals(targetDefs.getBlockSize(), viewDefs.getBlockSize());
assertEquals(targetDefs.getBytesPerChecksum(),
viewDefs.getBytesPerChecksum());
assertEquals(targetDefs.getFileBufferSize(),
viewDefs.getFileBufferSize());
assertEquals(targetDefs.getWritePacketSize(),
viewDefs.getWritePacketSize());
assertEquals(targetDefs.getEncryptDataTransfer(),
viewDefs.getEncryptDataTransfer());
assertEquals(targetDefs.getTrashInterval(), viewDefs.getTrashInterval());
assertEquals(targetDefs.getChecksumType(), viewDefs.getChecksumType());
fcView.create(new Path("/data/file"), EnumSet.of(CreateFlag.CREATE))
.close();
FileStatus stat =
fcTarget.getFileStatus(new Path(targetTestRoot, "data/file"));
assertEquals(targetDefs.getReplication(), stat.getReplication());
}
@Test
public void testServerDefaultsInternalDir() throws Exception {
FsServerDefaults localDefs = LocalConfigKeys.getServerDefaults();
FsServerDefaults viewDefs = fcView
.getDefaultFileSystem().getServerDefaults(new Path("/internalDir"));
assertEquals(localDefs.getReplication(), viewDefs.getReplication());
assertEquals(localDefs.getBlockSize(), viewDefs.getBlockSize());
assertEquals(localDefs.getBytesPerChecksum(),
viewDefs.getBytesPerChecksum());
assertEquals(localDefs.getFileBufferSize(),
viewDefs.getFileBufferSize());
assertEquals(localDefs.getWritePacketSize(),
viewDefs.getWritePacketSize());
assertEquals(localDefs.getEncryptDataTransfer(),
viewDefs.getEncryptDataTransfer());
assertEquals(localDefs.getTrashInterval(), viewDefs.getTrashInterval());
assertEquals(localDefs.getChecksumType(), viewDefs.getChecksumType());
}
// Confirm that listLocatedStatus is delegated properly to the underlying
// AbstractFileSystem to allow for optimizations
@Test
public void testListLocatedStatus() throws IOException {
final Path mockTarget = new Path("mockfs://listLocatedStatus/foo");
final Path mountPoint = new Path("/fooMount");
final Configuration newConf = new Configuration();
newConf.setClass("fs.AbstractFileSystem.mockfs.impl", MockFs.class,
AbstractFileSystem.class);
ConfigUtil.addLink(newConf, mountPoint.toString(), mockTarget.toUri());
FileContext.getFileContext(URI.create("viewfs:///"), newConf)
.listLocatedStatus(mountPoint);
AbstractFileSystem mockFs = MockFs.getMockFs(mockTarget.toUri());
verify(mockFs).listLocatedStatus(new Path(mockTarget.toUri().getPath()));
verify(mockFs, never()).listStatus(any(Path.class));
verify(mockFs, never()).listStatusIterator(any(Path.class));
}
// Confirm that listStatus is delegated properly to the underlying
// AbstractFileSystem's listStatusIterator to allow for optimizations
@Test
public void testListStatusIterator() throws IOException {
final Path mockTarget = new Path("mockfs://listStatusIterator/foo");
final Path mountPoint = new Path("/fooMount");
final Configuration newConf = new Configuration();
newConf.setClass("fs.AbstractFileSystem.mockfs.impl", MockFs.class,
AbstractFileSystem.class);
ConfigUtil.addLink(newConf, mountPoint.toString(), mockTarget.toUri());
FileContext.getFileContext(URI.create("viewfs:///"), newConf)
.listStatus(mountPoint);
AbstractFileSystem mockFs = MockFs.getMockFs(mockTarget.toUri());
verify(mockFs).listStatusIterator(new Path(mockTarget.toUri().getPath()));
verify(mockFs, never()).listStatus(any(Path.class));
}
static class MockFs extends ChRootedFs {
private static Map<String, AbstractFileSystem> fsCache = new HashMap<>();
MockFs(URI uri, Configuration conf) throws URISyntaxException {
super(getMockFs(uri), new Path("/"));
}
static AbstractFileSystem getMockFs(URI uri) {
AbstractFileSystem mockFs = fsCache.get(uri.getAuthority());
if (mockFs == null) {
mockFs = mock(AbstractFileSystem.class);
when(mockFs.getUri()).thenReturn(uri);
when(mockFs.getUriDefaultPort()).thenReturn(1);
when(mockFs.getUriPath(any(Path.class))).thenCallRealMethod();
when(mockFs.isValidName(anyString())).thenReturn(true);
fsCache.put(uri.getAuthority(), mockFs);
}
return mockFs;
}
}
}

View File

@ -27,7 +27,6 @@ import static org.mockito.AdditionalMatchers.geq;
import static org.mockito.Mockito.*;
import org.mockito.stubbing.Answer;
import org.mockito.internal.matchers.GreaterThan;
import org.mockito.invocation.InvocationOnMock;
import org.mockito.ArgumentCaptor;
@ -329,8 +328,8 @@ public class MetricsAsserts {
*/
public static void assertCounterGt(String name, long greater,
MetricsRecordBuilder rb) {
Assert.assertThat("Bad value for metric " + name, getLongCounter(name, rb),
new GreaterThan<Long>(greater));
Assert.assertTrue("Bad value for metric " + name,
getLongCounter(name, rb) > greater);
}
/**
@ -352,8 +351,8 @@ public class MetricsAsserts {
*/
public static void assertGaugeGt(String name, double greater,
MetricsRecordBuilder rb) {
Assert.assertThat("Bad value for metric " + name, getDoubleGauge(name, rb),
new GreaterThan<Double>(greater));
Assert.assertTrue("Bad value for metric " + name,
getDoubleGauge(name, rb) > greater);
}
/**

View File

@ -225,7 +225,7 @@ public abstract class MultithreadedTestUtil {
/**
* User method for any code to test repeating behavior of (as threads).
* @throws Exception throw an exception if a failure has occured.
* @throws Exception throw an exception if a failure has occurred.
*/
public abstract void doAnAction() throws Exception;
}

View File

@ -778,7 +778,7 @@
</comparator>
<comparator>
<type>RegexpComparator</type>
<expected-output>^\s*rooted at &lt;path&gt;\.( )*</expected-output>
<expected-output>^\s*rooted at &lt;path&gt;\. The EC files will be ignored here\.( )*</expected-output>
</comparator>
<comparator>
<type>RegexpComparator</type>

View File

@ -956,7 +956,7 @@ $H4 Re-encrypt Encrypted Key With The Latest KeyVersion
This command takes a previously generated encrypted key, and re-encrypts it using the latest KeyVersion encryption key in the KeyProvider. If the latest KeyVersion is the same as the one used to generate the encrypted key, the same encrypted key is returned.
This is usually useful after a [Rollover](Rollover_Key) of an encryption key. Re-encrypting the encrypted key will allow it to be encrypted using the latest version of the encryption key, but still with the same key material and initialization vector.
This is usually useful after a [Rollover](#Rollover_Key) of an encryption key. Re-encrypting the encrypted key will allow it to be encrypted using the latest version of the encryption key, but still with the same key material and initialization vector.
*REQUEST:*

View File

@ -81,6 +81,7 @@ public class SimpleTcpServer {
});
server.setOption("child.tcpNoDelay", true);
server.setOption("child.keepAlive", true);
server.setOption("child.reuseAddress", true);
server.setOption("reuseAddress", true);
// Listen to TCP port
@ -91,7 +92,7 @@ public class SimpleTcpServer {
LOG.info("Started listening to TCP requests at port " + boundPort + " for "
+ rpcProgram + " with workerCount " + workerCount);
}
// boundPort will be set only after server starts
public int getBoundPort() {
return this.boundPort;

View File

@ -110,6 +110,7 @@ final class Portmap {
}
});
tcpServer.setOption("reuseAddress", true);
tcpServer.setOption("child.reuseAddress", true);
udpServer = new ConnectionlessBootstrap(new NioDatagramChannelFactory(
Executors.newCachedThreadPool()));

View File

@ -37,7 +37,6 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd">
<dependency>
<groupId>com.squareup.okhttp</groupId>
<artifactId>okhttp</artifactId>
<version>2.4.0</version>
</dependency>
<dependency>
<groupId>org.apache.hadoop</groupId>

View File

@ -155,10 +155,16 @@ public class Hdfs extends AbstractFileSystem {
}
@Override
@Deprecated
public FsServerDefaults getServerDefaults() throws IOException {
return dfs.getServerDefaults();
}
@Override
public FsServerDefaults getServerDefaults(final Path f) throws IOException {
return dfs.getServerDefaults();
}
@Override
public RemoteIterator<LocatedFileStatus> listLocatedStatus(
final Path p)

View File

@ -1731,10 +1731,14 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
checkOpen();
Preconditions.checkArgument(length >= 0);
LocatedBlocks blockLocations = getBlockLocations(src, length);
LocatedBlocks blockLocations = null;
FileChecksumHelper.FileChecksumComputer maker = null;
ErasureCodingPolicy ecPolicy = null;
if (length > 0) {
blockLocations = getBlockLocations(src, length);
ecPolicy = blockLocations.getErasureCodingPolicy();
}
FileChecksumHelper.FileChecksumComputer maker;
ErasureCodingPolicy ecPolicy = blockLocations.getErasureCodingPolicy();
maker = ecPolicy != null ?
new FileChecksumHelper.StripedFileNonStripedChecksumComputer(src,
length, blockLocations, namenode, this, ecPolicy) :

View File

@ -18,7 +18,7 @@
package org.apache.hadoop.hdfs;
import com.google.common.collect.Iterators;
import java.util.Collections;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.hdfs.inotify.EventBatch;
@ -72,7 +72,7 @@ public class DFSInotifyEventInputStream {
DFSInotifyEventInputStream(ClientProtocol namenode, Tracer tracer,
long lastReadTxid) {
this.namenode = namenode;
this.it = Iterators.emptyIterator();
this.it = Collections.emptyIterator();
this.lastReadTxid = lastReadTxid;
this.tracer = tracer;
}

View File

@ -777,7 +777,7 @@ public class DFSInputStream extends FSInputStream
}
} finally {
// Check if need to report block replicas corruption either read
// was successful or ChecksumException occured.
// was successful or ChecksumException occurred.
reportCheckSumFailure(corruptedBlocks,
currentLocatedBlock.getLocations().length, false);
}

View File

@ -177,14 +177,18 @@ public class DFSStripedInputStream extends DFSInputStream {
@Override
public synchronized void close() throws IOException {
super.close();
if (curStripeBuf != null) {
BUFFER_POOL.putBuffer(curStripeBuf);
curStripeBuf = null;
}
if (parityBuf != null) {
BUFFER_POOL.putBuffer(parityBuf);
parityBuf = null;
try {
super.close();
} finally {
if (curStripeBuf != null) {
BUFFER_POOL.putBuffer(curStripeBuf);
curStripeBuf = null;
}
if (parityBuf != null) {
BUFFER_POOL.putBuffer(parityBuf);
parityBuf = null;
}
decoder.release();
}
}
@ -390,7 +394,7 @@ public class DFSStripedInputStream extends DFSInputStream {
return result;
} finally {
// Check if need to report block replicas corruption either read
// was successful or ChecksumException occured.
// was successful or ChecksumException occurred.
reportCheckSumFailure(corruptedBlocks,
currentLocatedBlock.getLocations().length, true);
}

View File

@ -1033,6 +1033,7 @@ public class DFSStripedOutputStream extends DFSOutputStream {
setClosed();
// shutdown executor of flushAll tasks
flushAllExecutor.shutdownNow();
encoder.release();
}
}

View File

@ -366,7 +366,7 @@ public class DFSUtilClient {
static Map<String, InetSocketAddress> getAddressesForNameserviceId(
Configuration conf, String nsId, String defaultValue, String... keys) {
Collection<String> nnIds = getNameNodeIds(conf, nsId);
Map<String, InetSocketAddress> ret = Maps.newHashMap();
Map<String, InetSocketAddress> ret = Maps.newLinkedHashMap();
for (String nnId : emptyAsSingletonNull(nnIds)) {
String suffix = concatSuffixes(nsId, nnId);
String address = getConfValue(defaultValue, suffix, conf, keys);

View File

@ -39,6 +39,7 @@ import org.apache.hadoop.fs.ContentSummary;
import org.apache.hadoop.fs.CreateFlag;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FSDataOutputStreamBuilder;
import org.apache.hadoop.fs.FSLinkResolver;
import org.apache.hadoop.fs.FileChecksum;
import org.apache.hadoop.fs.FileEncryptionInfo;
@ -446,6 +447,48 @@ public class DistributedFileSystem extends FileSystem {
}.resolve(this, absF);
}
/**
* Same as
* {@link #create(Path, FsPermission, EnumSet<CreateFlag>, int, short, long,
* Progressable, ChecksumOpt)} with the addition of favoredNodes, which is a
* hint to where the namenode should place the file blocks.
* The favored nodes hint is not persisted in HDFS, so it may be honored only
* at creation time. With favored nodes, blocks are pinned on the chosen
* datanodes to prevent the balancer from moving them; HDFS may still move
* the blocks during replication, away from the favored nodes. A value of
* null means no favored nodes for this create.
*/
private HdfsDataOutputStream create(final Path f,
final FsPermission permission, EnumSet<CreateFlag> flag,
final int bufferSize, final short replication, final long blockSize,
final Progressable progress, final ChecksumOpt checksumOpt,
final InetSocketAddress[] favoredNodes) throws IOException {
statistics.incrementWriteOps(1);
storageStatistics.incrementOpCounter(OpType.CREATE);
Path absF = fixRelativePart(f);
return new FileSystemLinkResolver<HdfsDataOutputStream>() {
@Override
public HdfsDataOutputStream doCall(final Path p) throws IOException {
final DFSOutputStream out = dfs.create(getPathName(f), permission,
flag, true, replication, blockSize, progress, bufferSize,
checksumOpt, favoredNodes);
return dfs.createWrappedOutputStream(out, statistics);
}
@Override
public HdfsDataOutputStream next(final FileSystem fs, final Path p)
throws IOException {
if (fs instanceof DistributedFileSystem) {
DistributedFileSystem myDfs = (DistributedFileSystem)fs;
return myDfs.create(p, permission, flag, bufferSize, replication,
blockSize, progress, checksumOpt, favoredNodes);
}
throw new UnsupportedOperationException("Cannot create with" +
" favoredNodes through a symlink to a non-DistributedFileSystem: "
+ f + " -> " + p);
}
}.resolve(this, absF);
}
@Override
protected HdfsDataOutputStream primitiveCreate(Path f,
FsPermission absolutePermission, EnumSet<CreateFlag> flag, int bufferSize,
@ -2584,4 +2627,42 @@ public class DistributedFileSystem extends FileSystem {
DFSOpsCountStatistics getDFSOpsCountStatistics() {
return storageStatistics;
}
/**
* Extends FSDataOutputStreamBuilder to support special requirements
* of DistributedFileSystem.
*/
public static class HdfsDataOutputStreamBuilder
extends FSDataOutputStreamBuilder {
private final DistributedFileSystem dfs;
private InetSocketAddress[] favoredNodes = null;
public HdfsDataOutputStreamBuilder(DistributedFileSystem dfs, Path path) {
super(dfs, path);
this.dfs = dfs;
}
protected InetSocketAddress[] getFavoredNodes() {
return favoredNodes;
}
public HdfsDataOutputStreamBuilder setFavoredNodes(
final InetSocketAddress[] nodes) {
Preconditions.checkNotNull(nodes);
favoredNodes = nodes.clone();
return this;
}
@Override
public HdfsDataOutputStream build() throws IOException {
return dfs.create(getPath(), getPermission(), getFlags(),
getBufferSize(), getReplication(), getBlockSize(),
getProgress(), getChecksumOpt(), getFavoredNodes());
}
}
@Override
public HdfsDataOutputStreamBuilder newFSDataOutputStreamBuilder(Path path) {
return new HdfsDataOutputStreamBuilder(this, path);
}
}
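A minimal usage sketch of the builder and favored-nodes hint added above (not part of the patch). The path and datanode address are hypothetical, and it assumes the default filesystem is HDFS so the cast to DistributedFileSystem succeeds:

import java.net.InetSocketAddress;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.client.HdfsDataOutputStream;

public class FavoredNodesWriteSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    DistributedFileSystem dfs = (DistributedFileSystem) FileSystem.get(conf);
    // Hypothetical datanode the client would like the blocks placed on.
    InetSocketAddress[] favored = {new InetSocketAddress("dn1.example.com", 50010)};
    try (HdfsDataOutputStream out =
        dfs.newFSDataOutputStreamBuilder(new Path("/tmp/favored-example.txt"))
            .setFavoredNodes(favored)
            .build()) {
      out.writeBytes("hello");
    }
  }
}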

View File

@ -95,11 +95,13 @@ final class FileChecksumHelper {
this.client = client;
this.remaining = length;
if (src.contains(HdfsConstants.SEPARATOR_DOT_SNAPSHOT_DIR_SEPARATOR)) {
this.remaining = Math.min(length, blockLocations.getFileLength());
}
this.locatedBlocks = blockLocations.getLocatedBlocks();
if (blockLocations != null) {
if (src.contains(HdfsConstants.SEPARATOR_DOT_SNAPSHOT_DIR_SEPARATOR)) {
this.remaining = Math.min(length, blockLocations.getFileLength());
}
this.locatedBlocks = blockLocations.getLocatedBlocks();
}
}
String getSrc() {
@ -203,9 +205,23 @@ final class FileChecksumHelper {
* @throws IOException
*/
void compute() throws IOException {
checksumBlocks();
fileChecksum = makeFinalResult();
/**
* If the requested length is 0 or the file is empty, return a checksum
* with the magic entry that matches what previous HDFS versions returned.
*/
if (locatedBlocks == null || locatedBlocks.isEmpty()) {
// Explicitly specified here in case the default DataOutputBuffer
// buffer length value is changed in future. This matters because the
// fixed value 32 has to be used to repeat the magic value for previous
// HDFS version.
final int lenOfZeroBytes = 32;
byte[] emptyBlockMd5 = new byte[lenOfZeroBytes];
MD5Hash fileMD5 = MD5Hash.digest(emptyBlockMd5);
fileChecksum = new MD5MD5CRC32GzipFileChecksum(0, 0, fileMD5);
} else {
checksumBlocks();
fileChecksum = makeFinalResult();
}
}
/**
@ -228,15 +244,7 @@ final class FileChecksumHelper {
return new MD5MD5CRC32CastagnoliFileChecksum(bytesPerCRC,
crcPerBlock, fileMD5);
default:
// If there is no block allocated for the file,
// return one with the magic entry that matches what previous
// hdfs versions return.
if (locatedBlocks.isEmpty()) {
return new MD5MD5CRC32GzipFileChecksum(0, 0, fileMD5);
}
// we should never get here since the validity was checked
// when getCrcType() was called above.
// we will get here when crcType is "NULL".
return null;
}
}
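As a side note, a small standalone sketch (not part of the patch) of the magic value the empty-file path in compute() produces: an MD5-of-MD5-of-CRC32 checksum whose file MD5 is the digest of 32 zero bytes, with bytesPerCRC and crcPerBlock both 0.

import org.apache.hadoop.fs.MD5MD5CRC32GzipFileChecksum;
import org.apache.hadoop.io.MD5Hash;

public class EmptyFileChecksumSketch {
  public static void main(String[] args) {
    // 32 zero bytes, matching the fixed lenOfZeroBytes used above.
    byte[] emptyBlockMd5 = new byte[32];
    MD5Hash fileMD5 = MD5Hash.digest(emptyBlockMd5);
    // bytesPerCRC and crcPerBlock are both 0 for an empty file.
    System.out.println(new MD5MD5CRC32GzipFileChecksum(0, 0, fileMD5));
  }
}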
@ -412,7 +420,7 @@ final class FileChecksumHelper {
}
/**
* Striped file checksum computing.
* Non-striped checksum computing for striped files.
*/
static class StripedFileNonStripedChecksumComputer
extends FileChecksumComputer {

View File

@ -264,6 +264,8 @@ public interface HdfsClientConfigKeys {
String CONNECTION_RETRIES_ON_SOCKET_TIMEOUTS_KEY =
PREFIX + "connection.retries.on.timeouts";
int CONNECTION_RETRIES_ON_SOCKET_TIMEOUTS_DEFAULT = 0;
String RANDOM_ORDER = PREFIX + "random.order";
boolean RANDOM_ORDER_DEFAULT = false;
}
/** dfs.client.write configuration properties */

View File

@ -55,6 +55,7 @@ public class DatanodeInfo extends DatanodeID implements Node {
private String softwareVersion;
private List<String> dependentHostNames = new LinkedList<>();
private String upgradeDomain;
public static final DatanodeInfo[] EMPTY_ARRAY = {};
// Datanode administrative states
public enum AdminStates {

View File

@ -33,8 +33,10 @@ public class FsPermissionExtension extends FsPermission {
private final static short ACL_BIT = 1 << 12;
private final static short ENCRYPTED_BIT = 1 << 13;
private final static short ERASURE_CODED_BIT = 1 << 14;
private final boolean aclBit;
private final boolean encryptedBit;
private final boolean erasureCodedBit;
/**
* Constructs a new FsPermissionExtension based on the given FsPermission.
@ -42,10 +44,11 @@ public class FsPermissionExtension extends FsPermission {
* @param perm FsPermission containing permission bits
*/
public FsPermissionExtension(FsPermission perm, boolean hasAcl,
boolean isEncrypted) {
boolean isEncrypted, boolean isErasureCoded) {
super(perm.toShort());
aclBit = hasAcl;
encryptedBit = isEncrypted;
erasureCodedBit = isErasureCoded;
}
/**
@ -57,12 +60,15 @@ public class FsPermissionExtension extends FsPermission {
super(perm);
aclBit = (perm & ACL_BIT) != 0;
encryptedBit = (perm & ENCRYPTED_BIT) != 0;
erasureCodedBit = (perm & ERASURE_CODED_BIT) != 0;
}
@Override
public short toExtendedShort() {
return (short)(toShort() |
(aclBit ? ACL_BIT : 0) | (encryptedBit ? ENCRYPTED_BIT : 0));
return (short)(toShort()
| (aclBit ? ACL_BIT : 0)
| (encryptedBit ? ENCRYPTED_BIT : 0)
| (erasureCodedBit ? ERASURE_CODED_BIT : 0));
}
@Override
@ -75,6 +81,11 @@ public class FsPermissionExtension extends FsPermission {
return encryptedBit;
}
@Override
public boolean getErasureCodedBit() {
return erasureCodedBit;
}
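To illustrate the extended permission encoding above (a sketch, not patch code): the ACL, encryption, and erasure-coding flags occupy bits 12, 13, and 14 on top of the regular permission bits, so an erasure-coded file with mode 644 serializes as octal 40644.

public class PermissionBitsSketch {
  private static final short ACL_BIT = 1 << 12;
  private static final short ENCRYPTED_BIT = 1 << 13;
  private static final short ERASURE_CODED_BIT = 1 << 14;

  public static void main(String[] args) {
    short perm = 0644;                                   // rw-r--r--
    short extended = (short) (perm | ERASURE_CODED_BIT); // set bit 14
    System.out.println(Integer.toOctalString(extended & 0xffff)); // prints 40644
  }
}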
@Override
public boolean equals(Object o) {
// This intentionally delegates to the base class. This is only overridden

View File

@ -0,0 +1,146 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.protocol;
import com.google.common.collect.ImmutableMap;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import javax.annotation.Nonnull;
import javax.annotation.Nullable;
import java.util.Map;
/**
* A class that allows a DataNode to communicate information about all
* its disks that appear to be slow.
*
* The wire representation of this structure is a list of
* SlowDiskReportProto messages.
*/
@InterfaceAudience.Private
@InterfaceStability.Unstable
public final class SlowDiskReports {
/**
* A map from the DataNode Disk's BasePath to its mean metadata op latency,
* mean read io latency and mean write io latency.
*
* The NameNode must not attempt to interpret the mean latencies
* beyond exposing them as a diagnostic, e.g. via metrics. Also, comparing
* latencies across reports from different DataNodes may not be
* meaningful and must be avoided.
*/
@Nonnull
private final Map<String, Map<DiskOp, Double>> slowDisks;
/**
* An object representing a SlowDiskReports with no entries. Should
* be used instead of null or creating new objects when there are
* no slow disks to report.
*/
public static final SlowDiskReports EMPTY_REPORT =
new SlowDiskReports(ImmutableMap.of());
private SlowDiskReports(Map<String, Map<DiskOp, Double>> slowDisks) {
this.slowDisks = slowDisks;
}
public static SlowDiskReports create(
@Nullable Map<String, Map<DiskOp, Double>> slowDisks) {
if (slowDisks == null || slowDisks.isEmpty()) {
return EMPTY_REPORT;
}
return new SlowDiskReports(slowDisks);
}
public Map<String, Map<DiskOp, Double>> getSlowDisks() {
return slowDisks;
}
public boolean haveSlowDisks() {
return slowDisks.size() > 0;
}
/**
* Return true if the two objects represent the same set of slow disk
* entries. Primarily for unit testing convenience.
*/
@Override
public boolean equals(Object o) {
if (this == o) {
return true;
}
if (!(o instanceof SlowDiskReports)) {
return false;
}
SlowDiskReports that = (SlowDiskReports) o;
if (this.slowDisks.size() != that.slowDisks.size()) {
return false;
}
if (!this.slowDisks.keySet().containsAll(that.slowDisks.keySet()) ||
!that.slowDisks.keySet().containsAll(this.slowDisks.keySet())) {
return false;
}
for (String disk : this.slowDisks.keySet()) {
if (!this.slowDisks.get(disk).equals(that.slowDisks.get(disk))) {
return false;
}
}
return true;
}
@Override
public int hashCode() {
return slowDisks.hashCode();
}
/**
* Lists the types of operations on which disk latencies are measured.
*/
public enum DiskOp {
METADATA("MetadataOp"),
READ("ReadIO"),
WRITE("WriteIO");
private final String value;
DiskOp(final String v) {
this.value = v;
}
@Override
public String toString() {
return value;
}
public static DiskOp fromValue(final String value) {
for (DiskOp as : DiskOp.values()) {
if (as.value.equals(value)) {
return as;
}
}
return null;
}
}
}
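A minimal usage sketch of this class (hypothetical disk base paths and latency values, not patch code):

import java.util.Map;
import com.google.common.collect.ImmutableMap;
import org.apache.hadoop.hdfs.server.protocol.SlowDiskReports;
import org.apache.hadoop.hdfs.server.protocol.SlowDiskReports.DiskOp;

public class SlowDiskReportsSketch {
  public static void main(String[] args) {
    Map<DiskOp, Double> disk1Latencies = ImmutableMap.of(DiskOp.WRITE, 152.3);
    Map<DiskOp, Double> disk2Latencies = ImmutableMap.of(DiskOp.METADATA, 88.0);
    Map<String, Map<DiskOp, Double>> slowDisks = ImmutableMap.of(
        "/data/disk1", disk1Latencies, "/data/disk2", disk2Latencies);

    SlowDiskReports report = SlowDiskReports.create(slowDisks);
    System.out.println(report.haveSlowDisks());          // true
    // A null or empty map collapses to the shared EMPTY_REPORT instance.
    System.out.println(SlowDiskReports.create(null) == SlowDiskReports.EMPTY_REPORT); // true
  }
}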

View File

@ -98,12 +98,13 @@ class JsonUtilClient {
/** Convert a string to a FsPermission object. */
static FsPermission toFsPermission(
final String s, Boolean aclBit, Boolean encBit) {
final String s, Boolean aclBit, Boolean encBit, Boolean erasureBit) {
FsPermission perm = new FsPermission(Short.parseShort(s, 8));
final boolean aBit = (aclBit != null) ? aclBit : false;
final boolean eBit = (encBit != null) ? encBit : false;
if (aBit || eBit) {
return new FsPermissionExtension(perm, aBit, eBit);
final boolean ecBit = (erasureBit != null) ? erasureBit : false;
if (aBit || eBit || ecBit) {
return new FsPermissionExtension(perm, aBit, eBit, ecBit);
} else {
return perm;
}
@ -129,7 +130,8 @@ class JsonUtilClient {
final String group = (String) m.get("group");
final FsPermission permission = toFsPermission((String) m.get("permission"),
(Boolean) m.get("aclBit"),
(Boolean) m.get("encBit"));
(Boolean) m.get("encBit"),
(Boolean) m.get("ecBit"));
final long aTime = ((Number) m.get("accessTime")).longValue();
final long mTime = ((Number) m.get("modificationTime")).longValue();
final long blockSize = ((Number) m.get("blockSize")).longValue();
@ -464,7 +466,8 @@ class JsonUtilClient {
String permString = (String) m.get("permission");
if (permString != null) {
final FsPermission permission = toFsPermission(permString,
(Boolean) m.get("aclBit"), (Boolean) m.get("encBit"));
(Boolean) m.get("aclBit"), (Boolean) m.get("encBit"),
(Boolean) m.get("ecBit"));
aclStatusBuilder.setPermission(permission);
}
final List<?> entries = (List<?>) m.get("entries");

View File

@ -193,6 +193,7 @@ public class HttpFSFileSystem extends FileSystem
public static final String ACL_BIT_JSON = "aclBit";
public static final String ENC_BIT_JSON = "encBit";
public static final String EC_BIT_JSON = "ecBit";
public static final String DIRECTORY_LISTING_JSON = "DirectoryListing";
public static final String PARTIAL_LISTING_JSON = "partialListing";
@ -1042,11 +1043,13 @@ public class HttpFSFileSystem extends FileSystem
final String s = (String) json.get(PERMISSION_JSON);
final Boolean aclBit = (Boolean) json.get(ACL_BIT_JSON);
final Boolean encBit = (Boolean) json.get(ENC_BIT_JSON);
final Boolean erasureBit = (Boolean) json.get(EC_BIT_JSON);
FsPermission perm = new FsPermission(Short.parseShort(s, 8));
final boolean aBit = (aclBit != null) ? aclBit : false;
final boolean eBit = (encBit != null) ? encBit : false;
if (aBit || eBit) {
return new FsPermissionExtension(perm, aBit, eBit);
final boolean ecBit = (erasureBit != null) ? erasureBit : false;
if (aBit || eBit || ecBit) {
return new FsPermissionExtension(perm, aBit, eBit, ecBit);
} else {
return perm;
}

View File

@ -114,6 +114,9 @@ public class FSOperations {
if (fileStatus.getPermission().getEncryptedBit()) {
json.put(HttpFSFileSystem.ENC_BIT_JSON, true);
}
if (fileStatus.getPermission().getErasureCodedBit()) {
json.put(HttpFSFileSystem.EC_BIT_JSON, true);
}
return json;
}
@ -330,7 +333,7 @@ public class FSOperations {
*
* @return void.
*
* @throws IOException thrown if an IO error occured.
* @throws IOException thrown if an IO error occurred.
*/
@Override
public Void execute(FileSystem fs) throws IOException {
@ -355,7 +358,7 @@ public class FSOperations {
* Creates a Concat executor.
*
* @param path target path to concat to.
* @param sources comma seperated absolute paths to use as sources.
* @param sources comma separated absolute paths to use as sources.
*/
public FSConcat(String path, String[] sources) {
this.sources = new Path[sources.length];
@ -374,7 +377,7 @@ public class FSOperations {
*
* @return void.
*
* @throws IOException thrown if an IO error occured.
* @throws IOException thrown if an IO error occurred.
*/
@Override
public Void execute(FileSystem fs) throws IOException {
@ -415,7 +418,7 @@ public class FSOperations {
* wait for it to complete before proceeding with further file
* updates.
*
* @throws IOException thrown if an IO error occured.
* @throws IOException thrown if an IO error occurred.
*/
@Override
public JSONObject execute(FileSystem fs) throws IOException {
@ -449,7 +452,7 @@ public class FSOperations {
*
* @return a Map object (JSON friendly) with the content-summary.
*
* @throws IOException thrown if an IO error occured.
* @throws IOException thrown if an IO error occurred.
*/
@Override
public Map execute(FileSystem fs) throws IOException {
@ -498,7 +501,7 @@ public class FSOperations {
*
* @return The URI of the created file.
*
* @throws IOException thrown if an IO error occured.
* @throws IOException thrown if an IO error occurred.
*/
@Override
public Void execute(FileSystem fs) throws IOException {
@ -546,7 +549,7 @@ public class FSOperations {
* @return <code>true</code> if the delete operation was successful,
* <code>false</code> otherwise.
*
* @throws IOException thrown if an IO error occured.
* @throws IOException thrown if an IO error occurred.
*/
@Override
public JSONObject execute(FileSystem fs) throws IOException {
@ -580,7 +583,7 @@ public class FSOperations {
*
* @return a Map object (JSON friendly) with the file checksum.
*
* @throws IOException thrown if an IO error occured.
* @throws IOException thrown if an IO error occurred.
*/
@Override
public Map execute(FileSystem fs) throws IOException {
@ -637,7 +640,7 @@ public class FSOperations {
*
* @return a JSON object with the user home directory.
*
* @throws IOException thrown if an IO error occured.
* @throws IOException thrown if an IO error occurred.
*/
@Override
@SuppressWarnings("unchecked")
@ -762,7 +765,7 @@ public class FSOperations {
* @return <code>true</code> if the mkdirs operation was successful,
* <code>false</code> otherwise.
*
* @throws IOException thrown if an IO error occured.
* @throws IOException thrown if an IO error occurred.
*/
@Override
public JSONObject execute(FileSystem fs) throws IOException {
@ -796,7 +799,7 @@ public class FSOperations {
*
* @return The inputstream of the file.
*
* @throws IOException thrown if an IO error occured.
* @throws IOException thrown if an IO error occurred.
*/
@Override
public InputStream execute(FileSystem fs) throws IOException {
@ -834,7 +837,7 @@ public class FSOperations {
* @return <code>true</code> if the rename operation was successful,
* <code>false</code> otherwise.
*
* @throws IOException thrown if an IO error occured.
* @throws IOException thrown if an IO error occurred.
*/
@Override
public JSONObject execute(FileSystem fs) throws IOException {
@ -873,7 +876,7 @@ public class FSOperations {
*
* @return void.
*
* @throws IOException thrown if an IO error occured.
* @throws IOException thrown if an IO error occurred.
*/
@Override
public Void execute(FileSystem fs) throws IOException {
@ -910,7 +913,7 @@ public class FSOperations {
*
* @return void.
*
* @throws IOException thrown if an IO error occured.
* @throws IOException thrown if an IO error occurred.
*/
@Override
public Void execute(FileSystem fs) throws IOException {
@ -1183,7 +1186,7 @@ public class FSOperations {
* @return <code>true</code> if the replication value was set,
* <code>false</code> otherwise.
*
* @throws IOException thrown if an IO error occured.
* @throws IOException thrown if an IO error occurred.
*/
@Override
@SuppressWarnings("unchecked")
@ -1225,7 +1228,7 @@ public class FSOperations {
*
* @return void.
*
* @throws IOException thrown if an IO error occured.
* @throws IOException thrown if an IO error occurred.
*/
@Override
public Void execute(FileSystem fs) throws IOException {
@ -1311,7 +1314,7 @@ public class FSOperations {
*
* @return Map a map object (JSON friendly) with the xattr names.
*
* @throws IOException thrown if an IO error occured.
* @throws IOException thrown if an IO error occurred.
*/
@Override
public Map execute(FileSystem fs) throws IOException {
@ -1350,7 +1353,7 @@ public class FSOperations {
*
* @return Map a map object (JSON friendly) with the xattrs.
*
* @throws IOException thrown if an IO error occured.
* @throws IOException thrown if an IO error occurred.
*/
@Override
public Map execute(FileSystem fs) throws IOException {

View File

@ -36,13 +36,13 @@ HttpFS itself is Java Jetty web-application.
HttpFS HTTP web-service API calls are HTTP REST calls that map to a HDFS file system operation. For example, using the `curl` Unix command:
* `$ curl http://httpfs-host:14000/webhdfs/v1/user/foo/README.txt` returns the contents of the HDFS `/user/foo/README.txt` file.
* `$ curl 'http://httpfs-host:14000/webhdfs/v1/user/foo/README.txt?op=OPEN&user.name=foo'` returns the contents of the HDFS `/user/foo/README.txt` file.
* `$ curl http://httpfs-host:14000/webhdfs/v1/user/foo?op=list` returns the contents of the HDFS `/user/foo` directory in JSON format.
* `$ curl 'http://httpfs-host:14000/webhdfs/v1/user/foo?op=LISTSTATUS&user.name=foo'` returns the contents of the HDFS `/user/foo` directory in JSON format.
* `$ curl http://httpfs-host:14000/webhdfs/v1/user/foo?op=GETTRASHROOT` returns the path `/user/foo/.Trash`, if `/` is an encrypted zone, returns the path `/.Trash/foo`. See [more details](../hadoop-project-dist/hadoop-hdfs/TransparentEncryption.html#Rename_and_Trash_considerations) about trash path in an encrypted zone.
* `$ curl 'http://httpfs-host:14000/webhdfs/v1/user/foo?op=GETTRASHROOT&user.name=foo'` returns the path `/user/foo/.Trash`, if `/` is an encrypted zone, returns the path `/.Trash/foo`. See [more details](../hadoop-project-dist/hadoop-hdfs/TransparentEncryption.html#Rename_and_Trash_considerations) about trash path in an encrypted zone.
* `$ curl -X POST http://httpfs-host:14000/webhdfs/v1/user/foo/bar?op=mkdirs` creates the HDFS `/user/foo.bar` directory.
* `$ curl -X POST 'http://httpfs-host:14000/webhdfs/v1/user/foo/bar?op=MKDIRS&user.name=foo'` creates the HDFS `/user/foo/bar` directory.
User and Developer Documentation
--------------------------------

View File

@ -28,6 +28,7 @@ import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FileSystemTestHelper;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.RemoteIterator;
import org.apache.hadoop.fs.contract.ContractTestUtils;
import org.apache.hadoop.fs.http.server.HttpFSServerWebApp;
import org.apache.hadoop.fs.permission.AclEntry;
import org.apache.hadoop.fs.permission.AclStatus;
@ -944,6 +945,24 @@ public abstract class BaseTestHttpFSWith extends HFSTestCase {
assertFalse(httpStatus.isEncrypted());
}
private void testErasureCoding() throws Exception {
Assume.assumeFalse("Assume its not a local FS!", isLocalFS());
FileSystem proxyFs = FileSystem.get(getProxiedFSConf());
FileSystem httpFS = getHttpFSFileSystem();
Path filePath = new Path(getProxiedFSTestDir(), "foo.txt");
proxyFs.create(filePath).close();
ContractTestUtils.assertNotErasureCoded(httpFS, getProxiedFSTestDir());
ContractTestUtils.assertNotErasureCoded(httpFS, filePath);
ContractTestUtils.assertErasureCoded(httpFS,
TestHdfsHelper.ERASURE_CODING_DIR);
ContractTestUtils.assertErasureCoded(httpFS,
TestHdfsHelper.ERASURE_CODING_FILE);
proxyFs.close();
httpFS.close();
}
private void testStoragePolicy() throws Exception {
Assume.assumeFalse("Assume its not a local FS", isLocalFS());
FileSystem fs = FileSystem.get(getProxiedFSConf());
@ -993,7 +1012,7 @@ public abstract class BaseTestHttpFSWith extends HFSTestCase {
WORKING_DIRECTORY, MKDIRS, SET_TIMES, SET_PERMISSION, SET_OWNER,
SET_REPLICATION, CHECKSUM, CONTENT_SUMMARY, FILEACLS, DIRACLS, SET_XATTR,
GET_XATTRS, REMOVE_XATTR, LIST_XATTRS, ENCRYPTION, LIST_STATUS_BATCH,
GETTRASHROOT, STORAGEPOLICY
GETTRASHROOT, STORAGEPOLICY, ERASURE_CODING
}
private void operation(Operation op) throws Exception {
@ -1079,6 +1098,9 @@ public abstract class BaseTestHttpFSWith extends HFSTestCase {
case STORAGEPOLICY:
testStoragePolicy();
break;
case ERASURE_CODING:
testErasureCoding();
break;
}
}

View File

@ -223,6 +223,24 @@ public class TestHttpFSServer extends HFSTestCase {
reader.close();
}
@Test
@TestDir
@TestJetty
@TestHdfs
public void testMkdirs() throws Exception {
createHttpFSServer(false);
String user = HadoopUsersConfTestHelper.getHadoopUsers()[0];
URL url = new URL(TestJettyHelper.getJettyURL(), MessageFormat.format(
"/webhdfs/v1/tmp/sub-tmp?user.name={0}&op=MKDIRS", user));
HttpURLConnection conn = (HttpURLConnection) url.openConnection();
conn.setRequestMethod("PUT");
conn.connect();
Assert.assertEquals(conn.getResponseCode(), HttpURLConnection.HTTP_OK);
getStatus("/tmp/sub-tmp", "LISTSTATUS");
}
@Test
@TestDir
@TestJetty

View File

@ -31,6 +31,9 @@ import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.server.namenode.ErasureCodingPolicyManager;
import org.junit.Test;
import org.junit.runners.model.FrameworkMethod;
import org.junit.runners.model.Statement;
@ -136,6 +139,10 @@ public class TestHdfsHelper extends TestDirHelper {
public static final Path ENCRYPTION_ZONE = new Path("/ez");
public static final Path ENCRYPTED_FILE = new Path("/ez/encfile");
public static final Path ERASURE_CODING_DIR = new Path("/ec");
public static final Path ERASURE_CODING_FILE = new Path("/ec/ecfile");
public static final ErasureCodingPolicy ERASURE_CODING_POLICY =
ErasureCodingPolicyManager.getPolicyByID(HdfsConstants.XOR_2_1_POLICY_ID);
private static MiniDFSCluster MINI_DFS = null;
@ -161,8 +168,12 @@ public class TestHdfsHelper extends TestDirHelper {
new Path(helper.getTestRootDir(), "test.jks").toUri();
conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_KEY_PROVIDER_PATH,
jceksPath);
conf.set(DFSConfigKeys.DFS_NAMENODE_EC_POLICIES_ENABLED_KEY,
ERASURE_CODING_POLICY.getName());
MiniDFSCluster.Builder builder = new MiniDFSCluster.Builder(conf);
builder.numDataNodes(2);
int totalDataNodes = ERASURE_CODING_POLICY.getNumDataUnits() +
ERASURE_CODING_POLICY.getNumParityUnits();
builder.numDataNodes(totalDataNodes);
MiniDFSCluster miniHdfs = builder.build();
final String testkey = "testkey";
DFSTestUtil.createKey(testkey, miniHdfs, conf);
@ -179,6 +190,11 @@ public class TestHdfsHelper extends TestDirHelper {
fileSystem.createEncryptionZone(ENCRYPTION_ZONE, testkey);
fileSystem.create(ENCRYPTED_FILE).close();
fileSystem.mkdirs(ERASURE_CODING_DIR);
fileSystem.setErasureCodingPolicy(ERASURE_CODING_DIR,
ERASURE_CODING_POLICY.getName());
fileSystem.create(ERASURE_CODING_FILE).close();
MINI_DFS = miniHdfs;
}
return MINI_DFS;

View File

@ -249,4 +249,16 @@
<Method name="getBlockLayoutRedundancy" />
<Bug pattern="BIT_IOR_OF_SIGNED_BYTE" />
</Match>
<Match>
<Class name="org.apache.hadoop.hdfs.server.datanode.checker.AbstractFuture" />
<Bug pattern="DLS_DEAD_STORE_OF_CLASS_LITERAL" />
</Match>
<Match>
<Class name="org.apache.hadoop.hdfs.server.datanode.checker.AbstractFuture" />
<Bug pattern="DLS_DEAD_LOCAL_STORE" />
</Match>
<Match>
<Class name="org.apache.hadoop.hdfs.server.datanode.checker.AbstractFuture" />
<Bug pattern="NS_DANGEROUS_NON_SHORT_CIRCUIT" />
</Match>
</FindBugsFilter>

File diff suppressed because one or more lines are too long

View File

@ -562,6 +562,8 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
public static final String DFS_DATANODE_DISK_CHECK_TIMEOUT_DEFAULT =
"10m";
public static final String DFS_NAMENODE_EC_POLICIES_ENABLED_KEY = "dfs.namenode.ec.policies.enabled";
public static final String DFS_NAMENODE_EC_POLICIES_ENABLED_DEFAULT = "";
public static final String DFS_DN_EC_RECONSTRUCTION_STRIPED_READ_THREADS_KEY = "dfs.datanode.ec.reconstruction.stripedread.threads";
public static final int DFS_DN_EC_RECONSTRUCTION_STRIPED_READ_THREADS_DEFAULT = 20;
public static final String DFS_DN_EC_RECONSTRUCTION_STRIPED_READ_BUFFER_SIZE_KEY = "dfs.datanode.ec.reconstruction.stripedread.buffer.size";

View File

@ -0,0 +1,349 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.net;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Preconditions;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.StorageType;
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
import org.apache.hadoop.net.NetworkTopology;
import org.apache.hadoop.net.Node;
import org.apache.hadoop.net.NodeBase;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Random;
/**
* The HDFS-specific network topology class. The main purpose of this
* subclassing is to add a storage-type-aware chooseRandom method. All the
* remaining parts should be the same.
*
* Currently a placeholder to test storage type info.
*/
public class DFSNetworkTopology extends NetworkTopology {
private static final Random RANDOM = new Random();
public static DFSNetworkTopology getInstance(Configuration conf) {
DFSNetworkTopology nt = new DFSNetworkTopology();
return (DFSNetworkTopology)nt.init(DFSTopologyNodeImpl.FACTORY);
}
/**
* Randomly choose one node from <i>scope</i>, with specified storage type.
*
* If scope starts with ~, choose one from all nodes except for the
* ones in <i>scope</i>; otherwise, choose one from <i>scope</i>.
* If excludedNodes is given, choose a node that's not in excludedNodes.
*
* @param scope range of nodes from which a node will be chosen
* @param excludedNodes nodes to be excluded from
* @param type the storage type we search for
* @return the chosen node
*/
public Node chooseRandomWithStorageType(final String scope,
final Collection<Node> excludedNodes, StorageType type) {
netlock.readLock().lock();
try {
if (scope.startsWith("~")) {
return chooseRandomWithStorageType(
NodeBase.ROOT, scope.substring(1), excludedNodes, type);
} else {
return chooseRandomWithStorageType(
scope, null, excludedNodes, type);
}
} finally {
netlock.readLock().unlock();
}
}
/**
* Randomly choose one node from <i>scope</i> with the given storage type.
*
* If scope starts with ~, choose one from all nodes except for the
* ones in <i>scope</i>; otherwise, choose one from <i>scope</i>.
* If excludedNodes is given, choose a node that's not in excludedNodes.
*
* This call makes up to two attempts. It first tries to get a random node
* (with the old method) and checks whether it satisfies the storage type.
* If yes, it simply returns that node. Otherwise, it makes a second call
* (with the new method), passing in the storage type.
*
* This is done for performance reasons. In short, the old method is faster
* but may take several runs, while the new method is somewhat slower but
* always succeeds in one trial.
* See HDFS-11535 for more detail.
*
* @param scope range of nodes from which a node will be chosen
* @param excludedNodes nodes to be excluded from
* @param type the storage type we search for
* @return the chosen node
*/
public Node chooseRandomWithStorageTypeTwoTrial(final String scope,
final Collection<Node> excludedNodes, StorageType type) {
netlock.readLock().lock();
try {
String searchScope;
String excludedScope;
if (scope.startsWith("~")) {
searchScope = NodeBase.ROOT;
excludedScope = scope.substring(1);
} else {
searchScope = scope;
excludedScope = null;
}
// next do a two-trial search
// first trial, call the old method, inherited from NetworkTopology
Node n = chooseRandom(searchScope, excludedScope, excludedNodes);
if (n == null) {
if (LOG.isDebugEnabled()) {
LOG.debug("No node to choose.");
}
// this means there is simply no node to choose from
return null;
}
Preconditions.checkArgument(n instanceof DatanodeDescriptor);
DatanodeDescriptor dnDescriptor = (DatanodeDescriptor)n;
if (dnDescriptor.hasStorageType(type)) {
// the first trial succeeded, just return
return dnDescriptor;
} else {
// otherwise, make the second trial by calling the new method
LOG.debug("First trial failed, node has no type {}, " +
"making second trial carrying this type", type);
return chooseRandomWithStorageType(searchScope, excludedScope,
excludedNodes, type);
}
} finally {
netlock.readLock().unlock();
}
}
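To make the trade-off concrete, a generic plain-Java sketch (illustrative names only, not patch code) of the two-trial pattern described above: try the cheap unconstrained picker first and keep the result if it satisfies the predicate, otherwise fall back to the constrained picker that only draws from known matches.

import java.util.Arrays;
import java.util.List;
import java.util.Random;
import java.util.function.Predicate;

public class TwoTrialChoiceSketch {
  private static final Random RANDOM = new Random();

  static <T> T chooseTwoTrial(List<T> all, Predicate<T> wanted, List<T> matching) {
    if (all.isEmpty()) {
      return null;
    }
    T first = all.get(RANDOM.nextInt(all.size()));   // cheap first trial
    if (wanted.test(first)) {
      return first;
    }
    // second trial: pick only among elements known to satisfy the predicate
    return matching.isEmpty() ? null : matching.get(RANDOM.nextInt(matching.size()));
  }

  public static void main(String[] args) {
    List<String> all = Arrays.asList("disk-a", "ssd-b", "disk-c");
    List<String> ssdsOnly = Arrays.asList("ssd-b");
    System.out.println(chooseTwoTrial(all, s -> s.startsWith("ssd"), ssdsOnly)); // ssd-b
  }
}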
/**
* Choose a random node based on given scope, excludedScope and excludedNodes
* set. Although in general the topology has at most three layers, this class
* will not impose such assumption.
*
* At a high level, the idea is as follows, say:
*
* R has two children A and B, and the storage type is X, say:
* A has X = 6 (rooted at A there are 6 datanodes with X) and B has X = 8.
*
* Then R will generate a random int between 1 and 14; if it is <= 6, it
* recursively calls into A, otherwise into B. This maintains a uniform
* randomness when choosing datanodes.
*
* The tricky part is how to handle excludes.
*
* For excludedNodes, this set is small: currently the main reason a node is
* excluded is that it already has a replica, so randomly picking that node
* again should be rare. Thus we only check that, if the chosen node turns
* out to be excluded, we do chooseRandom again.
*
* For excludedScope, we locate the root of the excluded scope and subtract
* its storage counters from all of its ancestors accordingly, so that the
* excluded root is out of the picture.
*
* @param scope the scope where we look for node.
* @param excludedScope the scope where the node must NOT be from.
* @param excludedNodes the returned node must not be in this set
* @return a node with required storage type
*/
@VisibleForTesting
Node chooseRandomWithStorageType(final String scope,
String excludedScope, final Collection<Node> excludedNodes,
StorageType type) {
if (excludedScope != null) {
if (scope.startsWith(excludedScope)) {
return null;
}
if (!excludedScope.startsWith(scope)) {
excludedScope = null;
}
}
Node node = getNode(scope);
if (node == null) {
LOG.debug("Invalid scope {}, non-existing node", scope);
return null;
}
if (!(node instanceof DFSTopologyNodeImpl)) {
// a node is either DFSTopologyNodeImpl, or a DatanodeDescriptor
return ((DatanodeDescriptor)node).hasStorageType(type) ? node : null;
}
DFSTopologyNodeImpl root = (DFSTopologyNodeImpl)node;
Node excludeRoot = excludedScope == null ? null : getNode(excludedScope);
// check to see if there are nodes satisfying the condition at all
int availableCount = root.getSubtreeStorageCount(type);
if (excludeRoot != null && root.isAncestor(excludeRoot)) {
if (excludeRoot instanceof DFSTopologyNodeImpl) {
availableCount -= ((DFSTopologyNodeImpl)excludeRoot)
.getSubtreeStorageCount(type);
} else {
availableCount -= ((DatanodeDescriptor)excludeRoot)
.hasStorageType(type) ? 1 : 0;
}
}
if (excludedNodes != null) {
for (Node excludedNode : excludedNodes) {
// all excluded nodes should be DatanodeDescriptor
Preconditions.checkArgument(excludedNode instanceof DatanodeDescriptor);
availableCount -= ((DatanodeDescriptor) excludedNode)
.hasStorageType(type) ? 1 : 0;
}
}
if (availableCount <= 0) {
// should never be <0 in general, adding <0 check for safety purpose
return null;
}
// to this point, it is guaranteed that there is at least one node
// that satisfies the requirement, keep trying until we found one.
Node chosen;
do {
chosen = chooseRandomWithStorageTypeAndExcludeRoot(root, excludeRoot,
type);
if (excludedNodes == null || !excludedNodes.contains(chosen)) {
break;
} else {
LOG.debug("Node {} is excluded, continuing.", chosen);
}
} while (true);
LOG.debug("chooseRandom returning {}", chosen);
return chosen;
}
/**
* Choose a random node that has the required storage type, under the given
* root, with an excluded subtree root (could also just be a leaf node).
*
* Note that excludedNode is checked after a random node, so it is not being
* handled here.
*
* @param root the root node where we start searching for a datanode
* @param excludeRoot the root of the subtree that should be excluded
* @param type the expected storage type
* @return a random datanode, with the storage type, and is not in excluded
* scope
*/
private Node chooseRandomWithStorageTypeAndExcludeRoot(
DFSTopologyNodeImpl root, Node excludeRoot, StorageType type) {
Node chosenNode;
if (root.isRack()) {
// children are datanode descriptor
ArrayList<Node> candidates = new ArrayList<>();
for (Node node : root.getChildren()) {
if (node.equals(excludeRoot)) {
continue;
}
DatanodeDescriptor dnDescriptor = (DatanodeDescriptor)node;
if (dnDescriptor.hasStorageType(type)) {
candidates.add(node);
}
}
if (candidates.size() == 0) {
return null;
}
// to this point, all nodes in candidates are valid choices, and they are
// all datanodes, pick a random one.
chosenNode = candidates.get(RANDOM.nextInt(candidates.size()));
} else {
// the children are inner nodes
ArrayList<DFSTopologyNodeImpl> candidates =
getEligibleChildren(root, excludeRoot, type);
if (candidates.size() == 0) {
return null;
}
// again, all children are also inner nodes, we can do this cast.
// to maintain uniformity, the search needs to be based on the counts
// of valid datanodes. Below is a weighted random choice.
int totalCounts = 0;
int[] countArray = new int[candidates.size()];
for (int i = 0; i < candidates.size(); i++) {
DFSTopologyNodeImpl innerNode = candidates.get(i);
int subTreeCount = innerNode.getSubtreeStorageCount(type);
totalCounts += subTreeCount;
countArray[i] = subTreeCount;
}
// generate a random val between [1, totalCounts]
int randomCounts = RANDOM.nextInt(totalCounts) + 1;
int idxChosen = 0;
// searching for the idxChosen can potentially be done with binary
// search, but does not seem worth it here.
for (int i = 0; i < countArray.length; i++) {
if (randomCounts <= countArray[i]) {
idxChosen = i;
break;
}
randomCounts -= countArray[i];
}
DFSTopologyNodeImpl nextRoot = candidates.get(idxChosen);
chosenNode = chooseRandomWithStorageTypeAndExcludeRoot(
nextRoot, excludeRoot, type);
}
return chosenNode;
}
/**
* Given a root, an excluded root, and a storage type, find all children of
* the root that have the storage type available. If the excluded root falls
* under one of the children, that child's storage count must be reduced by
* the count of the excluded root.
* @param root the subtree root we check.
* @param excludeRoot the root of the subtree that should be excluded.
* @param type the storage type we look for.
* @return a list of possible nodes, each of them is eligible as the next
* level root we search.
*/
private ArrayList<DFSTopologyNodeImpl> getEligibleChildren(
DFSTopologyNodeImpl root, Node excludeRoot, StorageType type) {
ArrayList<DFSTopologyNodeImpl> candidates = new ArrayList<>();
int excludeCount = 0;
if (excludeRoot != null && root.isAncestor(excludeRoot)) {
// the subtree to be excluded is under the given root,
// find out the number of nodes to be excluded.
if (excludeRoot instanceof DFSTopologyNodeImpl) {
// if excludedRoot is an inner node, get the counts of all nodes on
// this subtree of that storage type.
excludeCount = ((DFSTopologyNodeImpl) excludeRoot)
.getSubtreeStorageCount(type);
} else {
// if excludedRoot is a datanode, simply ignore this one node
if (((DatanodeDescriptor) excludeRoot).hasStorageType(type)) {
excludeCount = 1;
}
}
}
// have calculated the number of storage counts to be excluded.
// walk through all children to check eligibility.
for (Node node : root.getChildren()) {
DFSTopologyNodeImpl dfsNode = (DFSTopologyNodeImpl) node;
int storageCount = dfsNode.getSubtreeStorageCount(type);
if (excludeRoot != null && excludeCount != 0 &&
(dfsNode.isAncestor(excludeRoot) || dfsNode.equals(excludeRoot))) {
storageCount -= excludeCount;
}
if (storageCount > 0) {
candidates.add(dfsNode);
}
}
return candidates;
}
}
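The core of the recursion in chooseRandomWithStorageTypeAndExcludeRoot is the weighted random selection over per-subtree storage counts; a standalone plain-Java sketch of just that selection step (illustrative, not Hadoop code):

import java.util.Random;

public class WeightedChoiceSketch {
  private static final Random RANDOM = new Random();

  /** Returns index i with probability counts[i] / sum(counts); assumes sum > 0. */
  static int weightedIndex(int[] counts) {
    int total = 0;
    for (int c : counts) {
      total += c;
    }
    int randomCounts = RANDOM.nextInt(total) + 1; // random value in [1, total]
    for (int i = 0; i < counts.length; i++) {
      if (randomCounts <= counts[i]) {
        return i;
      }
      randomCounts -= counts[i];
    }
    throw new AssertionError("unreachable when total > 0");
  }

  public static void main(String[] args) {
    // Rack A has 6 eligible datanodes, rack B has 8: B is chosen ~8/14 of the time.
    System.out.println(weightedIndex(new int[] {6, 8}));
  }
}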

View File

@ -15,7 +15,7 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs;
package org.apache.hadoop.hdfs.net;
import com.google.common.annotations.VisibleForTesting;
import org.apache.hadoop.fs.StorageType;
@ -75,21 +75,58 @@ public class DFSTopologyNodeImpl extends InnerNodeImpl {
private final HashMap
<String, EnumMap<StorageType, Integer>> childrenStorageInfo;
/**
* This map stores the storage type counts of the subtree. We could always
* derive this info by iterating over the childrenStorageInfo variable, but
* as an optimization we store it directly to avoid the iteration.
*/
private final EnumMap<StorageType, Integer> storageTypeCounts;
DFSTopologyNodeImpl(String path) {
super(path);
childrenStorageInfo = new HashMap<>();
storageTypeCounts = new EnumMap<>(StorageType.class);
}
DFSTopologyNodeImpl(
String name, String location, InnerNode parent, int level) {
super(name, location, parent, level);
childrenStorageInfo = new HashMap<>();
storageTypeCounts = new EnumMap<>(StorageType.class);
}
public int getSubtreeStorageCount(StorageType type) {
if (storageTypeCounts.containsKey(type)) {
return storageTypeCounts.get(type);
} else {
return 0;
}
}
int getNumOfChildren() {
return children.size();
}
private void incStorageTypeCount(StorageType type) {
// no locking because the caller is synchronized already
if (storageTypeCounts.containsKey(type)) {
storageTypeCounts.put(type, storageTypeCounts.get(type)+1);
} else {
storageTypeCounts.put(type, 1);
}
}
private void decStorageTypeCount(StorageType type) {
// no locking because the caller is synchronized already
int current = storageTypeCounts.get(type);
current -= 1;
if (current == 0) {
storageTypeCounts.remove(type);
} else {
storageTypeCounts.put(type, current);
}
}
@Override
public boolean add(Node n) {
if (!isAncestor(n)) {
@ -118,14 +155,13 @@ public class DFSTopologyNodeImpl extends InnerNodeImpl {
}
children.add(n);
numOfLeaves++;
synchronized (childrenStorageInfo) {
if (!childrenStorageInfo.containsKey(dnDescriptor.getName())) {
childrenStorageInfo.put(
dnDescriptor.getName(), new EnumMap<>(StorageType.class));
}
for (StorageType st : dnDescriptor.getStorageTypes()) {
childrenStorageInfo.get(dnDescriptor.getName()).put(st, 1);
}
if (!childrenStorageInfo.containsKey(dnDescriptor.getName())) {
childrenStorageInfo.put(
dnDescriptor.getName(), new EnumMap<>(StorageType.class));
}
for (StorageType st : dnDescriptor.getStorageTypes()) {
childrenStorageInfo.get(dnDescriptor.getName()).put(st, 1);
incStorageTypeCount(st);
}
return true;
} else {
@ -141,25 +177,26 @@ public class DFSTopologyNodeImpl extends InnerNodeImpl {
// add n to the subtree of the next ancestor node
if (parentNode.add(n)) {
numOfLeaves++;
synchronized (childrenStorageInfo) {
if (!childrenStorageInfo.containsKey(parentNode.getName())) {
childrenStorageInfo.put(
parentNode.getName(), new EnumMap<>(StorageType.class));
for (StorageType st : dnDescriptor.getStorageTypes()) {
childrenStorageInfo.get(parentNode.getName()).put(st, 1);
}
} else {
EnumMap<StorageType, Integer> currentCount =
childrenStorageInfo.get(parentNode.getName());
for (StorageType st : dnDescriptor.getStorageTypes()) {
if (currentCount.containsKey(st)) {
currentCount.put(st, currentCount.get(st) + 1);
} else {
currentCount.put(st, 1);
}
if (!childrenStorageInfo.containsKey(parentNode.getName())) {
childrenStorageInfo.put(
parentNode.getName(), new EnumMap<>(StorageType.class));
for (StorageType st : dnDescriptor.getStorageTypes()) {
childrenStorageInfo.get(parentNode.getName()).put(st, 1);
}
} else {
EnumMap<StorageType, Integer> currentCount =
childrenStorageInfo.get(parentNode.getName());
for (StorageType st : dnDescriptor.getStorageTypes()) {
if (currentCount.containsKey(st)) {
currentCount.put(st, currentCount.get(st) + 1);
} else {
currentCount.put(st, 1);
}
}
}
for (StorageType st : dnDescriptor.getStorageTypes()) {
incStorageTypeCount(st);
}
return true;
} else {
return false;
@ -178,6 +215,16 @@ public class DFSTopologyNodeImpl extends InnerNodeImpl {
parentName, getPath(this), this, this.getLevel() + 1);
}
@Override
public boolean equals(Object o) {
return super.equals(o);
}
@Override
public int hashCode() {
return super.hashCode();
}
@Override
public boolean remove(Node n) {
if (!isAncestor(n)) {
@ -198,8 +245,9 @@ public class DFSTopologyNodeImpl extends InnerNodeImpl {
if (children.get(i).getName().equals(n.getName())) {
children.remove(i);
childrenMap.remove(n.getName());
synchronized (childrenStorageInfo) {
childrenStorageInfo.remove(dnDescriptor.getName());
childrenStorageInfo.remove(dnDescriptor.getName());
for (StorageType st : dnDescriptor.getStorageTypes()) {
decStorageTypeCount(st);
}
numOfLeaves--;
n.setParent(null);
@ -220,20 +268,21 @@ public class DFSTopologyNodeImpl extends InnerNodeImpl {
boolean isRemoved = parentNode.remove(n);
if (isRemoved) {
// if the parent node has no children, remove the parent node too
synchronized (childrenStorageInfo) {
EnumMap<StorageType, Integer> currentCount =
childrenStorageInfo.get(parentNode.getName());
EnumSet<StorageType> toRemove = EnumSet.noneOf(StorageType.class);
for (StorageType st : dnDescriptor.getStorageTypes()) {
int newCount = currentCount.get(st) - 1;
if (newCount == 0) {
toRemove.add(st);
}
currentCount.put(st, newCount);
}
for (StorageType st : toRemove) {
currentCount.remove(st);
EnumMap<StorageType, Integer> currentCount =
childrenStorageInfo.get(parentNode.getName());
EnumSet<StorageType> toRemove = EnumSet.noneOf(StorageType.class);
for (StorageType st : dnDescriptor.getStorageTypes()) {
int newCount = currentCount.get(st) - 1;
if (newCount == 0) {
toRemove.add(st);
}
currentCount.put(st, newCount);
}
for (StorageType st : toRemove) {
currentCount.remove(st);
}
for (StorageType st : dnDescriptor.getStorageTypes()) {
decStorageTypeCount(st);
}
if (parentNode.getNumOfChildren() == 0) {
for(int i=0; i < children.size(); i++) {

View File

@ -55,6 +55,7 @@ import org.apache.hadoop.hdfs.server.protocol.HeartbeatResponse;
import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo.Capability;
import org.apache.hadoop.hdfs.server.protocol.ReceivedDeletedBlockInfo;
import org.apache.hadoop.hdfs.server.protocol.SlowDiskReports;
import org.apache.hadoop.hdfs.server.protocol.SlowPeerReports;
import org.apache.hadoop.hdfs.server.protocol.StorageBlockReport;
import org.apache.hadoop.hdfs.server.protocol.StorageReceivedDeletedBlocks;
@ -136,7 +137,8 @@ public class DatanodeProtocolClientSideTranslatorPB implements
int xmitsInProgress, int xceiverCount, int failedVolumes,
VolumeFailureSummary volumeFailureSummary,
boolean requestFullBlockReportLease,
@Nonnull SlowPeerReports slowPeers) throws IOException {
@Nonnull SlowPeerReports slowPeers,
@Nonnull SlowDiskReports slowDisks) throws IOException {
HeartbeatRequestProto.Builder builder = HeartbeatRequestProto.newBuilder()
.setRegistration(PBHelper.convert(registration))
.setXmitsInProgress(xmitsInProgress).setXceiverCount(xceiverCount)
@ -156,6 +158,9 @@ public class DatanodeProtocolClientSideTranslatorPB implements
if (slowPeers.haveSlowPeers()) {
builder.addAllSlowPeers(PBHelper.convertSlowPeerInfo(slowPeers));
}
if (slowDisks.haveSlowDisks()) {
builder.addAllSlowDisks(PBHelper.convertSlowDiskInfo(slowDisks));
}
HeartbeatResponseProto resp;
try {
resp = rpcProxy.sendHeartbeat(NULL_CONTROLLER, builder.build());

View File

@ -121,7 +121,8 @@ public class DatanodeProtocolServerSideTranslatorPB implements
request.getXmitsInProgress(),
request.getXceiverCount(), request.getFailedVolumes(),
volumeFailureSummary, request.getRequestFullBlockReportLease(),
PBHelper.convertSlowPeerInfo(request.getSlowPeersList()));
PBHelper.convertSlowPeerInfo(request.getSlowPeersList()),
PBHelper.convertSlowDiskInfo(request.getSlowDisksList()));
} catch (IOException e) {
throw new ServiceException(e);
}

Some files were not shown because too many files have changed in this diff Show More