getAliases() throws IOException {
@Override
public CredentialEntry createCredentialEntry(String alias, char[] credential)
throws IOException {
+ writeLock.lock();
try {
if (keyStore.containsAlias(alias) || cache.containsKey(alias)) {
throw new IOException("Credential " + alias + " already exists in " + this);
}
+ return innerSetCredential(alias, credential);
} catch (KeyStoreException e) {
throw new IOException("Problem looking up credential " + alias + " in " + this,
e);
+ } finally {
+ writeLock.unlock();
}
- return innerSetCredential(alias, credential);
}
@Override
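The hunk above makes the alias-existence check and the insertion atomic by holding the provider's write lock across both, and moves the innerSetCredential() call inside the try block so it too runs under the lock. A minimal sketch of that check-then-act pattern, with hypothetical names (a plain map stands in for the keystore and cache):

import java.io.IOException;
import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.locks.ReentrantReadWriteLock;

class AtomicCreateSketch {
  private final ReentrantReadWriteLock lock = new ReentrantReadWriteLock();
  private final Map<String, char[]> store = new HashMap<String, char[]>();

  char[] create(String alias, char[] credential) throws IOException {
    lock.writeLock().lock();
    try {
      // Check and insert under one lock; without it another thread could
      // create the same alias between the containsKey() and the put().
      if (store.containsKey(alias)) {
        throw new IOException("Credential " + alias + " already exists");
      }
      store.put(alias, credential);
      return credential;
    } finally {
      lock.writeLock().unlock();
    }
  }
}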
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/DirectBufferPool.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/DirectBufferPool.java
similarity index 95%
rename from hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/DirectBufferPool.java
rename to hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/DirectBufferPool.java
index 7332d34594e..510938b7fff 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/DirectBufferPool.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/DirectBufferPool.java
@@ -15,7 +15,7 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
-package org.apache.hadoop.hdfs.util;
+package org.apache.hadoop.util;
import java.lang.ref.WeakReference;
import java.nio.ByteBuffer;
@@ -27,6 +27,7 @@
import org.apache.hadoop.classification.InterfaceAudience;
import com.google.common.annotations.VisibleForTesting;
+import org.apache.hadoop.classification.InterfaceStability;
/**
* A simple class for pooling direct ByteBuffers. This is necessary
@@ -40,7 +41,8 @@
* allocated at the same size. There is no attempt to reuse larger
* buffers to satisfy smaller allocations.
*/
-@InterfaceAudience.Private
+@InterfaceAudience.LimitedPrivate({"HDFS", "MapReduce"})
+@InterfaceStability.Evolving
public class DirectBufferPool {
// Essentially implement a multimap with weak values.
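With the move to org.apache.hadoop.util, the pool becomes available to both HDFS and MapReduce, hence the widened LimitedPrivate audience above. A minimal usage sketch of the existing getBuffer/returnBuffer API (the buffer size is illustrative):

import java.nio.ByteBuffer;
import org.apache.hadoop.util.DirectBufferPool;

public class DirectBufferPoolExample {
  public static void main(String[] args) {
    DirectBufferPool pool = new DirectBufferPool();
    ByteBuffer buf = pool.getBuffer(4096); // reuses a pooled 4K buffer if one exists
    try {
      buf.putInt(42); // use the direct buffer
    } finally {
      pool.returnBuffer(buf); // return it so later callers of the same size can reuse it
    }
    // Per the class javadoc, pooled buffers are held via weak references,
    // so buffers that are never returned can still be garbage collected.
  }
}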
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ToolRunner.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ToolRunner.java
index 49581000ca3..16872d0891e 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ToolRunner.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ToolRunner.java
@@ -30,7 +30,7 @@
* <code>ToolRunner</code> can be used to run classes implementing
* <code>Tool</code> interface. It works in conjunction with
* {@link GenericOptionsParser} to parse the
- *
+ *
* generic hadoop command line arguments and modifies the
* <code>Configuration</code> of the <code>Tool</code>. The
* application-specific options are passed along without being modified.
diff --git a/hadoop-common-project/hadoop-common/src/site/apt/Compatibility.apt.vm b/hadoop-common-project/hadoop-common/src/site/apt/Compatibility.apt.vm
index ecf6e75f3bc..98d1f57166f 100644
--- a/hadoop-common-project/hadoop-common/src/site/apt/Compatibility.apt.vm
+++ b/hadoop-common-project/hadoop-common/src/site/apt/Compatibility.apt.vm
@@ -72,10 +72,13 @@ Apache Hadoop Compatibility
* Private-Stable APIs can change across major releases,
but not within a major release.
+ * Classes not annotated are implicitly "Private". Class members not
+ annotated inherit the annotations of the enclosing class.
+
* Note: APIs generated from the proto files need to be compatible for
-rolling-upgrades. See the section on wire-compatibility for more details. The
-compatibility policies for APIs and wire-communication need to go
-hand-in-hand to address this.
+ rolling-upgrades. See the section on wire-compatibility for more details.
+ The compatibility policies for APIs and wire-communication need to go
+ hand-in-hand to address this.
** Semantic compatibility
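A hedged illustration of the two rules added above (unannotated classes are implicitly Private; unannotated members inherit the enclosing class's annotations) — the class names here are hypothetical:

import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;

@InterfaceAudience.LimitedPrivate({"HDFS", "MapReduce"})
@InterfaceStability.Evolving
public class AnnotatedExample {
  // No annotation here: this member inherits LimitedPrivate/Evolving
  // from the enclosing class.
  public void helper() { }
}

// No annotation at all: implicitly "Private" under the policy above.
class UnannotatedExample { }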
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestDFVariations.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestDFVariations.java
index d457c0e8a46..97dbe5e6069 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestDFVariations.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestDFVariations.java
@@ -29,14 +29,33 @@
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.hadoop.util.Shell;
+import org.junit.After;
+import org.junit.Before;
import org.junit.Test;
+
import static org.junit.Assert.*;
public class TestDFVariations {
+ private static final String TEST_ROOT_DIR =
+ System.getProperty("test.build.data","build/test/data") + "/TestDFVariations";
+ private static File test_root = null;
+ @Before
+ public void setup() throws IOException {
+ test_root = new File(TEST_ROOT_DIR);
+ test_root.mkdirs();
+ }
+
+ @After
+ public void after() throws IOException {
+ FileUtil.setWritable(test_root, true);
+ FileUtil.fullyDelete(test_root);
+ assertTrue(!test_root.exists());
+ }
+
public static class XXDF extends DF {
public XXDF() throws IOException {
- super(new File(System.getProperty("test.build.data","/tmp")), 0L);
+ super(test_root, 0L);
}
@Override
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestPath.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestPath.java
index 94908da7a38..54d25c995bd 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestPath.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestPath.java
@@ -26,11 +26,13 @@
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.AvroTestUtil;
+import org.apache.hadoop.test.GenericTestUtils;
import org.apache.hadoop.util.Shell;
import com.google.common.base.Joiner;
import junit.framework.TestCase;
+import static org.junit.Assert.fail;
public class TestPath extends TestCase {
/**
@@ -305,6 +307,28 @@ public void testURI() throws URISyntaxException, IOException {
// if the child uri is absolute path
assertEquals("foo://bar/fud#boo", new Path(new Path(new URI(
"foo://bar/baz#bud")), new Path(new URI("/fud#boo"))).toString());
+
+ // empty URI
+ URI uri3 = new URI("");
+ assertEquals("", uri3.toString());
+ try {
+ path = new Path(uri3);
+ fail("Expected exception for empty URI");
+ } catch (IllegalArgumentException e) {
+ // expect to receive an IllegalArgumentException
+ GenericTestUtils.assertExceptionContains("Can not create a Path"
+ + " from an empty URI", e);
+ }
+ // null URI
+ uri3 = null;
+ try {
+ path = new Path(uri3);
+ fail("Expected exception for null URI");
+ } catch (IllegalArgumentException e) {
+ // expect to receive an IllegalArgumentException
+ GenericTestUtils.assertExceptionContains("Can not create a Path"
+ + " from a null URI", e);
+ }
}
/** Test URIs created from Path objects */
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/shell/TestPathData.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/shell/TestPathData.java
index 3ddd68da23f..ea9e9847fd3 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/shell/TestPathData.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/shell/TestPathData.java
@@ -35,19 +35,22 @@
import org.junit.Test;
public class TestPathData {
+ private static final String TEST_ROOT_DIR =
+ System.getProperty("test.build.data","build/test/data") + "/testPD";
protected Configuration conf;
protected FileSystem fs;
protected Path testDir;
-
+
@Before
public void initialize() throws Exception {
conf = new Configuration();
fs = FileSystem.getLocal(conf);
- testDir = new Path(
- System.getProperty("test.build.data", "build/test/data") + "/testPD"
- );
+ testDir = new Path(TEST_ROOT_DIR);
+
// don't want scheme on the path, just an absolute path
testDir = new Path(fs.makeQualified(testDir).toUri().getPath());
+ fs.mkdirs(testDir);
+
FileSystem.setDefaultUri(conf, fs.getUri());
fs.setWorkingDirectory(testDir);
fs.mkdirs(new Path("d1"));
@@ -60,6 +63,7 @@ public void initialize() throws Exception {
@After
public void cleanup() throws Exception {
+ fs.delete(testDir, true);
fs.close();
}
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/ViewFileSystemBaseTest.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/ViewFileSystemBaseTest.java
index e1a440d0614..a32455604c9 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/ViewFileSystemBaseTest.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/ViewFileSystemBaseTest.java
@@ -773,4 +773,34 @@ public void testInternalGetAclStatus() throws IOException {
assertFalse(aclStatus.isStickyBit());
}
+ @Test(expected=AccessControlException.class)
+ public void testInternalSetXAttr() throws IOException {
+ fsView.setXAttr(new Path("/internalDir"), "xattrName", null);
+ }
+
+ @Test(expected=NotInMountpointException.class)
+ public void testInternalGetXAttr() throws IOException {
+ fsView.getXAttr(new Path("/internalDir"), "xattrName");
+ }
+
+ @Test(expected=NotInMountpointException.class)
+ public void testInternalGetXAttrs() throws IOException {
+ fsView.getXAttrs(new Path("/internalDir"));
+ }
+
+ @Test(expected=NotInMountpointException.class)
+ public void testInternalGetXAttrsWithNames() throws IOException {
+ fsView.getXAttrs(new Path("/internalDir"), new ArrayList());
+ }
+
+ @Test(expected=NotInMountpointException.class)
+ public void testInternalListXAttr() throws IOException {
+ fsView.listXAttrs(new Path("/internalDir"));
+ }
+
+ @Test(expected=AccessControlException.class)
+ public void testInternalRemoveXAttr() throws IOException {
+ fsView.removeXAttr(new Path("/internalDir"), "xattrName");
+ }
+
}
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/ViewFsBaseTest.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/ViewFsBaseTest.java
index 2813c34bef4..035b280249d 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/ViewFsBaseTest.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/ViewFsBaseTest.java
@@ -747,4 +747,34 @@ public void testInternalGetAclStatus() throws IOException {
AclUtil.getMinimalAcl(PERMISSION_555));
assertFalse(aclStatus.isStickyBit());
}
+
+ @Test(expected=AccessControlException.class)
+ public void testInternalSetXAttr() throws IOException {
+ fcView.setXAttr(new Path("/internalDir"), "xattrName", null);
+ }
+
+ @Test(expected=NotInMountpointException.class)
+ public void testInternalGetXAttr() throws IOException {
+ fcView.getXAttr(new Path("/internalDir"), "xattrName");
+ }
+
+ @Test(expected=NotInMountpointException.class)
+ public void testInternalGetXAttrs() throws IOException {
+ fcView.getXAttrs(new Path("/internalDir"));
+ }
+
+ @Test(expected=NotInMountpointException.class)
+ public void testInternalGetXAttrsWithNames() throws IOException {
+ fcView.getXAttrs(new Path("/internalDir"), new ArrayList());
+ }
+
+ @Test(expected=NotInMountpointException.class)
+ public void testInternalListXAttr() throws IOException {
+ fcView.listXAttrs(new Path("/internalDir"));
+ }
+
+ @Test(expected=AccessControlException.class)
+ public void testInternalRemoveXAttr() throws IOException {
+ fcView.removeXAttr(new Path("/internalDir"), "xattrName");
+ }
}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestDirectBufferPool.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestDirectBufferPool.java
similarity index 95%
rename from hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestDirectBufferPool.java
rename to hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestDirectBufferPool.java
index 31a18fb8815..c8fd754666c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestDirectBufferPool.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestDirectBufferPool.java
@@ -15,7 +15,7 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
-package org.apache.hadoop.hdfs.util;
+package org.apache.hadoop.util;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotSame;
@@ -29,7 +29,7 @@
import com.google.common.collect.Lists;
public class TestDirectBufferPool {
- final DirectBufferPool pool = new DirectBufferPool();
+ final org.apache.hadoop.util.DirectBufferPool pool = new org.apache.hadoop.util.DirectBufferPool();
@Test
public void testBasics() {
diff --git a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/NfsExports.java b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/NfsExports.java
index 96286865385..b617ae5088d 100644
--- a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/NfsExports.java
+++ b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/NfsExports.java
@@ -53,7 +53,12 @@ public static synchronized NfsExports getInstance(Configuration conf) {
long expirationPeriodNano = conf.getLong(
Nfs3Constant.NFS_EXPORTS_CACHE_EXPIRYTIME_MILLIS_KEY,
Nfs3Constant.NFS_EXPORTS_CACHE_EXPIRYTIME_MILLIS_DEFAULT) * 1000 * 1000;
- exports = new NfsExports(cacheSize, expirationPeriodNano, matchHosts);
+ try {
+ exports = new NfsExports(cacheSize, expirationPeriodNano, matchHosts);
+ } catch (IllegalArgumentException e) {
+ LOG.error("Invalid NFS Exports provided: ", e);
+ return exports;
+ }
}
return exports;
}
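The catch above (HDFS-6455) keeps the NFS gateway running when nfs.exports.allowed.hosts is malformed: the error is logged and the previous value of exports (possibly null) is returned instead of letting the IllegalArgumentException propagate out of getInstance(). A self-contained sketch of the same guarded-initialization idea, with hypothetical names — parse() stands in for the NfsExports constructor, which rejects invalid separators:

class NfsExportsGuardSketch {
  static volatile String exports = null;

  static String getInstance(String configured) {
    if (exports == null) {
      try {
        exports = parse(configured);
      } catch (IllegalArgumentException e) {
        System.err.println("Invalid NFS Exports provided: " + e);
        return exports; // may still be null: callers must handle "no valid exports"
      }
    }
    return exports;
  }

  static String parse(String s) {
    if (s.contains(";")) { // invalid separator, as in HDFS-6455
      throw new IllegalArgumentException("Invalid separator in: " + s);
    }
    return s;
  }
}

This is why the RpcProgramMountd and RpcProgramNfs3 hunks below must tolerate a null hostsMatcher/exports and fail closed.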
diff --git a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/mount/RpcProgramMountd.java b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/mount/RpcProgramMountd.java
index 9fbab240f6e..2814cb007e0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/mount/RpcProgramMountd.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/mount/RpcProgramMountd.java
@@ -104,6 +104,10 @@ public XDR nullOp(XDR out, int xid, InetAddress client) {
@Override
public XDR mnt(XDR xdr, XDR out, int xid, InetAddress client) {
+ if (hostsMatcher == null) {
+ return MountResponse.writeMNTResponse(Nfs3Status.NFS3ERR_ACCES, out, xid,
+ null);
+ }
AccessPrivilege accessPrivilege = hostsMatcher.getAccessPrivilege(client);
if (accessPrivilege == AccessPrivilege.NONE) {
return MountResponse.writeMNTResponse(Nfs3Status.NFS3ERR_ACCES, out, xid,
@@ -208,16 +212,23 @@ public void handleInternal(ChannelHandlerContext ctx, RpcInfo info) {
} else if (mntproc == MNTPROC.UMNTALL) {
umntall(out, xid, client);
} else if (mntproc == MNTPROC.EXPORT) {
- // Currently only support one NFS export
+ // Currently only support one NFS export
List<NfsExports> hostsMatchers = new ArrayList<NfsExports>();
- hostsMatchers.add(hostsMatcher);
- out = MountResponse.writeExportList(out, xid, exports, hostsMatchers);
+ if (hostsMatcher != null) {
+ hostsMatchers.add(hostsMatcher);
+ out = MountResponse.writeExportList(out, xid, exports, hostsMatchers);
+ } else {
+ // This means there are no valid exports provided.
+ RpcAcceptedReply.getInstance(xid,
+ RpcAcceptedReply.AcceptState.PROC_UNAVAIL, new VerifierNone()).write(
+ out);
+ }
} else {
// Invalid procedure
RpcAcceptedReply.getInstance(xid,
RpcAcceptedReply.AcceptState.PROC_UNAVAIL, new VerifierNone()).write(
out);
- }
+ }
ChannelBuffer buf = ChannelBuffers.wrappedBuffer(out.asReadOnlyWrap().buffer());
RpcResponse rsp = new RpcResponse(buf, info.remoteAddress());
RpcUtil.sendRpcResponse(ctx, rsp);
diff --git a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java
index f254f50709d..1650b14724d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java
@@ -2123,8 +2123,11 @@ private boolean checkAccessPrivilege(SocketAddress remoteAddress,
if (!doPortMonitoring(remoteAddress)) {
return false;
}
-
+
// Check export table
+ if (exports == null) {
+ return false;
+ }
InetAddress client = ((InetSocketAddress) remoteAddress).getAddress();
AccessPrivilege access = exports.getAccessPrivilege(client);
if (access == AccessPrivilege.NONE) {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index af09ddb5efa..3b5cabf5920 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -272,6 +272,9 @@ Trunk (Unreleased)
HDFS-5794. Fix the inconsistency of layout version number of
ADD_DATANODE_AND_STORAGE_UUIDS between trunk and branch-2. (jing9)
+ HDFS-6657. Remove link to 'Legacy UI' in trunk's Namenode UI.
(Vinayakumar B via wheat9)
+
Release 2.6.0 - UNRELEASED
INCOMPATIBLE CHANGES
@@ -334,6 +337,15 @@ Release 2.6.0 - UNRELEASED
HDFS-6701. Make seed optional in NetworkTopology#sortByDistance.
(Ashwin Shankar via wang)
+ HDFS-6755. There is an unnecessary sleep in the code path where
+ DFSOutputStream#close gives up its attempt to contact the namenode
+ (mitdesai21 via cmccabe)
+
+ HDFS-6750. The DataNode should use its shared memory segment to mark
+ short-circuit replicas that have been unlinked as stale (cmccabe)
+
+ HDFS-6739. Add getDatanodeStorageReport to ClientProtocol. (szetszwo)
+
OPTIMIZATIONS
HDFS-6690. Deduplicate xattr names in memory. (wang)
@@ -377,6 +389,25 @@ Release 2.6.0 - UNRELEASED
HDFS-6731. Run "hdfs zkfc-formatZK" on a server in a non-namenode will cause
a null pointer exception. (Masatake Iwasaki via brandonli)
+ HDFS-6114. Block Scan log rolling will never happen if blocks written
+ continuously leading to huge size of dncp_block_verification.log.curr
+ (vinayakumarb via cmccabe)
+
+ HDFS-6455. NFS: Exception should be added in NFS log for invalid separator in
+ nfs.exports.allowed.hosts. (Abhiraj Butala via brandonli)
+
+ HDFS-6715. Webhdfs wont fail over when it gets java.io.IOException: Namenode
+ is in startup mode. (jing9)
+
+ HDFS-5919. FileJournalManager doesn't purge empty and corrupt inprogress edits
+ files (vinayakumarb)
+
+ HDFS-6752. Avoid Address bind errors in TestDatanodeConfig#testMemlockLimit
+ (vinayakumarb)
+
+ HDFS-6749. FSNamesystem methods should call resolvePath.
+ (Charles Lamb via cnauroth)
+
Release 2.5.0 - UNRELEASED
INCOMPATIBLE CHANGES
@@ -399,6 +430,15 @@ Release 2.5.0 - UNRELEASED
HDFS-6406. Add capability for NFS gateway to reject connections from
unprivileged ports. (atm)
+ HDFS-2006. Ability to support storing extended attributes per file.
+
+ HDFS-5978. Create a tool to take fsimage and expose read-only WebHDFS API.
+ (Akira Ajisaka via wheat9)
+
+ HDFS-6278. Create HTML5-based UI for SNN. (wheat9)
+
+ HDFS-6279. Create new index page for JN / DN. (wheat9)
+
IMPROVEMENTS
HDFS-6007. Update documentation about short-circuit local reads (iwasakims
@@ -416,9 +456,6 @@ Release 2.5.0 - UNRELEASED
HDFS-6158. Clean up dead code for OfflineImageViewer. (wheat9)
- HDFS-5978. Create a tool to take fsimage and expose read-only WebHDFS API.
- (Akira Ajisaka via wheat9)
-
HDFS-6164. Remove lsr in OfflineImageViewer. (wheat9)
HDFS-6167. Relocate the non-public API classes in the hdfs.client package.
@@ -446,10 +483,6 @@ Release 2.5.0 - UNRELEASED
HDFS-6265. Prepare HDFS codebase for JUnit 4.11. (cnauroth)
- HDFS-6278. Create HTML5-based UI for SNN. (wheat9)
-
- HDFS-6279. Create new index page for JN / DN. (wheat9)
-
HDFS-5693. Few NN metrics data points were collected via JMX when NN
is under heavy load. (Ming Ma via jing9)
@@ -821,9 +854,6 @@ Release 2.5.0 - UNRELEASED
HDFS-6464. Support multiple xattr.name parameters for WebHDFS getXAttrs.
(Yi Liu via umamahesh)
- HDFS-6375. Listing extended attributes with the search permission.
- (Charles Lamb via wang)
-
HDFS-6539. test_native_mini_dfs is skipped in hadoop-hdfs/pom.xml
(decstery via cmccabe)
@@ -912,6 +942,27 @@ Release 2.5.0 - UNRELEASED
HDFS-6703. NFS: Files can be deleted from a read-only mount
(Srikanth Upputuri via brandonli)
+ HDFS-6422. getfattr in CLI doesn't throw exception or return non-0 return code
+ when xattr doesn't exist. (Charles Lamb via umamahesh)
+
+ HDFS-6696. Name node cannot start if the path of a file under
+ construction contains ".snapshot". (wang)
+
+ HDFS-6312. WebHdfs HA failover is broken on secure clusters.
+ (daryn via tucu)
+
+ HDFS-6618. FSNamesystem#delete drops the FSN lock between removing INodes
+ from the tree and deleting them from the inode map (kihwal via cmccabe)
+
+ HDFS-6622. Rename and AddBlock may race and produce invalid edits (kihwal
+ via cmccabe)
+
+ HDFS-6723. New NN webUI no longer displays decommissioned state for dead node.
+ (Ming Ma via wheat9)
+
+ HDFS-6717. JIRA HDFS-5804 breaks default nfs-gateway behavior for unsecured config
+ (brandonli)
+
BREAKDOWN OF HDFS-2006 SUBTASKS AND RELATED JIRAS
HDFS-6299. Protobuf for XAttr and client-side implementation. (Yi Liu via umamahesh)
@@ -981,15 +1032,6 @@ Release 2.5.0 - UNRELEASED
HDFS-6492. Support create-time xattrs and atomically setting multiple
xattrs. (wang)
- HDFS-6312. WebHdfs HA failover is broken on secure clusters.
- (daryn via tucu)
-
- HDFS-6618. FSNamesystem#delete drops the FSN lock between removing INodes
- from the tree and deleting them from the inode map (kihwal via cmccabe)
-
- HDFS-6622. Rename and AddBlock may race and produce invalid edits (kihwal
- via cmccabe)
-
Release 2.4.1 - 2014-06-23
INCOMPATIBLE CHANGES
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockReaderLocal.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockReaderLocal.java
index bb9612a9956..cd75e53b273 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockReaderLocal.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockReaderLocal.java
@@ -31,7 +31,7 @@
import org.apache.hadoop.hdfs.server.datanode.CachingStrategy;
import org.apache.hadoop.hdfs.shortcircuit.ClientMmap;
import org.apache.hadoop.hdfs.shortcircuit.ShortCircuitReplica;
-import org.apache.hadoop.hdfs.util.DirectBufferPool;
+import org.apache.hadoop.util.DirectBufferPool;
import org.apache.hadoop.util.DataChecksum;
import com.google.common.annotations.VisibleForTesting;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockReaderLocalLegacy.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockReaderLocalLegacy.java
index c68e548099b..47455754d72 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockReaderLocalLegacy.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockReaderLocalLegacy.java
@@ -40,7 +40,7 @@
import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
import org.apache.hadoop.hdfs.server.datanode.BlockMetadataHeader;
import org.apache.hadoop.hdfs.shortcircuit.ClientMmap;
-import org.apache.hadoop.hdfs.util.DirectBufferPool;
+import org.apache.hadoop.util.DirectBufferPool;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.ipc.RPC;
import org.apache.hadoop.security.UserGroupInformation;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
index 9edb3db6585..45a9011a568 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
@@ -32,19 +32,21 @@
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_CACHE_DROP_BEHIND_READS;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_CACHE_DROP_BEHIND_WRITES;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_CACHE_READAHEAD;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_DATANODE_RESTART_TIMEOUT_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_CONTEXT;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_CONTEXT_DEFAULT;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_DATANODE_RESTART_TIMEOUT_DEFAULT;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_DATANODE_RESTART_TIMEOUT_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_FAILOVER_MAX_ATTEMPTS_DEFAULT;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_FAILOVER_MAX_ATTEMPTS_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_FAILOVER_SLEEPTIME_BASE_DEFAULT;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_FAILOVER_SLEEPTIME_BASE_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_FAILOVER_SLEEPTIME_MAX_DEFAULT;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_FAILOVER_SLEEPTIME_MAX_KEY;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_RETRY_MAX_ATTEMPTS_DEFAULT;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_RETRY_MAX_ATTEMPTS_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_MAX_BLOCK_ACQUIRE_FAILURES_DEFAULT;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_MAX_BLOCK_ACQUIRE_FAILURES_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_READ_PREFETCH_SIZE_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_RETRY_MAX_ATTEMPTS_DEFAULT;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_RETRY_MAX_ATTEMPTS_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_RETRY_WINDOW_BASE;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_SOCKET_CACHE_CAPACITY_DEFAULT;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_SOCKET_CACHE_CAPACITY_KEY;
@@ -60,8 +62,6 @@
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_SOCKET_WRITE_TIMEOUT_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_REPLICATION_DEFAULT;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_REPLICATION_KEY;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_CONTEXT;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_CONTEXT_DEFAULT;
import java.io.BufferedOutputStream;
import java.io.DataInputStream;
@@ -91,7 +91,6 @@
import javax.net.SocketFactory;
-import com.google.common.collect.Lists;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
@@ -112,22 +111,22 @@
import org.apache.hadoop.fs.MD5MD5CRC32FileChecksum;
import org.apache.hadoop.fs.MD5MD5CRC32GzipFileChecksum;
import org.apache.hadoop.fs.Options;
-import org.apache.hadoop.fs.XAttr;
-import org.apache.hadoop.fs.XAttrSetFlag;
import org.apache.hadoop.fs.Options.ChecksumOpt;
import org.apache.hadoop.fs.ParentNotDirectoryException;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.RemoteIterator;
import org.apache.hadoop.fs.UnresolvedLinkException;
import org.apache.hadoop.fs.VolumeId;
+import org.apache.hadoop.fs.XAttr;
+import org.apache.hadoop.fs.XAttrSetFlag;
import org.apache.hadoop.fs.permission.AclEntry;
import org.apache.hadoop.fs.permission.AclStatus;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.client.HdfsDataInputStream;
import org.apache.hadoop.hdfs.client.HdfsDataOutputStream;
-import org.apache.hadoop.hdfs.protocol.AclException;
import org.apache.hadoop.hdfs.net.Peer;
import org.apache.hadoop.hdfs.net.TcpPeerServer;
+import org.apache.hadoop.hdfs.protocol.AclException;
import org.apache.hadoop.hdfs.protocol.CacheDirectiveEntry;
import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo;
import org.apache.hadoop.hdfs.protocol.CacheDirectiveIterator;
@@ -158,8 +157,8 @@
import org.apache.hadoop.hdfs.protocol.datatransfer.IOStreamPair;
import org.apache.hadoop.hdfs.protocol.datatransfer.Op;
import org.apache.hadoop.hdfs.protocol.datatransfer.ReplaceDatanodeOnFailure;
-import org.apache.hadoop.hdfs.protocol.datatransfer.TrustedChannelResolver;
import org.apache.hadoop.hdfs.protocol.datatransfer.Sender;
+import org.apache.hadoop.hdfs.protocol.datatransfer.TrustedChannelResolver;
import org.apache.hadoop.hdfs.protocol.datatransfer.sasl.DataEncryptionKeyFactory;
import org.apache.hadoop.hdfs.protocol.datatransfer.sasl.DataTransferSaslUtil;
import org.apache.hadoop.hdfs.protocol.datatransfer.sasl.SaslDataTransferClient;
@@ -175,6 +174,7 @@
import org.apache.hadoop.hdfs.server.datanode.CachingStrategy;
import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.apache.hadoop.hdfs.server.namenode.SafeModeException;
+import org.apache.hadoop.hdfs.server.protocol.DatanodeStorageReport;
import org.apache.hadoop.io.DataOutputBuffer;
import org.apache.hadoop.io.EnumSetWritable;
import org.apache.hadoop.io.IOUtils;
@@ -200,6 +200,7 @@
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Joiner;
import com.google.common.base.Preconditions;
+import com.google.common.collect.Lists;
import com.google.common.net.InetAddresses;
/********************************************************
@@ -2192,6 +2193,11 @@ public DatanodeInfo[] datanodeReport(DatanodeReportType type)
return namenode.getDatanodeReport(type);
}
+ public DatanodeStorageReport[] getDatanodeStorageReport(
+ DatanodeReportType type) throws IOException {
+ return namenode.getDatanodeStorageReport(type);
+ }
+
/**
* Enter, leave or get safe mode.
*
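A hedged usage sketch of the new client-side API (HDFS-6739). It assumes fs.defaultFS points at an HDFS cluster; DistributedFileSystem#getClient() exposes the DFSClient, and the report type comes from HdfsConstants.DatanodeReportType:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
import org.apache.hadoop.hdfs.server.protocol.DatanodeStorageReport;
import org.apache.hadoop.hdfs.server.protocol.StorageReport;

public class StorageReportExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    DistributedFileSystem dfs = (DistributedFileSystem) FileSystem.get(conf);
    // One report per live datanode, each carrying per-storage usage details.
    DatanodeStorageReport[] reports =
        dfs.getClient().getDatanodeStorageReport(DatanodeReportType.LIVE);
    for (DatanodeStorageReport r : reports) {
      for (StorageReport s : r.getStorageReports()) {
        System.out.println(r.getDatanodeInfo().getHostName()
            + " remaining=" + s.getRemaining());
      }
    }
    dfs.close();
  }
}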
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
index a7cb92fa269..debf83ca1ea 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
@@ -2136,12 +2136,12 @@ private void completeFile(ExtendedBlock last) throws IOException {
throw new IOException(msg);
}
try {
- Thread.sleep(localTimeout);
if (retries == 0) {
throw new IOException("Unable to close file because the last block"
+ " does not have enough number of replicas.");
}
retries--;
+ Thread.sleep(localTimeout);
localTimeout *= 2;
if (Time.now() - localstart > 5000) {
DFSClient.LOG.info("Could not complete " + src + " retrying...");
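The reorder above (HDFS-6755) makes completeFile() check the retry budget before sleeping, so the final, failing attempt no longer pays a pointless localTimeout sleep. A minimal sketch of the fixed loop shape with hypothetical names (tryComplete stands in for the namenode.complete() call):

class CompleteRetrySketch {
  static boolean tryComplete() { return false; } // hypothetical attempt

  static void completeWithRetries() throws Exception {
    int retries = 5;
    long timeoutMs = 400;
    while (!tryComplete()) {
      if (retries == 0) {
        throw new java.io.IOException(
            "Unable to close file: not enough replicas.");
      }
      retries--;
      Thread.sleep(timeoutMs); // sleep only when another attempt follows
      timeoutMs *= 2;          // exponential backoff, as in completeFile
    }
  }
}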
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
index ad331d1e755..a2a52fef389 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
@@ -24,6 +24,7 @@
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.fs.BatchedRemoteIterator.BatchedEntries;
import org.apache.hadoop.fs.CacheFlag;
import org.apache.hadoop.fs.ContentSummary;
import org.apache.hadoop.fs.CreateFlag;
@@ -31,11 +32,10 @@
import org.apache.hadoop.fs.FsServerDefaults;
import org.apache.hadoop.fs.InvalidPathException;
import org.apache.hadoop.fs.Options;
-import org.apache.hadoop.fs.XAttr;
-import org.apache.hadoop.fs.BatchedRemoteIterator.BatchedEntries;
import org.apache.hadoop.fs.Options.Rename;
import org.apache.hadoop.fs.ParentNotDirectoryException;
import org.apache.hadoop.fs.UnresolvedLinkException;
+import org.apache.hadoop.fs.XAttr;
import org.apache.hadoop.fs.XAttrSetFlag;
import org.apache.hadoop.fs.permission.AclEntry;
import org.apache.hadoop.fs.permission.AclStatus;
@@ -47,6 +47,7 @@
import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenSelector;
import org.apache.hadoop.hdfs.server.namenode.NotReplicatedYetException;
import org.apache.hadoop.hdfs.server.namenode.SafeModeException;
+import org.apache.hadoop.hdfs.server.protocol.DatanodeStorageReport;
import org.apache.hadoop.io.EnumSetWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.retry.AtMostOnce;
@@ -654,6 +655,13 @@ public void renewLease(String clientName) throws AccessControlException,
public DatanodeInfo[] getDatanodeReport(HdfsConstants.DatanodeReportType type)
throws IOException;
+ /**
+ * Get a report on the current datanode storages.
+ */
+ @Idempotent
+ public DatanodeStorageReport[] getDatanodeStorageReport(
+ HdfsConstants.DatanodeReportType type) throws IOException;
+
/**
* Get the block size for the given file.
* @param filename The name of the file
@@ -1337,6 +1345,6 @@ public List<XAttr> listXAttrs(String src)
* @param xAttr <code>XAttr</code> to remove
* @throws IOException
*/
- @Idempotent
+ @AtMostOnce
public void removeXAttr(String src, XAttr xAttr) throws IOException;
}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/PacketReceiver.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/PacketReceiver.java
index 3503554636a..0de445c222d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/PacketReceiver.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/PacketReceiver.java
@@ -27,7 +27,7 @@
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.hdfs.util.DirectBufferPool;
+import org.apache.hadoop.util.DirectBufferPool;
import org.apache.hadoop.io.IOUtils;
import com.google.common.base.Preconditions;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java
index 3a312b0d418..df0d1b0006c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java
@@ -72,6 +72,7 @@
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateSnapshotResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateSymlinkRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateSymlinkResponseProto;
+import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DatanodeStorageReportProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DeleteRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DeleteResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DeleteSnapshotRequestProto;
@@ -93,6 +94,8 @@
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDataEncryptionKeyResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDatanodeReportRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDatanodeReportResponseProto;
+import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDatanodeStorageReportRequestProto;
+import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDatanodeStorageReportResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileInfoRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileInfoResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileLinkInfoRequestProto;
@@ -174,7 +177,6 @@
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto;
-import org.apache.hadoop.hdfs.protocol.proto.XAttrProtos;
import org.apache.hadoop.hdfs.protocol.proto.XAttrProtos.GetXAttrsRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.XAttrProtos.GetXAttrsResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.XAttrProtos.ListXAttrsRequestProto;
@@ -655,6 +657,21 @@ public GetDatanodeReportResponseProto getDatanodeReport(
}
}
+ @Override
+ public GetDatanodeStorageReportResponseProto getDatanodeStorageReport(
+ RpcController controller, GetDatanodeStorageReportRequestProto req)
+ throws ServiceException {
+ try {
+ List<DatanodeStorageReportProto> reports = PBHelper.convertDatanodeStorageReports(
+ server.getDatanodeStorageReport(PBHelper.convert(req.getType())));
+ return GetDatanodeStorageReportResponseProto.newBuilder()
+ .addAllDatanodeStorageReports(reports)
+ .build();
+ } catch (IOException e) {
+ throw new ServiceException(e);
+ }
+ }
+
@Override
public GetPreferredBlockSizeResponseProto getPreferredBlockSize(
RpcController controller, GetPreferredBlockSizeRequestProto req)
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java
index d20ae1d14ea..0f8eba970ca 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java
@@ -94,6 +94,7 @@
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDataEncryptionKeyRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDataEncryptionKeyResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDatanodeReportRequestProto;
+import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDatanodeStorageReportRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileInfoRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileInfoResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileLinkInfoRequestProto;
@@ -151,6 +152,7 @@
import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
import org.apache.hadoop.hdfs.server.namenode.NotReplicatedYetException;
import org.apache.hadoop.hdfs.server.namenode.SafeModeException;
+import org.apache.hadoop.hdfs.server.protocol.DatanodeStorageReport;
import org.apache.hadoop.io.EnumSetWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.ipc.ProtobufHelper;
@@ -580,6 +582,20 @@ public DatanodeInfo[] getDatanodeReport(DatanodeReportType type)
}
}
+ @Override
+ public DatanodeStorageReport[] getDatanodeStorageReport(DatanodeReportType type)
+ throws IOException {
+ final GetDatanodeStorageReportRequestProto req
+ = GetDatanodeStorageReportRequestProto.newBuilder()
+ .setType(PBHelper.convert(type)).build();
+ try {
+ return PBHelper.convertDatanodeStorageReports(
+ rpcProxy.getDatanodeStorageReport(null, req).getDatanodeStorageReportsList());
+ } catch (ServiceException e) {
+ throw ProtobufHelper.getRemoteException(e);
+ }
+ }
+
@Override
public long getPreferredBlockSize(String filename) throws IOException,
UnresolvedLinkException {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/DatanodeProtocolClientSideTranslatorPB.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/DatanodeProtocolClientSideTranslatorPB.java
index 5775d6e2634..46023ecaa34 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/DatanodeProtocolClientSideTranslatorPB.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/DatanodeProtocolClientSideTranslatorPB.java
@@ -21,18 +21,13 @@
import java.io.Closeable;
import java.io.IOException;
import java.net.InetSocketAddress;
-import java.util.HashMap;
import java.util.List;
-import java.util.Map;
-import java.util.concurrent.TimeUnit;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdfs.protocol.AlreadyBeingCreatedException;
import org.apache.hadoop.hdfs.protocol.DatanodeID;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
-import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.protocol.RollingUpgradeStatus;
import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedRequestProto;
@@ -51,7 +46,6 @@
import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageBlockReportProto;
import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageReceivedDeletedBlocksProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.VersionRequestProto;
-import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand;
import org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol;
import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
@@ -61,14 +55,10 @@
import org.apache.hadoop.hdfs.server.protocol.StorageBlockReport;
import org.apache.hadoop.hdfs.server.protocol.StorageReceivedDeletedBlocks;
import org.apache.hadoop.hdfs.server.protocol.StorageReport;
-import org.apache.hadoop.io.retry.RetryPolicies;
-import org.apache.hadoop.io.retry.RetryPolicy;
-import org.apache.hadoop.io.retry.RetryProxy;
import org.apache.hadoop.ipc.ProtobufHelper;
import org.apache.hadoop.ipc.ProtobufRpcEngine;
import org.apache.hadoop.ipc.ProtocolMetaInterface;
import org.apache.hadoop.ipc.RPC;
-import org.apache.hadoop.ipc.RemoteException;
import org.apache.hadoop.ipc.RpcClientUtil;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.security.UserGroupInformation;
@@ -137,9 +127,7 @@ public HeartbeatResponse sendHeartbeat(DatanodeRegistration registration,
.setRegistration(PBHelper.convert(registration))
.setXmitsInProgress(xmitsInProgress).setXceiverCount(xceiverCount)
.setFailedVolumes(failedVolumes);
- for (StorageReport r : reports) {
- builder.addReports(PBHelper.convert(r));
- }
+ builder.addAllReports(PBHelper.convertStorageReports(reports));
if (cacheCapacity != 0) {
builder.setCacheCapacity(cacheCapacity);
}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java
index 791c51d6b2c..859542dae65 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java
@@ -90,6 +90,7 @@
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CachePoolStatsProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateFlagProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DatanodeReportTypeProto;
+import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DatanodeStorageReportProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsStatsResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RollingUpgradeActionProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RollingUpgradeInfoProto;
@@ -102,14 +103,11 @@
import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommandProto;
import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto;
import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto;
-import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeStorageProto;
-import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeStorageProto.StorageState;
import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.FinalizeCommandProto;
import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCommandProto;
import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.NNHAStatusHeartbeatProto;
import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto;
import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterCommandProto;
-import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageReportProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockKeyProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto;
@@ -125,6 +123,8 @@
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.AdminState;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeLocalInfoProto;
+import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto;
+import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto.StorageState;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DirectoryListingProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExportedBlockKeysProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto;
@@ -149,6 +149,7 @@
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryListingProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryStatusProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto;
+import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageReportProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageUuidsProto;
@@ -182,6 +183,7 @@
import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage.State;
+import org.apache.hadoop.hdfs.server.protocol.DatanodeStorageReport;
import org.apache.hadoop.hdfs.server.protocol.FinalizeCommand;
import org.apache.hadoop.hdfs.server.protocol.JournalInfo;
import org.apache.hadoop.hdfs.server.protocol.KeyUpdateCommand;
@@ -620,6 +622,41 @@ public static DatanodeInfoProto convert(DatanodeInfo info) {
return builder.build();
}
+ public static DatanodeStorageReportProto convertDatanodeStorageReport(
+ DatanodeStorageReport report) {
+ return DatanodeStorageReportProto.newBuilder()
+ .setDatanodeInfo(convert(report.getDatanodeInfo()))
+ .addAllStorageReports(convertStorageReports(report.getStorageReports()))
+ .build();
+ }
+
+ public static List<DatanodeStorageReportProto> convertDatanodeStorageReports(
+ DatanodeStorageReport[] reports) {
+ final List<DatanodeStorageReportProto> protos
+ = new ArrayList<DatanodeStorageReportProto>(reports.length);
+ for(int i = 0; i < reports.length; i++) {
+ protos.add(convertDatanodeStorageReport(reports[i]));
+ }
+ return protos;
+ }
+
+ public static DatanodeStorageReport convertDatanodeStorageReport(
+ DatanodeStorageReportProto proto) {
+ return new DatanodeStorageReport(
+ convert(proto.getDatanodeInfo()),
+ convertStorageReports(proto.getStorageReportsList()));
+ }
+
+ public static DatanodeStorageReport[] convertDatanodeStorageReports(
+ List<DatanodeStorageReportProto> protos) {
+ final DatanodeStorageReport[] reports
+ = new DatanodeStorageReport[protos.size()];
+ for(int i = 0; i < reports.length; i++) {
+ reports[i] = convertDatanodeStorageReport(protos.get(i));
+ }
+ return reports;
+ }
+
public static AdminStates convert(AdminState adminState) {
switch(adminState) {
case DECOMMISSION_INPROGRESS:
@@ -1717,6 +1754,15 @@ public static StorageReport[] convertStorageReports(
return report;
}
+ public static List<StorageReportProto> convertStorageReports(StorageReport[] storages) {
+ final List<StorageReportProto> protos = new ArrayList<StorageReportProto>(
+ storages.length);
+ for(int i = 0; i < storages.length; i++) {
+ protos.add(convert(storages[i]));
+ }
+ return protos;
+ }
+
public static JournalInfo convert(JournalInfoProto info) {
int lv = info.hasLayoutVersion() ? info.getLayoutVersion() : 0;
int nsID = info.hasNamespaceID() ? info.getNamespaceID() : 0;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java
index a645d434985..fcc189d9f9b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java
@@ -259,6 +259,15 @@ DatanodeStorageInfo[] getStorageInfos() {
}
}
+ public StorageReport[] getStorageReports() {
+ final StorageReport[] reports = new StorageReport[storageMap.size()];
+ final DatanodeStorageInfo[] infos = getStorageInfos();
+ for(int i = 0; i < infos.length; i++) {
+ reports[i] = infos[i].toStorageReport();
+ }
+ return reports;
+ }
+
boolean hasStaleStorages() {
synchronized (storageMap) {
for (DatanodeStorageInfo storage : storageMap.values()) {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeStorageInfo.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeStorageInfo.java
index 8d2104bfb11..64abddaa9c2 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeStorageInfo.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeStorageInfo.java
@@ -291,6 +291,12 @@ public int hashCode() {
public String toString() {
return "[" + storageType + "]" + storageID + ":" + state;
}
+
+ StorageReport toStorageReport() {
+ return new StorageReport(
+ new DatanodeStorage(storageID, state, storageType),
+ false, capacity, dfsUsed, remaining, blockPoolUsed);
+ }
static Iterable<StorageType> toStorageTypes(
final Iterable<DatanodeStorageInfo> infos) {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceScanner.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceScanner.java
index 1039b4fe922..bbb67fc4739 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceScanner.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceScanner.java
@@ -84,6 +84,10 @@ class BlockPoolSliceScanner {
private final SortedSet<BlockScanInfo> blockInfoSet
= new TreeSet<BlockScanInfo>(BlockScanInfo.LAST_SCAN_TIME_COMPARATOR);
+
+ private final SortedSet<BlockScanInfo> newBlockInfoSet =
+ new TreeSet<BlockScanInfo>(BlockScanInfo.LAST_SCAN_TIME_COMPARATOR);
+
private final GSet<Block, BlockScanInfo> blockMap
= new LightWeightGSet<Block, BlockScanInfo>(
LightWeightGSet.computeCapacity(0.5, "BlockMap"));
@@ -195,7 +199,7 @@ public LinkedElement getNext() {
BlockScanInfo info = new BlockScanInfo( block );
info.lastScanTime = scanTime--;
//still keep 'info.lastScanType' to NONE.
- addBlockInfo(info);
+ addBlockInfo(info, false);
}
RollingLogs rollingLogs = null;
@@ -221,25 +225,42 @@ private void updateBytesToScan(long len, long lastScanTime) {
// Should we change throttler bandwidth every time bytesLeft changes?
// not really required.
}
-
- private synchronized void addBlockInfo(BlockScanInfo info) {
- boolean added = blockInfoSet.add(info);
+
+ /**
+ * Add the BlockScanInfo to sorted set of blockScanInfo
+ * @param info BlockScanInfo to be added
+ * @param isNewBlock true if the block is the new Block, false if
+ * BlockScanInfo is being updated with new scanTime
+ */
+ private synchronized void addBlockInfo(BlockScanInfo info,
+ boolean isNewBlock) {
+ boolean added = false;
+ if (isNewBlock) {
+ // check whether the block already present
+ boolean exists = blockInfoSet.contains(info);
+ added = !exists && newBlockInfoSet.add(info);
+ } else {
+ added = blockInfoSet.add(info);
+ }
blockMap.put(info);
if (added) {
updateBytesToScan(info.getNumBytes(), info.lastScanTime);
}
}
-
+
private synchronized void delBlockInfo(BlockScanInfo info) {
boolean exists = blockInfoSet.remove(info);
+ if (!exists){
+ exists = newBlockInfoSet.remove(info);
+ }
blockMap.remove(info);
if (exists) {
updateBytesToScan(-info.getNumBytes(), info.lastScanTime);
}
}
-
+
/** Update blockMap by the given LogEntry */
private synchronized void updateBlockInfo(LogEntry e) {
BlockScanInfo info = blockMap.get(new Block(e.blockId, 0, e.genStamp));
@@ -249,7 +270,7 @@ private synchronized void updateBlockInfo(LogEntry e) {
delBlockInfo(info);
info.lastScanTime = e.verificationTime;
info.lastScanType = ScanType.VERIFICATION_SCAN;
- addBlockInfo(info);
+ addBlockInfo(info, false);
}
}
@@ -275,14 +296,14 @@ synchronized void addBlock(ExtendedBlock block) {
info = new BlockScanInfo(block.getLocalBlock());
info.lastScanTime = getNewBlockScanTime();
- addBlockInfo(info);
+ addBlockInfo(info, true);
adjustThrottler();
}
/** Deletes the block from internal structures */
synchronized void deleteBlock(Block block) {
BlockScanInfo info = blockMap.get(block);
- if ( info != null ) {
+ if (info != null) {
delBlockInfo(info);
}
}
@@ -319,7 +340,7 @@ private synchronized void updateScanStatus(BlockScanInfo info,
info.lastScanType = type;
info.lastScanTime = now;
info.lastScanOk = scanOk;
- addBlockInfo(info);
+ addBlockInfo(info, false);
// Don't update meta data if the verification failed.
if (!scanOk) {
@@ -578,7 +599,7 @@ private boolean assignInitialVerificationTimes() {
delBlockInfo(info);
info.lastScanTime = lastScanTime;
lastScanTime += verifyInterval;
- addBlockInfo(info);
+ addBlockInfo(info, false);
}
}
}
@@ -674,12 +695,21 @@ private void scan() {
throw e;
} finally {
rollVerificationLogs();
+ rollNewBlocksInfo();
if (LOG.isDebugEnabled()) {
LOG.debug("Done scanning block pool: " + blockPoolId);
}
}
}
-
+
+ // add new blocks to scan in next iteration
+ private synchronized void rollNewBlocksInfo() {
+ for (BlockScanInfo newBlock : newBlockInfoSet) {
+ blockInfoSet.add(newBlock);
+ }
+ newBlockInfoSet.clear();
+ }
+
private synchronized void rollVerificationLogs() {
if (verificationLog != null) {
try {
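The two-set design above is the core of the HDFS-6114 fix: blocks written while a scan pass is running land in newBlockInfoSet and are only merged into blockInfoSet by rollNewBlocksInfo() after rollVerificationLogs(), so a continuous stream of new blocks can no longer keep the main set from draining and log rolling from ever happening. A minimal sketch of the deferred-merge idea (generic ids stand in for BlockScanInfo):

import java.util.TreeSet;

class DeferredMergeSketch {
  private final TreeSet<Long> current = new TreeSet<Long>();
  private final TreeSet<Long> arrivals = new TreeSet<Long>();

  synchronized void add(long id) {
    if (!current.contains(id)) {
      arrivals.add(id); // defer: don't extend the set being scanned
    }
  }

  synchronized void finishScanPass() {
    current.addAll(arrivals); // merge new blocks for the next iteration
    arrivals.clear();
  }
}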
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ShortCircuitRegistry.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ShortCircuitRegistry.java
index 9dba6a2085d..a252a17855a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ShortCircuitRegistry.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ShortCircuitRegistry.java
@@ -74,7 +74,7 @@
* DN also marks the block's slots as "unanchorable" to prevent additional
* clients from initiating these operations in the future.
*
- * The counterpart fo this class on the client is {@link DfsClientShmManager}.
+ * The counterpart of this class on the client is {@link DfsClientShmManager}.
*/
public class ShortCircuitRegistry {
public static final Log LOG = LogFactory.getLog(ShortCircuitRegistry.class);
@@ -217,7 +217,32 @@ public synchronized boolean processBlockMunlockRequest(
}
return allowMunlock;
}
-
+
+ /**
+ * Invalidate any slot associated with a blockId that we are invalidating
+ * (deleting) from this DataNode. When a slot is invalid, the DFSClient will
+ * not use the corresponding replica for new read or mmap operations (although
+ * existing, ongoing read or mmap operations will complete.)
+ *
+ * @param blockId The block ID.
+ */
+ public synchronized void processBlockInvalidation(ExtendedBlockId blockId) {
+ if (!enabled) return;
+ final Set<Slot> affectedSlots = slots.get(blockId);
+ if (!affectedSlots.isEmpty()) {
+ final StringBuilder bld = new StringBuilder();
+ String prefix = "";
+ bld.append("Block ").append(blockId).append(" has been invalidated. ").
+ append("Marking short-circuit slots as invalid: ");
+ for (Slot slot : affectedSlots) {
+ slot.makeInvalid();
+ bld.append(prefix).append(slot.toString());
+ prefix = ", ";
+ }
+ LOG.info(bld.toString());
+ }
+ }
+
public static class NewShmInfo implements Closeable {
public final ShmId shmId;
public final FileInputStream stream;
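processBlockInvalidation() walks every slot registered for the deleted block and flips its valid bit, so clients stop using the replica for new reads or mmaps. A self-contained sketch of that walk, using a plain map where the real registry keeps a multimap keyed by ExtendedBlockId:

    import java.util.Collections;
    import java.util.HashMap;
    import java.util.HashSet;
    import java.util.Map;
    import java.util.Set;

    // Illustrative types only; the real Slot lives in shared memory.
    class SlotRegistrySketch {
      static final class Slot {
        private volatile boolean valid = true;
        void makeInvalid() { valid = false; }
        boolean isValid() { return valid; }
      }

      private final Map<String, Set<Slot>> slots = new HashMap<>();

      synchronized void registerSlot(String blockId, Slot slot) {
        slots.computeIfAbsent(blockId, k -> new HashSet<>()).add(slot);
      }

      synchronized void processBlockInvalidation(String blockId) {
        for (Slot slot : slots.getOrDefault(blockId, Collections.emptySet())) {
          slot.makeInvalid(); // in-flight reads finish; new ones skip the replica
        }
      }
    }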
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
index b068c664fe3..e8a06aec8ac 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
@@ -44,6 +44,7 @@
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.ExtendedBlockId;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.StorageType;
import org.apache.hadoop.hdfs.protocol.Block;
@@ -1232,8 +1233,15 @@ public void invalidate(String bpid, Block invalidBlks[]) throws IOException {
}
volumeMap.remove(bpid, invalidBlks[i]);
}
+
+ // If a DFSClient has the replica in its cache of short-circuit file
+ // descriptors (and the client is using ShortCircuitShm), invalidate it.
+ datanode.getShortCircuitRegistry().processBlockInvalidation(
+ new ExtendedBlockId(invalidBlks[i].getBlockId(), bpid));
+
// If the block is cached, start uncaching it.
cacheManager.uncacheBlock(bpid, invalidBlks[i].getBlockId());
+
// Delete the block asynchronously to make sure we can do it fast enough.
// It's ok to unlink the block file before the uncache operation
// finishes.
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java
index 85cfc1c7746..b2adcd455fc 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java
@@ -1074,10 +1074,11 @@ void logSetXAttrs(String src, List<XAttr> xAttrs, boolean toLogRpcIds) {
logEdit(op);
}
- void logRemoveXAttrs(String src, List<XAttr> xAttrs) {
+ void logRemoveXAttrs(String src, List<XAttr> xAttrs, boolean toLogRpcIds) {
final RemoveXAttrOp op = RemoveXAttrOp.getInstance();
op.src = src;
op.xAttrs = xAttrs;
+ logRpcIds(op, toLogRpcIds);
logEdit(op);
}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java
index 858cd57b23f..a721491948d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java
@@ -821,6 +821,10 @@ private long applyEditLogOp(FSEditLogOp op, FSDirectory fsDir,
RemoveXAttrOp removeXAttrOp = (RemoveXAttrOp) op;
fsDir.unprotectedRemoveXAttrs(removeXAttrOp.src,
removeXAttrOp.xAttrs);
+ if (toAddRetryCache) {
+ fsNamesys.addCacheEntry(removeXAttrOp.rpcClientId,
+ removeXAttrOp.rpcCallId);
+ }
break;
}
default:
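Together with the logRemoveXAttrs() change above, this closes the retry loop: the edit op carries the originating RPC's (clientId, callId) into the log, and replay re-seeds the retry cache so a client retry of an already-applied removeXAttr is answered from cache instead of failing. A simplified sketch of the idiom with hypothetical types (the real RetryCache also tracks payloads and expiry):

    import java.util.Arrays;
    import java.util.HashSet;
    import java.util.Set;

    class RetryCacheSketch {
      private final Set<String> completed = new HashSet<>();

      private static String key(byte[] clientId, int callId) {
        return Arrays.toString(clientId) + ":" + callId;
      }

      // Called while applying a logged op during edit-log replay
      // (cf. fsNamesys.addCacheEntry(rpcClientId, rpcCallId) above).
      synchronized void addCacheEntry(byte[] clientId, int callId) {
        completed.add(key(clientId, callId));
      }

      // Consulted when a (possibly retried) client RPC arrives.
      synchronized boolean isDuplicate(byte[] clientId, int callId) {
        return completed.contains(key(clientId, callId));
      }
    }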
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java
index e972799b335..5543e0cb86e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java
@@ -3551,6 +3551,7 @@ void readFields(DataInputStream in, int logVersion) throws IOException {
XAttrEditLogProto p = XAttrEditLogProto.parseDelimitedFrom(in);
src = p.getSrc();
xAttrs = PBHelper.convertXAttrs(p.getXAttrsList());
+ readRpcIds(in, logVersion);
}
@Override
@@ -3561,18 +3562,22 @@ public void writeFields(DataOutputStream out) throws IOException {
}
b.addAllXAttrs(PBHelper.convertXAttrProto(xAttrs));
b.build().writeDelimitedTo(out);
+ // clientId and callId
+ writeRpcIds(rpcClientId, rpcCallId, out);
}
@Override
protected void toXml(ContentHandler contentHandler) throws SAXException {
XMLUtils.addSaxString(contentHandler, "SRC", src);
appendXAttrsToXml(contentHandler, xAttrs);
+ appendRpcIdsToXml(contentHandler, rpcClientId, rpcCallId);
}
@Override
void fromXml(Stanza st) throws InvalidXmlException {
src = st.getValue("SRC");
xAttrs = readXAttrsFromXml(st);
+ readRpcIdsFromXml(st);
}
}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java
index af3ba44f250..af3cf2c06fd 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java
@@ -614,6 +614,16 @@ private void loadFullNameINodes(long numFiles, DataInput in, Counter counter)
INodeDirectory parentINode = fsDir.rootDir;
for (long i = 0; i < numFiles; i++) {
pathComponents = FSImageSerialization.readPathComponents(in);
+ for (int j=0; j < pathComponents.length; j++) {
+ byte[] newComponent = renameReservedComponentOnUpgrade
+ (pathComponents[j], getLayoutVersion());
+ if (!Arrays.equals(newComponent, pathComponents[j])) {
+ String oldPath = DFSUtil.byteArray2PathString(pathComponents);
+ pathComponents[j] = newComponent;
+ String newPath = DFSUtil.byteArray2PathString(pathComponents);
+ LOG.info("Renaming reserved path " + oldPath + " to " + newPath);
+ }
+ }
final INode newNode = loadINode(
pathComponents[pathComponents.length-1], false, in, counter);
@@ -926,6 +936,7 @@ LayoutVersion.Feature.ADD_INODE_ID, getLayoutVersion())) {
oldnode = namesystem.dir.getInode(cons.getId()).asFile();
inSnapshot = true;
} else {
+ path = renameReservedPathsOnUpgrade(path, getLayoutVersion());
final INodesInPath iip = fsDir.getLastINodeInPath(path);
oldnode = INodeFile.valueOf(iip.getINode(0), path);
}
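Both hunks rename path components that collide with reserved names while an old image is loaded. A sketch of the per-component check, with a hypothetical reserved name and replacement (the real mapping is chosen by renameReservedComponentOnUpgrade based on the layout version):

    import java.nio.charset.StandardCharsets;
    import java.util.Arrays;

    class ReservedPathSketch {
      private static final byte[] RESERVED =
          ".reserved".getBytes(StandardCharsets.UTF_8);
      private static final byte[] REPLACEMENT =            // hypothetical
          ".user-renamed-.reserved".getBytes(StandardCharsets.UTF_8);

      static byte[] renameIfReserved(byte[] component) {
        return Arrays.equals(component, RESERVED) ? REPLACEMENT : component;
      }

      static void upgrade(byte[][] pathComponents) {
        for (int j = 0; j < pathComponents.length; j++) {
          byte[] renamed = renameIfReserved(pathComponents[j]);
          if (!Arrays.equals(renamed, pathComponents[j])) {
            pathComponents[j] = renamed; // the real loader logs old -> new
          }
        }
      }
    }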
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index d19c214ff31..bd509c2edd8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -62,6 +62,8 @@
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_MAX_OBJECTS_DEFAULT;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_MAX_OBJECTS_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_RANDOMIZE_BLOCK_LOCATIONS_PER_BLOCK;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_RANDOMIZE_BLOCK_LOCATIONS_PER_BLOCK_DEFAULT;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_REPLICATION_MIN_DEFAULT;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_REPLICATION_MIN_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_REPL_QUEUE_THRESHOLD_PCT_KEY;
@@ -83,9 +85,6 @@
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_PERMISSIONS_SUPERUSERGROUP_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_REPLICATION_DEFAULT;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_REPLICATION_KEY;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_RANDOMIZE_BLOCK_LOCATIONS_PER_BLOCK;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_RANDOMIZE_BLOCK_LOCATIONS_PER_BLOCK_DEFAULT;
-
import static org.apache.hadoop.util.Time.now;
import java.io.BufferedWriter;
@@ -230,6 +229,7 @@
import org.apache.hadoop.hdfs.server.namenode.web.resources.NamenodeWebHdfsMethods;
import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand;
import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
+import org.apache.hadoop.hdfs.server.protocol.DatanodeStorageReport;
import org.apache.hadoop.hdfs.server.protocol.HeartbeatResponse;
import org.apache.hadoop.hdfs.server.protocol.NNHAStatusHeartbeat;
import org.apache.hadoop.hdfs.server.protocol.NamenodeCommand;
@@ -3723,8 +3723,10 @@ boolean isFileClosed(String src)
StandbyException, IOException {
FSPermissionChecker pc = getPermissionChecker();
checkOperation(OperationCategory.READ);
+ byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src);
readLock();
try {
+ src = FSDirectory.resolvePath(src, pathComponents, dir);
checkOperation(OperationCategory.READ);
if (isPermissionEnabled) {
checkTraverse(pc, src);
@@ -4917,6 +4919,28 @@ DatanodeInfo[] datanodeReport(final DatanodeReportType type
}
}
+ DatanodeStorageReport[] getDatanodeStorageReport(final DatanodeReportType type
+ ) throws AccessControlException, StandbyException {
+ checkSuperuserPrivilege();
+ checkOperation(OperationCategory.UNCHECKED);
+ readLock();
+ try {
+ checkOperation(OperationCategory.UNCHECKED);
+ final DatanodeManager dm = getBlockManager().getDatanodeManager();
+ final List<DatanodeDescriptor> datanodes = dm.getDatanodeListForReport(type);
+
+ DatanodeStorageReport[] reports = new DatanodeStorageReport[datanodes.size()];
+ for (int i = 0; i < reports.length; i++) {
+ final DatanodeDescriptor d = datanodes.get(i);
+ reports[i] = new DatanodeStorageReport(new DatanodeInfo(d),
+ d.getStorageReports());
+ }
+ return reports;
+ } finally {
+ readUnlock();
+ }
+ }
+
/**
* Save namespace image.
* This will save current namespace into fsimage file and empty edits file.
@@ -8186,9 +8210,11 @@ AclStatus getAclStatus(String src) throws IOException {
nnConf.checkAclsConfigFlag();
FSPermissionChecker pc = getPermissionChecker();
checkOperation(OperationCategory.READ);
+ byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src);
readLock();
try {
checkOperation(OperationCategory.READ);
+ src = FSDirectory.resolvePath(src, pathComponents, dir);
if (isPermissionEnabled) {
checkPermission(pc, src, false, null, null, null, null);
}
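This hunk, and the getXAttrs() and listXAttrs() hunks that follow, all gain the same shape: split the path into components before taking the namesystem lock, then resolve any /.reserved prefix under the lock so resolution sees a consistent namespace. A self-contained sketch of that shape (all names illustrative):

    import java.nio.charset.StandardCharsets;
    import java.util.concurrent.locks.ReentrantReadWriteLock;

    class ReservedPathResolveSketch {
      private final ReentrantReadWriteLock lock = new ReentrantReadWriteLock();

      private static byte[][] pathComponents(String src) { // cheap, lock-free
        String[] parts = src.split("/");
        byte[][] out = new byte[parts.length][];
        for (int i = 0; i < parts.length; i++) {
          out[i] = parts[i].getBytes(StandardCharsets.UTF_8);
        }
        return out;
      }

      private String resolvePath(String src, byte[][] components) {
        return src; // the real code maps /.reserved/... onto concrete inodes
      }

      String readOp(String src) {
        byte[][] components = pathComponents(src); // outside the lock
        lock.readLock().lock();
        try {
          return resolvePath(src, components);     // must hold the lock
        } finally {
          lock.readLock().unlock();
        }
      }
    }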
@@ -8282,16 +8308,19 @@ List<XAttr> getXAttrs(String src, List<XAttr> xAttrs) throws IOException {
nnConf.checkXAttrsConfigFlag();
FSPermissionChecker pc = getPermissionChecker();
boolean getAll = xAttrs == null || xAttrs.isEmpty();
- List<XAttr> filteredXAttrs = null;
if (!getAll) {
- filteredXAttrs = XAttrPermissionFilter.filterXAttrsForApi(pc, xAttrs);
- if (filteredXAttrs.isEmpty()) {
- return filteredXAttrs;
+ try {
+ XAttrPermissionFilter.checkPermissionForApi(pc, xAttrs);
+ } catch (AccessControlException e) {
+ logAuditEvent(false, "getXAttrs", src);
+ throw e;
}
}
checkOperation(OperationCategory.READ);
+ byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src);
readLock();
try {
+ src = FSDirectory.resolvePath(src, pathComponents, dir);
checkOperation(OperationCategory.READ);
if (isPermissionEnabled) {
checkPathAccess(pc, src, FsAction.READ);
@@ -8305,15 +8334,21 @@ List<XAttr> getXAttrs(String src, List<XAttr> xAttrs) throws IOException {
if (filteredAll == null || filteredAll.isEmpty()) {
return null;
}
- List<XAttr> toGet = Lists.newArrayListWithCapacity(filteredXAttrs.size());
- for (XAttr xAttr : filteredXAttrs) {
+ List<XAttr> toGet = Lists.newArrayListWithCapacity(xAttrs.size());
+ for (XAttr xAttr : xAttrs) {
+ boolean foundIt = false;
for (XAttr a : filteredAll) {
if (xAttr.getNameSpace() == a.getNameSpace()
&& xAttr.getName().equals(a.getName())) {
toGet.add(a);
+ foundIt = true;
break;
}
}
+ if (!foundIt) {
+ throw new IOException(
+ "At least one of the attributes provided was not found.");
+ }
}
return toGet;
}
@@ -8329,8 +8364,10 @@ List<XAttr> listXAttrs(String src) throws IOException {
nnConf.checkXAttrsConfigFlag();
final FSPermissionChecker pc = getPermissionChecker();
checkOperation(OperationCategory.READ);
+ byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src);
readLock();
try {
+ src = FSDirectory.resolvePath(src, pathComponents, dir);
checkOperation(OperationCategory.READ);
if (isPermissionEnabled) {
/* To access xattr names, you need EXECUTE in the owning directory. */
@@ -8347,17 +8384,42 @@ List<XAttr> listXAttrs(String src) throws IOException {
readUnlock();
}
}
-
+
+ /**
+ * Remove an xattr for a file or directory.
+ *
+ * @param src
+ * - path to remove the xattr from
+ * @param xAttr
+ * - xAttr to remove
+ * @throws AccessControlException
+ * @throws SafeModeException
+ * @throws UnresolvedLinkException
+ * @throws IOException
+ */
void removeXAttr(String src, XAttr xAttr) throws IOException {
- nnConf.checkXAttrsConfigFlag();
- HdfsFileStatus resultingStat = null;
- FSPermissionChecker pc = getPermissionChecker();
+ CacheEntry cacheEntry = RetryCache.waitForCompletion(retryCache);
+ if (cacheEntry != null && cacheEntry.isSuccess()) {
+ return; // Return previous response
+ }
+ boolean success = false;
try {
- XAttrPermissionFilter.checkPermissionForApi(pc, xAttr);
+ removeXAttrInt(src, xAttr, cacheEntry != null);
+ success = true;
} catch (AccessControlException e) {
logAuditEvent(false, "removeXAttr", src);
throw e;
+ } finally {
+ RetryCache.setState(cacheEntry, success);
}
+ }
+
+ void removeXAttrInt(String src, XAttr xAttr, boolean logRetryCache)
+ throws IOException {
+ nnConf.checkXAttrsConfigFlag();
+ HdfsFileStatus resultingStat = null;
+ FSPermissionChecker pc = getPermissionChecker();
+ XAttrPermissionFilter.checkPermissionForApi(pc, xAttr);
checkOperation(OperationCategory.WRITE);
byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src);
writeLock();
@@ -8371,12 +8433,12 @@ void removeXAttr(String src, XAttr xAttr) throws IOException {
xAttrs.add(xAttr);
List<XAttr> removedXAttrs = dir.removeXAttrs(src, xAttrs);
if (removedXAttrs != null && !removedXAttrs.isEmpty()) {
- getEditLog().logRemoveXAttrs(src, removedXAttrs);
+ getEditLog().logRemoveXAttrs(src, removedXAttrs, logRetryCache);
+ } else {
+ throw new IOException(
+ "No matching attributes found for remove operation");
}
resultingStat = getAuditFileInfo(src, false);
- } catch (AccessControlException e) {
- logAuditEvent(false, "removeXAttr", src);
- throw e;
} finally {
writeUnlock();
}
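removeXAttr() now follows the standard retry-cache idiom: return immediately for a retry whose first attempt already succeeded, otherwise run the operation and record the outcome in a finally block. A simplified, self-contained sketch (the real RetryCache additionally waits on an in-flight first attempt):

    import java.util.concurrent.ConcurrentHashMap;

    class RetryCacheIdiomSketch {
      enum State { IN_PROGRESS, SUCCESS, FAILED }

      private final ConcurrentHashMap<String, State> cache =
          new ConcurrentHashMap<>();

      void idempotentOp(String callId, Runnable op) {
        if (cache.get(callId) == State.SUCCESS) {
          return; // return the previous response instead of re-running
        }
        cache.put(callId, State.IN_PROGRESS);
        boolean success = false;
        try {
          op.run();
          success = true;
        } finally {
          // Always record the outcome, mirroring RetryCache.setState().
          cache.put(callId, success ? State.SUCCESS : State.FAILED);
        }
      }
    }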
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileJournalManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileJournalManager.java
index a41ff1390c5..362c316cc2c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileJournalManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileJournalManager.java
@@ -71,6 +71,8 @@ public class FileJournalManager implements JournalManager {
NameNodeFile.EDITS.getName() + "_(\\d+)-(\\d+)");
private static final Pattern EDITS_INPROGRESS_REGEX = Pattern.compile(
NameNodeFile.EDITS_INPROGRESS.getName() + "_(\\d+)");
+ private static final Pattern EDITS_INPROGRESS_STALE_REGEX = Pattern.compile(
+ NameNodeFile.EDITS_INPROGRESS.getName() + "_(\\d+).*(\\S+)");
private File currentInProgress = null;
@@ -162,8 +164,7 @@ public void purgeLogsOlderThan(long minTxIdToKeep)
throws IOException {
LOG.info("Purging logs older than " + minTxIdToKeep);
File[] files = FileUtil.listFiles(sd.getCurrentDir());
- List<EditLogFile> editLogs =
- FileJournalManager.matchEditLogs(files);
+ List<EditLogFile> editLogs = matchEditLogs(files, true);
for (EditLogFile log : editLogs) {
if (log.getFirstTxId() < minTxIdToKeep &&
log.getLastTxId() < minTxIdToKeep) {
@@ -244,8 +245,13 @@ private void discardEditLogSegments(long startTxId) throws IOException {
public static List<EditLogFile> matchEditLogs(File logDir) throws IOException {
return matchEditLogs(FileUtil.listFiles(logDir));
}
-
+
static List<EditLogFile> matchEditLogs(File[] filesInStorage) {
+ return matchEditLogs(filesInStorage, false);
+ }
+
+ private static List<EditLogFile> matchEditLogs(File[] filesInStorage,
+ boolean forPurging) {
List<EditLogFile> ret = Lists.newArrayList();
for (File f : filesInStorage) {
String name = f.getName();
@@ -256,6 +262,7 @@ static List<EditLogFile> matchEditLogs(File[] filesInStorage) {
long startTxId = Long.parseLong(editsMatch.group(1));
long endTxId = Long.parseLong(editsMatch.group(2));
ret.add(new EditLogFile(f, startTxId, endTxId));
+ continue;
} catch (NumberFormatException nfe) {
LOG.error("Edits file " + f + " has improperly formatted " +
"transaction ID");
@@ -270,12 +277,30 @@ static List<EditLogFile> matchEditLogs(File[] filesInStorage) {
long startTxId = Long.parseLong(inProgressEditsMatch.group(1));
ret.add(
new EditLogFile(f, startTxId, HdfsConstants.INVALID_TXID, true));
+ continue;
} catch (NumberFormatException nfe) {
LOG.error("In-progress edits file " + f + " has improperly " +
"formatted transaction ID");
// skip
}
}
+ if (forPurging) {
+ // Check for in-progress stale edits
+ Matcher staleInprogressEditsMatch = EDITS_INPROGRESS_STALE_REGEX
+ .matcher(name);
+ if (staleInprogressEditsMatch.matches()) {
+ try {
+ long startTxId = Long.valueOf(staleInprogressEditsMatch.group(1));
+ ret.add(new EditLogFile(f, startTxId, HdfsConstants.INVALID_TXID,
+ true));
+ continue;
+ } catch (NumberFormatException nfe) {
+ LOG.error("In-progress stale edits file " + f + " has improperly "
+ + "formatted transaction ID");
+ // skip
+ }
+ }
+ }
}
return ret;
}
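Note that the stale-in-progress regex also matches a well-formed in-progress name, which is why it only participates when forPurging is set and why the exact patterns are tried first, each hit ending in continue. A small runnable check (file names illustrative):

    import java.util.regex.Pattern;

    public class EditLogNameSketch {
      static final Pattern EDITS = Pattern.compile("edits_(\\d+)-(\\d+)");
      static final Pattern INPROGRESS =
          Pattern.compile("edits_inprogress_(\\d+)");
      static final Pattern INPROGRESS_STALE =
          Pattern.compile("edits_inprogress_(\\d+).*(\\S+)");

      public static void main(String[] args) {
        String[] names = {
            "edits_0000001-0000042",          // finalized segment
            "edits_inprogress_0000043",       // current segment
            "edits_inprogress_0000043.stale"  // leftover from a crashed NN
        };
        for (String name : names) {
          System.out.printf("%-33s finalized=%-5b inprogress=%-5b stale=%b%n",
              name,
              EDITS.matcher(name).matches(),
              INPROGRESS.matcher(name).matches(),
              // True for the plain in-progress name too: the stale pattern
              // is deliberately checked only after the exact ones.
              INPROGRESS_STALE.matcher(name).matches());
        }
      }
    }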
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
index f1f67247c26..2c2cd4f2272 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
@@ -115,6 +115,7 @@
import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand;
import org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol;
import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
+import org.apache.hadoop.hdfs.server.protocol.DatanodeStorageReport;
import org.apache.hadoop.hdfs.server.protocol.FinalizeCommand;
import org.apache.hadoop.hdfs.server.protocol.HeartbeatResponse;
import org.apache.hadoop.hdfs.server.protocol.NamenodeCommand;
@@ -830,11 +831,23 @@ public DatanodeInfo[] getDatanodeReport(DatanodeReportType type)
throws IOException {
DatanodeInfo results[] = namesystem.datanodeReport(type);
if (results == null ) {
- throw new IOException("Cannot find datanode report");
+ throw new IOException("Failed to get datanode report for " + type
+ + " datanodes.");
}
return results;
}
+ @Override // ClientProtocol
+ public DatanodeStorageReport[] getDatanodeStorageReport(
+ DatanodeReportType type) throws IOException {
+ final DatanodeStorageReport[] reports = namesystem.getDatanodeStorageReport(type);
+ if (reports == null) {
+ throw new IOException("Failed to get datanode storage report for " + type
+ + " datanodes.");
+ }
+ return reports;
+ }
+
@Override // ClientProtocol
public boolean setSafeMode(SafeModeAction action, boolean isChecked)
throws IOException {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/XAttrPermissionFilter.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/XAttrPermissionFilter.java
index 47f29399e5a..98730142fbd 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/XAttrPermissionFilter.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/XAttrPermissionFilter.java
@@ -26,6 +26,7 @@
import org.apache.hadoop.security.AccessControlException;
import com.google.common.collect.Lists;
+import com.google.common.base.Preconditions;
/**
* There are four types of extended attributes defined by the
@@ -60,8 +61,20 @@ static void checkPermissionForApi(FSPermissionChecker pc, XAttr xAttr)
throw new AccessControlException("User doesn't have permission for xattr: "
+ XAttrHelper.getPrefixName(xAttr));
}
-
- static List<XAttr> filterXAttrsForApi(FSPermissionChecker pc,
+
+ static void checkPermissionForApi(FSPermissionChecker pc,
+ List<XAttr> xAttrs) throws AccessControlException {
+ Preconditions.checkArgument(xAttrs != null);
+ if (xAttrs.isEmpty()) {
+ return;
+ }
+
+ for (XAttr xAttr : xAttrs) {
+ checkPermissionForApi(pc, xAttr);
+ }
+ }
+
+ static List<XAttr> filterXAttrsForApi(FSPermissionChecker pc,
List<XAttr> xAttrs) {
assert xAttrs != null : "xAttrs can not be null";
if (xAttrs == null || xAttrs.isEmpty()) {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java
index 858ce6e761b..237f2e901f3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java
@@ -111,6 +111,7 @@
import org.apache.hadoop.hdfs.web.resources.XAttrSetFlagParam;
import org.apache.hadoop.hdfs.web.resources.XAttrValueParam;
import org.apache.hadoop.io.Text;
+import org.apache.hadoop.ipc.RetriableException;
import org.apache.hadoop.ipc.Server;
import org.apache.hadoop.net.NetworkTopology.InvalidTopologyException;
import org.apache.hadoop.net.Node;
@@ -188,7 +189,7 @@ private static NamenodeProtocols getRPCServer(NameNode namenode)
throws IOException {
final NamenodeProtocols np = namenode.getRpcServer();
if (np == null) {
- throw new IOException("Namenode is in startup mode");
+ throw new RetriableException("Namenode is in startup mode");
}
return np;
}
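Returning RetriableException lets the RPC layer treat "Namenode is in startup mode" as a transient condition to retry rather than a hard failure. A sketch of the distinction from the caller's side (the local exception type and loop are illustrative; real clients get this behavior from the configured retry policy):

    import java.io.IOException;

    class StartupRetrySketch {
      static class RetriableException extends IOException {
        RetriableException(String msg) { super(msg); }
      }

      interface Rpc { String call() throws IOException; }

      static String callWithRetry(Rpc rpc, int maxAttempts)
          throws IOException, InterruptedException {
        for (int attempt = 1; ; attempt++) {
          try {
            return rpc.call();
          } catch (RetriableException e) {
            if (attempt == maxAttempts) {
              throw e;                     // give up after bounded retries
            }
            Thread.sleep(1000L * attempt); // transient: back off and retry
          }
          // Any other IOException propagates: a non-retriable failure.
        }
      }
    }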
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/DatanodeStorageReport.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/DatanodeStorageReport.java
new file mode 100644
index 00000000000..6a956a0fac1
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/DatanodeStorageReport.java
@@ -0,0 +1,42 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.protocol;
+
+import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
+
+/**
+ * Captures information about a datanode and its storages.
+ */
+public class DatanodeStorageReport {
+ final DatanodeInfo datanodeInfo;
+ final StorageReport[] storageReports;
+
+ public DatanodeStorageReport(DatanodeInfo datanodeInfo,
+ StorageReport[] storageReports) {
+ this.datanodeInfo = datanodeInfo;
+ this.storageReports = storageReports;
+ }
+
+ public DatanodeInfo getDatanodeInfo() {
+ return datanodeInfo;
+ }
+
+ public StorageReport[] getStorageReports() {
+ return storageReports;
+ }
+}
\ No newline at end of file
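A hypothetical consumer of the new report type, assuming the existing StorageReport accessors isFailed() and getCapacity():

    // Sum the capacity of all healthy storages across the reported datanodes.
    class StorageReportUsageSketch {
      static long totalHealthyCapacity(DatanodeStorageReport[] reports) {
        long total = 0;
        for (DatanodeStorageReport report : reports) {
          for (StorageReport sr : report.getStorageReports()) {
            if (!sr.isFailed()) {
              total += sr.getCapacity();
            }
          }
        }
        return total;
      }
    }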
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/shortcircuit/DfsClientShm.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/shortcircuit/DfsClientShm.java
index 1c9a2e5a742..81cc68da072 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/shortcircuit/DfsClientShm.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/shortcircuit/DfsClientShm.java
@@ -32,11 +32,16 @@
* DfsClientShm is a subclass of ShortCircuitShm which is used by the
* DfsClient.
* When the UNIX domain socket associated with this shared memory segment
- * closes unexpectedly, we mark the slots inside this segment as stale.
- * ShortCircuitReplica objects that contain stale slots are themselves stale,
+ * closes unexpectedly, we mark the slots inside this segment as disconnected.
+ * ShortCircuitReplica objects that contain disconnected slots are stale,
* and will not be used to service new reads or mmap operations.
* However, in-progress read or mmap operations will continue to proceed.
* Once the last slot is deallocated, the segment can be safely munmapped.
+ *
+ * Slots may also become stale because the associated replica has been deleted
+ * on the DataNode. In this case, the DataNode will clear the 'valid' bit.
+ * The client will then see these slots as stale (see
+ * {@link ShortCircuitReplica#isStale}).
*/
public class DfsClientShm extends ShortCircuitShm
implements DomainSocketWatcher.Handler {
@@ -58,7 +63,7 @@ public class DfsClientShm extends ShortCircuitShm
*
* {@link DfsClientShm#handle} sets this to true.
*/
- private boolean stale = false;
+ private boolean disconnected = false;
DfsClientShm(ShmId shmId, FileInputStream stream, EndpointShmManager manager,
DomainPeer peer) throws IOException {
@@ -76,14 +81,14 @@ public DomainPeer getPeer() {
}
/**
- * Determine if the shared memory segment is stale.
+ * Determine if the shared memory segment is disconnected from the DataNode.
*
* This must be called with the DfsClientShmManager lock held.
*
- * @return True if the shared memory segment is stale.
+ * @return True if the shared memory segment is disconnected.
*/
- public synchronized boolean isStale() {
- return stale;
+ public synchronized boolean isDisconnected() {
+ return disconnected;
}
/**
@@ -97,8 +102,8 @@ public synchronized boolean isStale() {
public boolean handle(DomainSocket sock) {
manager.unregisterShm(getShmId());
synchronized (this) {
- Preconditions.checkState(!stale);
- stale = true;
+ Preconditions.checkState(!disconnected);
+ disconnected = true;
boolean hadSlots = false;
for (Iterator<Slot> iter = slotIterator(); iter.hasNext(); ) {
Slot slot = iter.next();
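The renamed flag separates two distinct conditions: a disconnected segment (the socket closed, so every slot in it is unusable) and an individually stale slot (the DataNode cleared its valid bit because the replica was deleted). A tiny sketch of how the two combine, with illustrative types:

    class SlotStalenessSketch {
      static class Shm {
        volatile boolean disconnected; // set when the UNIX socket closes
      }

      static class Slot {
        final Shm shm;
        volatile boolean valid = true; // cleared by the DN on invalidation
        Slot(Shm shm) { this.shm = shm; }

        boolean isUsableForNewReads() {
          return valid && !shm.disconnected;
        }
      }
    }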
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/shortcircuit/DfsClientShmManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/shortcircuit/DfsClientShmManager.java
index ca9e8e6e0a5..6dbaf84d269 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/shortcircuit/DfsClientShmManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/shortcircuit/DfsClientShmManager.java
@@ -271,12 +271,12 @@ Slot allocSlot(DomainPeer peer, MutableBoolean usedPeer,
loading = false;
finishedLoading.signalAll();
}
- if (shm.isStale()) {
+ if (shm.isDisconnected()) {
// If the peer closed immediately after the shared memory segment
// was created, the DomainSocketWatcher callback might already have
- // fired and marked the shm as stale. In this case, we obviously
- // don't want to add the SharedMemorySegment to our list of valid
- // not-full segments.
+ // fired and marked the shm as disconnected. In this case, we
+ // obviously don't want to add the SharedMemorySegment to our list
+ // of valid not-full segments.
if (LOG.isDebugEnabled()) {
LOG.debug(this + ": the UNIX domain socket associated with " +
"this short-circuit memory closed before we could make " +
@@ -299,7 +299,7 @@ Slot allocSlot(DomainPeer peer, MutableBoolean usedPeer,
void freeSlot(Slot slot) {
DfsClientShm shm = (DfsClientShm)slot.getShm();
shm.unregisterSlot(slot.getSlotIdx());
- if (shm.isStale()) {
+ if (shm.isDisconnected()) {
// Stale shared memory segments should not be tracked here.
Preconditions.checkState(!full.containsKey(shm.getShmId()));
Preconditions.checkState(!notFull.containsKey(shm.getShmId()));
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/shortcircuit/ShortCircuitShm.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/shortcircuit/ShortCircuitShm.java
index d860c8b174c..7b89d0a978d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/shortcircuit/ShortCircuitShm.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/shortcircuit/ShortCircuitShm.java
@@ -306,6 +306,13 @@ public int getSlotIdx() {
(slotAddress - baseAddress) / BYTES_PER_SLOT);
}
+ /**
+ * Clear the slot.
+ */
+ void clear() {
+ unsafe.putLongVolatile(null, this.slotAddress, 0);
+ }
+
private boolean isSet(long flag) {
long prev = unsafe.getLongVolatile(null, this.slotAddress);
return (prev & flag) != 0;
@@ -535,6 +542,7 @@ synchronized public final Slot allocAndRegisterSlot(
}
allocatedSlots.set(idx, true);
Slot slot = new Slot(calculateSlotAddress(idx), blockId);
+ slot.clear();
slot.makeValid();
slots[idx] = slot;
if (LOG.isTraceEnabled()) {
@@ -583,7 +591,7 @@ synchronized public final Slot registerSlot(int slotIdx,
Slot slot = new Slot(calculateSlotAddress(slotIdx), blockId);
if (!slot.isValid()) {
throw new InvalidRequestException(this + ": slot " + slotIdx +
- " has not been allocated.");
+ " is not marked as valid.");
}
slots[slotIdx] = slot;
allocatedSlots.set(slotIdx, true);
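allocAndRegisterSlot() now clears the slot word before setting the valid bit, so flag and anchor bits left by a previous occupant of the reused shared-memory slot cannot leak into the new one. A sketch of the idea with AtomicLong standing in for the Unsafe-managed word (flag layout illustrative):

    import java.util.concurrent.atomic.AtomicLong;

    class SlotWordSketch {
      private static final long VALID_FLAG = 1L << 63;
      private final AtomicLong word = new AtomicLong();

      void clear() {                   // the newly added step
        word.set(0);
      }

      void makeValid() {
        word.updateAndGet(w -> w | VALID_FLAG);
      }

      void allocate() {
        clear();     // without this, reused memory could carry old state
        makeValid();
      }
    }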
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/XAttrNameParam.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/XAttrNameParam.java
index 3860f916e48..8137b4494f4 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/XAttrNameParam.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/XAttrNameParam.java
@@ -25,8 +25,8 @@ public class XAttrNameParam extends StringParam {
/** Default parameter value. **/
public static final String DEFAULT = "";
- private static Domain DOMAIN = new Domain(NAME,
- Pattern.compile("^(user\\.|trusted\\.|system\\.|security\\.).+"));
+ private static Domain DOMAIN = new Domain(NAME,
+ Pattern.compile(".*"));
public XAttrNameParam(final String str) {
super(DOMAIN, str == null || str.equals(DEFAULT) ? null : str);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientNamenodeProtocol.proto b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientNamenodeProtocol.proto
index 5a75c41fb54..d2f92d64d0e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientNamenodeProtocol.proto
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientNamenodeProtocol.proto
@@ -281,6 +281,19 @@ message GetDatanodeReportResponseProto {
repeated DatanodeInfoProto di = 1;
}
+message GetDatanodeStorageReportRequestProto {
+ required DatanodeReportTypeProto type = 1;
+}
+
+message DatanodeStorageReportProto {
+ required DatanodeInfoProto datanodeInfo = 1;
+ repeated StorageReportProto storageReports = 2;
+}
+
+message GetDatanodeStorageReportResponseProto {
+ repeated DatanodeStorageReportProto datanodeStorageReports = 1;
+}
+
message GetPreferredBlockSizeRequestProto {
required string filename = 1;
}
@@ -672,6 +685,8 @@ service ClientNamenodeProtocol {
rpc getFsStats(GetFsStatusRequestProto) returns(GetFsStatsResponseProto);
rpc getDatanodeReport(GetDatanodeReportRequestProto)
returns(GetDatanodeReportResponseProto);
+ rpc getDatanodeStorageReport(GetDatanodeStorageReportRequestProto)
+ returns(GetDatanodeStorageReportResponseProto);
rpc getPreferredBlockSize(GetPreferredBlockSizeRequestProto)
returns(GetPreferredBlockSizeResponseProto);
rpc setSafeMode(SetSafeModeRequestProto)
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/DatanodeProtocol.proto b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/DatanodeProtocol.proto
index 2afcf057f70..187761a4502 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/DatanodeProtocol.proto
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/DatanodeProtocol.proto
@@ -44,20 +44,6 @@ message DatanodeRegistrationProto {
required string softwareVersion = 4; // Software version of the DN, e.g. "2.0.0"
}
-/**
- * Represents a storage available on the datanode
- */
-message DatanodeStorageProto {
- enum StorageState {
- NORMAL = 0;
- READ_ONLY_SHARED = 1;
- }
-
- required string storageUuid = 1;
- optional StorageState state = 2 [default = NORMAL];
- optional StorageTypeProto storageType = 3 [default = DISK];
-}
-
/**
* Commands sent from namenode to the datanodes
*/
@@ -196,16 +182,6 @@ message HeartbeatRequestProto {
optional uint64 cacheUsed = 7 [default = 0 ];
}
-message StorageReportProto {
- required string storageUuid = 1 [ deprecated = true ];
- optional bool failed = 2 [ default = false ];
- optional uint64 capacity = 3 [ default = 0 ];
- optional uint64 dfsUsed = 4 [ default = 0 ];
- optional uint64 remaining = 5 [ default = 0 ];
- optional uint64 blockPoolUsed = 6 [ default = 0 ];
- optional DatanodeStorageProto storage = 7; // supersedes StorageUuid
-}
-
/**
* state - State the NN is in when returning response to the DN
* txid - Highest transaction ID this NN has seen
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/hdfs.proto b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/hdfs.proto
index b5e3deb0158..12dbf01c8ff 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/hdfs.proto
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/hdfs.proto
@@ -99,6 +99,30 @@ message DatanodeInfoProto {
optional uint64 cacheUsed = 12 [default = 0];
}
+/**
+ * Represents a storage available on the datanode
+ */
+message DatanodeStorageProto {
+ enum StorageState {
+ NORMAL = 0;
+ READ_ONLY_SHARED = 1;
+ }
+
+ required string storageUuid = 1;
+ optional StorageState state = 2 [default = NORMAL];
+ optional StorageTypeProto storageType = 3 [default = DISK];
+}
+
+message StorageReportProto {
+ required string storageUuid = 1 [ deprecated = true ];
+ optional bool failed = 2 [ default = false ];
+ optional uint64 capacity = 3 [ default = 0 ];
+ optional uint64 dfsUsed = 4 [ default = 0 ];
+ optional uint64 remaining = 5 [ default = 0 ];
+ optional uint64 blockPoolUsed = 6 [ default = 0 ];
+ optional DatanodeStorageProto storage = 7; // supersedes StorageUuid
+}
+
/**
* Summary of a file or directory
*/
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html
index fadba070721..25895261982 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html
@@ -66,7 +66,6 @@
@@ -283,7 +282,7 @@
<td>{name} ({xferaddr})</td>
<td>{lastContact}</td>
- <td>Dead{?decomissioned}, Decomissioned{/decomissioned}</td>
+ <td>Dead{?decommissioned}, Decommissioned{/decommissioned}</td>
<td>-</td>
<td>-</td>
<td>-</td>
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/index.html b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/index.html
index 99bb13b326c..aa62a372396 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/index.html
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/index.html
@@ -18,18 +18,7 @@
"http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">
-
+
Hadoop Administration
-
-
-Hadoop Administration
-
-
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/secondary/index.html b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/secondary/index.html
index 97e0207e06f..f7ef858b9e3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/secondary/index.html
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/secondary/index.html
@@ -21,15 +21,4 @@
Hadoop Administration
-
-
-Hadoop Administration
-
-