diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt
index 19e0c441964..204715631af 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -366,6 +366,9 @@ Release 2.3.0 - UNRELEASED
HDFS-5276. FileSystem.Statistics should use thread-local counters to avoid
multi-threaded performance issues on read/write. (Colin Patrick McCabe)
+ HADOOP-9291. enhance unit-test coverage of package o.a.h.metrics2 (Ivan A.
+ Veselovsky via jeagles)
+
OPTIMIZATIONS
HADOOP-9748. Reduce blocking on UGI.ensureInitialized (daryn)
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/filter/AbstractPatternFilter.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/filter/AbstractPatternFilter.java
index 1f779735a59..07b50ab9774 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/filter/AbstractPatternFilter.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/filter/AbstractPatternFilter.java
@@ -112,7 +112,7 @@ public abstract class AbstractPatternFilter extends MetricsFilter {
return false;
}
// Reject if no match in whitelist only mode
- if (ipat != null && epat == null) {
+ if (!includeTagPatterns.isEmpty() && excludeTagPatterns.isEmpty()) {
return false;
}
return true;
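
The corrected condition consults the tag pattern maps themselves rather than the unrelated name patterns, so whitelist-only mode now rejects a tag list that matches no include pattern. Below is a minimal sketch of that behaviour, reusing the ConfigBuilder and Interns.tag helpers that appear in the tests of this patch and assuming GlobFilter is initialized through the usual init(conf) plugin lifecycle; it is an illustration, not part of the patch.

    import java.util.Arrays;

    import org.apache.commons.configuration.SubsetConfiguration;
    import org.apache.hadoop.metrics2.filter.GlobFilter;
    import org.apache.hadoop.metrics2.impl.ConfigBuilder;
    import static org.apache.hadoop.metrics2.lib.Interns.tag;

    public class WhitelistTagFilterSketch {
      public static void main(String[] args) {
        // Whitelist-only mode: include patterns for tags, no exclude patterns.
        SubsetConfiguration conf = new ConfigBuilder()
            .add("p.include.tags", "foo:f").subset("p");
        GlobFilter filter = new GlobFilter();
        filter.init(conf);

        // A tag list containing a matching tag is accepted.
        System.out.println(filter.accepts(Arrays.asList(
            tag("bar", "", ""), tag("foo", "", "f"))));   // true
        // A tag list with no matching tag now falls through to the corrected
        // whitelist-only check and is rejected.
        System.out.println(filter.accepts(Arrays.asList(
            tag("bar", "", ""))));                        // false
      }
    }
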
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/package-info.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/package-info.java
index 2f787d04492..be2149977cd 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/package-info.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/package-info.java
@@ -234,7 +234,7 @@
patterns.
Similarly, you can specify the record.filter and
- metrics.filter options, which operate at record and metric
+ metric.filter options, which operate at record and metric
level, respectively. Filters can be combined to optimize
the filtering efficiency.
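
The corrected option name metric.filter sits alongside source.filter and record.filter. As a rough illustration of the three filter levels configured side by side, here is a sketch built with the ConfigBuilder test helper used elsewhere in this patch; the "test" prefix, the glob values, and the exact key layout are illustrative assumptions standing in for entries in hadoop-metrics2.properties.

    import org.apache.commons.configuration.SubsetConfiguration;
    import org.apache.hadoop.metrics2.impl.ConfigBuilder;

    public class FilterConfigSketch {
      public static void main(String[] args) {
        SubsetConfiguration conf = new ConfigBuilder()
            // source-level filter
            .add("test.*.source.filter.class",
                "org.apache.hadoop.metrics2.filter.GlobFilter")
            .add("test.*.source.filter.include", "Queue*")
            // record-level filter
            .add("test.*.record.filter.class",
                "org.apache.hadoop.metrics2.filter.GlobFilter")
            .add("test.*.record.filter.exclude", "*Details")
            // metric-level filter (the option name fixed in this hunk)
            .add("test.*.metric.filter.class",
                "org.apache.hadoop.metrics2.filter.GlobFilter")
            .add("test.*.metric.filter.exclude", "*Variance")
            .subset("test");
        // The subset strips the "test" prefix, as the metrics system does.
        System.out.println(conf.getString("*.metric.filter.exclude")); // *Variance
      }
    }
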
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics2/filter/TestPatternFilter.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics2/filter/TestPatternFilter.java
index 2bdfdb978a9..a8f38d6136b 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics2/filter/TestPatternFilter.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics2/filter/TestPatternFilter.java
@@ -23,9 +23,11 @@ import java.util.List;
import org.apache.commons.configuration.SubsetConfiguration;
import org.junit.Test;
+
import static org.junit.Assert.*;
import static org.mockito.Mockito.*;
+import org.apache.hadoop.metrics2.MetricsFilter;
import org.apache.hadoop.metrics2.MetricsRecord;
import org.apache.hadoop.metrics2.MetricsTag;
import org.apache.hadoop.metrics2.impl.ConfigBuilder;
@@ -53,7 +55,7 @@ public class TestPatternFilter {
.add("p.include.tags", "foo:f").subset("p");
shouldAccept(wl, "foo");
shouldAccept(wl, Arrays.asList(tag("bar", "", ""),
- tag("foo", "", "f")));
+ tag("foo", "", "f")), new boolean[] {false, true});
shouldAccept(wl, mockMetricsRecord("foo", Arrays.asList(
tag("bar", "", ""), tag("foo", "", "f"))));
shouldReject(wl, "bar");
@@ -78,7 +80,7 @@ public class TestPatternFilter {
tag("bar", "", ""))));
shouldReject(bl, "foo");
shouldReject(bl, Arrays.asList(tag("bar", "", ""),
- tag("foo", "", "f")));
+ tag("foo", "", "f")), new boolean[] {true, false});
shouldReject(bl, mockMetricsRecord("foo", Arrays.asList(
tag("bar", "", ""))));
shouldReject(bl, mockMetricsRecord("bar", Arrays.asList(
@@ -125,15 +127,61 @@ public class TestPatternFilter {
shouldAccept(c, mockMetricsRecord("foo", Arrays.asList(
tag("foo", "", "f"))));
}
-
+
static void shouldAccept(SubsetConfiguration conf, String s) {
assertTrue("accepts "+ s, newGlobFilter(conf).accepts(s));
assertTrue("accepts "+ s, newRegexFilter(conf).accepts(s));
}
+ // Version for one tag:
  static void shouldAccept(SubsetConfiguration conf, List<MetricsTag> tags) {
- assertTrue("accepts "+ tags, newGlobFilter(conf).accepts(tags));
- assertTrue("accepts "+ tags, newRegexFilter(conf).accepts(tags));
+ shouldAcceptImpl(true, conf, tags, new boolean[] {true});
+ }
+ // Version for multiple tags:
+  static void shouldAccept(SubsetConfiguration conf, List<MetricsTag> tags,
+ boolean[] expectedAcceptedSpec) {
+ shouldAcceptImpl(true, conf, tags, expectedAcceptedSpec);
+ }
+
+ // Version for one tag:
+  static void shouldReject(SubsetConfiguration conf, List<MetricsTag> tags) {
+ shouldAcceptImpl(false, conf, tags, new boolean[] {false});
+ }
+ // Version for multiple tags:
+  static void shouldReject(SubsetConfiguration conf, List<MetricsTag> tags,
+ boolean[] expectedAcceptedSpec) {
+ shouldAcceptImpl(false, conf, tags, expectedAcceptedSpec);
+ }
+
+ private static void shouldAcceptImpl(final boolean expectAcceptList,
+      SubsetConfiguration conf, List<MetricsTag> tags, boolean[] expectedAcceptedSpec) {
+ final MetricsFilter globFilter = newGlobFilter(conf);
+ final MetricsFilter regexFilter = newRegexFilter(conf);
+
+ // Test acceptance of the tag list:
+ assertEquals("accepts "+ tags, expectAcceptList, globFilter.accepts(tags));
+ assertEquals("accepts "+ tags, expectAcceptList, regexFilter.accepts(tags));
+
+ // Test results on each of the individual tags:
+ int acceptedCount = 0;
+    for (int i = 0; i < tags.size(); i++) {
+      final MetricsTag tag = tags.get(i);
+      boolean actualAccepted = globFilter.accepts(tag);
+      assertEquals("accepts "+ tag, expectedAcceptedSpec[i], actualAccepted);
+      actualAccepted = regexFilter.accepts(tag);
+      assertEquals("accepts "+ tag, expectedAcceptedSpec[i], actualAccepted);
+      if (actualAccepted) {
+        acceptedCount++;
+      }
+    }
+    if (expectAcceptList) {
+      // At least one individual tag should be accepted:
+      assertTrue("No tag of the following accepted: " + tags, acceptedCount > 0);
+    } else {
+      // At least one individual tag should be rejected:
+      assertTrue("No tag of the following rejected: " + tags, acceptedCount < tags.size());
+    }
}
/**
@@ -152,11 +200,6 @@ public class TestPatternFilter {
assertTrue("rejects "+ s, !newRegexFilter(conf).accepts(s));
}
-  static void shouldReject(SubsetConfiguration conf, List<MetricsTag> tags) {
- assertTrue("rejects "+ tags, !newGlobFilter(conf).accepts(tags));
- assertTrue("rejects "+ tags, !newRegexFilter(conf).accepts(tags));
- }
-
/**
* Asserts that filters with the given configuration reject the given record.
*
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics2/sink/TestFileSink.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics2/sink/TestFileSink.java
new file mode 100644
index 00000000000..8c918b8431b
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics2/sink/TestFileSink.java
@@ -0,0 +1,138 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.metrics2.sink;
+
+import java.io.ByteArrayOutputStream;
+import java.io.File;
+import java.io.FileInputStream;
+import java.io.IOException;
+import java.io.InputStream;
+import java.util.regex.Pattern;
+
+import org.apache.hadoop.io.IOUtils;
+import org.apache.hadoop.metrics2.MetricsSystem;
+import org.apache.hadoop.metrics2.annotation.Metric;
+import org.apache.hadoop.metrics2.annotation.Metrics;
+import org.apache.hadoop.metrics2.annotation.Metric.Type;
+import org.apache.hadoop.metrics2.impl.ConfigBuilder;
+import org.apache.hadoop.metrics2.impl.MetricsSystemImpl;
+import org.apache.hadoop.metrics2.impl.TestMetricsConfig;
+import org.apache.hadoop.metrics2.lib.MutableGaugeInt;
+import org.junit.After;
+import org.junit.Test;
+import static org.junit.Assert.*;
+
+public class TestFileSink {
+
+ private File outFile;
+
+ // The 2 sample metric classes:
+ @Metrics(name="testRecord1", context="test1")
+ static class MyMetrics1 {
+ @Metric(value={"testTag1", ""}, type=Type.TAG)
+ String testTag1() { return "testTagValue1"; }
+
+ @Metric(value={"testTag2", ""}, type=Type.TAG)
+ String gettestTag2() { return "testTagValue2"; }
+
+ @Metric(value={"testMetric1", "An integer gauge"},always=true)
+ MutableGaugeInt testMetric1;
+
+ @Metric(value={"testMetric2", "An integer gauge"},always=true)
+ MutableGaugeInt testMetric2;
+
+ public MyMetrics1 registerWith(MetricsSystem ms) {
+ return ms.register("m1", null, this);
+ }
+ }
+
+ @Metrics(name="testRecord2", context="test1")
+ static class MyMetrics2 {
+ @Metric(value={"testTag22", ""}, type=Type.TAG)
+ String testTag1() { return "testTagValue22"; }
+
+ public MyMetrics2 registerWith(MetricsSystem ms) {
+ return ms.register("m2", null, this);
+ }
+ }
+
+ private File getTestTempFile(String prefix, String suffix) throws IOException {
+ String tmpPath = System.getProperty("java.io.tmpdir", "/tmp");
+ String user = System.getProperty("user.name", "unknown-user");
+ File dir = new File(tmpPath + "/" + user);
+ dir.mkdirs();
+ return File.createTempFile(prefix, suffix, dir);
+ }
+
+ @Test(timeout=6000)
+ public void testFileSink() throws IOException {
+ outFile = getTestTempFile("test-file-sink-", ".out");
+ final String outPath = outFile.getAbsolutePath();
+
+ // NB: specify large period to avoid multiple metrics snapshotting:
+ new ConfigBuilder().add("*.period", 10000)
+ .add("test.sink.mysink0.class", FileSink.class.getName())
+ .add("test.sink.mysink0.filename", outPath)
+ // NB: we filter by context to exclude "metricssystem" context metrics:
+ .add("test.sink.mysink0.context", "test1")
+ .save(TestMetricsConfig.getTestFilename("hadoop-metrics2-test"));
+ MetricsSystemImpl ms = new MetricsSystemImpl("test");
+ ms.start();
+
+ final MyMetrics1 mm1
+ = new MyMetrics1().registerWith(ms);
+ new MyMetrics2().registerWith(ms);
+
+ mm1.testMetric1.incr();
+ mm1.testMetric2.incr(2);
+
+ ms.publishMetricsNow(); // publish the metrics
+ ms.stop();
+ ms.shutdown();
+
+ InputStream is = new FileInputStream(outFile);
+ ByteArrayOutputStream baos = new ByteArrayOutputStream((int)outFile.length());
+ IOUtils.copyBytes(is, baos, 1024, true);
+ String outFileContent = new String(baos.toByteArray(), "UTF-8");
+
+ // Check the out file content. Should be something like the following:
+ //1360244820087 test1.testRecord1: Context=test1, testTag1=testTagValue1, testTag2=testTagValue2, Hostname=myhost, testMetric1=1, testMetric2=2
+ //1360244820089 test1.testRecord2: Context=test1, testTag22=testTagValue22, Hostname=myhost
+
+ // Note that in the below expression we allow tags and metrics to go in arbitrary order.
+ Pattern expectedContentPattern = Pattern.compile(
+ // line #1:
+ "^\\d+\\s+test1.testRecord1:\\s+Context=test1,\\s+" +
+ "(testTag1=testTagValue1,\\s+testTag2=testTagValue2|testTag2=testTagValue2,\\s+testTag1=testTagValue1)," +
+ "\\s+Hostname=.*,\\s+(testMetric1=1,\\s+testMetric2=2|testMetric2=2,\\s+testMetric1=1)" +
+ // line #2:
+ "$[\\n\\r]*^\\d+\\s+test1.testRecord2:\\s+Context=test1," +
+ "\\s+testTag22=testTagValue22,\\s+Hostname=.*$[\\n\\r]*",
+ Pattern.MULTILINE);
+ assertTrue(expectedContentPattern.matcher(outFileContent).matches());
+ }
+
+ @After
+ public void after() {
+ if (outFile != null) {
+ outFile.delete();
+ assertTrue(!outFile.exists());
+ }
+ }
+}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index e6e096ab122..cfffa11abd0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -261,6 +261,9 @@ Release 2.3.0 - UNRELEASED
HDFS-5379. Update links to datanode information in dfshealth.html. (Haohui
Mai via jing9)
+ HDFS-5382. Implement the UI of browsing filesystems in HTML 5 page. (Haohui
+ Mai via jing9)
+
IMPROVEMENTS
HDFS-5267. Remove volatile from LightWeightHashSet. (Junping Du via llu)
@@ -329,6 +332,9 @@ Release 2.3.0 - UNRELEASED
HDFS-4511. Cover package org.apache.hadoop.hdfs.tools with unit test
(Andrey Klochkov via jeagles)
+ HDFS-4885. Improve the verifyBlockPlacement() API in BlockPlacementPolicy.
+ (Junping Du via szetszwo)
+
OPTIMIZATIONS
HDFS-5239. Allow FSNamesystem lock fairness to be configurable (daryn)
@@ -369,6 +375,9 @@ Release 2.3.0 - UNRELEASED
HDFS-5336. DataNode should not output 'StartupProgress' metrics.
(Akira Ajisaka via cnauroth)
+ HDFS-5400. DFS_CLIENT_MMAP_CACHE_THREAD_RUNS_PER_TIMEOUT constant is set
+ to the wrong value. (Colin Patrick McCabe)
+
Release 2.2.1 - UNRELEASED
INCOMPATIBLE CHANGES
@@ -419,6 +428,8 @@ Release 2.2.1 - UNRELEASED
and range in error message. (Kousuke Saruta via suresh)
HDFS-5365. Fix libhdfs compile error on FreeBSD9. (Radim Kolar via cnauroth)
+
+ HDFS-5347. Add HDFS NFS user guide. (brandonli)
Release 2.2.0 - 2013-10-13
diff --git a/hadoop-hdfs-project/hadoop-hdfs/pom.xml b/hadoop-hdfs-project/hadoop-hdfs/pom.xml
index 86a3c5ae6a9..4fa7213746b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/pom.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/pom.xml
@@ -550,6 +550,8 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd">
            <exclude>src/main/webapps/static/dust-full-2.0.0.min.js</exclude>
            <exclude>src/main/webapps/static/dust-helpers-1.1.1.min.js</exclude>
            <exclude>src/main/webapps/hdfs/dfshealth.dust.html</exclude>
+            <exclude>src/main/webapps/hdfs/explorer-block-info.dust.html</exclude>
+            <exclude>src/main/webapps/hdfs/explorer.dust.html</exclude>
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
index 8d41ae2b2c2..0f9c8244d11 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
@@ -383,7 +383,7 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
public static final int DFS_CLIENT_MMAP_CACHE_SIZE_DEFAULT = 1024;
public static final String DFS_CLIENT_MMAP_CACHE_TIMEOUT_MS = "dfs.client.mmap.cache.timeout.ms";
public static final long DFS_CLIENT_MMAP_CACHE_TIMEOUT_MS_DEFAULT = 15 * 60 * 1000;
- public static final String DFS_CLIENT_MMAP_CACHE_THREAD_RUNS_PER_TIMEOUT = "dfs.client.mmap.cache.timeout.ms";
+ public static final String DFS_CLIENT_MMAP_CACHE_THREAD_RUNS_PER_TIMEOUT = "dfs.client.mmap.cache.thread.runs.per.timeout";
public static final int DFS_CLIENT_MMAP_CACHE_THREAD_RUNS_PER_TIMEOUT_DEFAULT = 4;
// property for fsimage compression
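
Before this fix both constants carried the identical key string "dfs.client.mmap.cache.timeout.ms", so configuring one setting silently overwrote the other. A small sketch of the now-distinct keys, using only the plain Configuration API and the default values shown above:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.DFSConfigKeys;

    public class MmapCacheKeysSketch {
      public static void main(String[] args) {
        Configuration conf = new Configuration();
        conf.setLong(DFSConfigKeys.DFS_CLIENT_MMAP_CACHE_TIMEOUT_MS, 15 * 60 * 1000L);
        conf.setInt(DFSConfigKeys.DFS_CLIENT_MMAP_CACHE_THREAD_RUNS_PER_TIMEOUT, 4);
        // With the corrected constant these resolve to two distinct keys
        // instead of the second call clobbering the first.
        System.out.println(conf.get("dfs.client.mmap.cache.timeout.ms"));              // 900000
        System.out.println(conf.get("dfs.client.mmap.cache.thread.runs.per.timeout")); // 4
      }
    }
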
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicy.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicy.java
index 5d9262d6022..73f474cba4e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicy.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicy.java
@@ -100,18 +100,17 @@ public abstract class BlockPlacementPolicy {
}
/**
- * Verify that the block is replicated on at least minRacks different racks
- * if there is more than minRacks rack in the system.
+   * Verify if the block's placement meets the requirement of the placement
+   * policy, i.e. whether the replicas are placed on at least minRacks racks in the system.
*
* @param srcPath the full pathname of the file to be verified
* @param lBlk block with locations
- * @param minRacks number of racks the block should be replicated to
- * @return the difference between the required and the actual number of racks
- * the block is replicated to.
+   * @param numOfReplicas the replication factor of the file to be verified
+   * @return the result of the verification
*/
- abstract public int verifyBlockPlacement(String srcPath,
- LocatedBlock lBlk,
- int minRacks);
+ abstract public BlockPlacementStatus verifyBlockPlacement(String srcPath,
+ LocatedBlock lBlk,
+ int numOfReplicas);
/**
* Decide whether deleting the specified replica of the block still makes
* the block conform to the configured block placement policy.
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
index 493e6f87c26..99a40e38b3b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
@@ -698,22 +698,22 @@ public class BlockPlacementPolicyDefault extends BlockPlacementPolicy {
}
@Override
- public int verifyBlockPlacement(String srcPath,
- LocatedBlock lBlk,
- int minRacks) {
+ public BlockPlacementStatus verifyBlockPlacement(String srcPath,
+ LocatedBlock lBlk, int numberOfReplicas) {
DatanodeInfo[] locs = lBlk.getLocations();
if (locs == null)
locs = DatanodeDescriptor.EMPTY_ARRAY;
int numRacks = clusterMap.getNumOfRacks();
if(numRacks <= 1) // only one rack
- return 0;
- minRacks = Math.min(minRacks, numRacks);
+ return new BlockPlacementStatusDefault(
+ Math.min(numRacks, numberOfReplicas), numRacks);
+ int minRacks = Math.min(2, numberOfReplicas);
// 1. Check that all locations are different.
// 2. Count locations on different racks.
    Set<String> racks = new TreeSet<String>();
for (DatanodeInfo dn : locs)
racks.add(dn.getNetworkLocation());
- return minRacks - racks.size();
+ return new BlockPlacementStatusDefault(racks.size(), minRacks);
}
@Override
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementStatus.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementStatus.java
new file mode 100644
index 00000000000..e2ac54a3537
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementStatus.java
@@ -0,0 +1,42 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.blockmanagement;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+
+@InterfaceAudience.Private
+@InterfaceStability.Evolving
+public interface BlockPlacementStatus {
+
+  /**
+   * Boolean value to identify if the replicas of this block satisfy the
+   * requirement of the placement policy.
+   * @return true if the replicas satisfy the placement policy's requirement
+   */
+  public boolean isPlacementPolicySatisfied();
+
+  /**
+   * Get a description, suitable for logging or printing, of why the replicas
+   * of this block fail to meet the requirement of the placement policy.
+   * @return the description for the case where the replicas fail to meet the
+   * requirement of the placement policy
+   */
+  public String getErrorDescription();
+
+}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementStatusDefault.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementStatusDefault.java
new file mode 100644
index 00000000000..0b8b9659601
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementStatusDefault.java
@@ -0,0 +1,44 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.blockmanagement;
+
+public class BlockPlacementStatusDefault implements BlockPlacementStatus {
+
+ private int requiredRacks = 0;
+ private int currentRacks = 0;
+
+ public BlockPlacementStatusDefault(int currentRacks, int requiredRacks){
+ this.requiredRacks = requiredRacks;
+ this.currentRacks = currentRacks;
+ }
+
+ @Override
+ public boolean isPlacementPolicySatisfied() {
+ return requiredRacks <= currentRacks;
+ }
+
+ @Override
+ public String getErrorDescription() {
+ if (isPlacementPolicySatisfied()) {
+ return null;
+ }
+ return "Block should be additionally replicated on " +
+ (requiredRacks - currentRacks) + " more rack(s).";
+ }
+
+}
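
The status object replaces the old "difference in racks" integer. A small sketch of its contract, mirroring how NamenodeFsck consumes it further down in this patch and using only the constructor and methods added above:

    import org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementStatusDefault;

    public class BlockPlacementStatusSketch {
      public static void main(String[] args) {
        // Replicas currently span 2 racks and 2 racks are required: satisfied.
        BlockPlacementStatusDefault ok = new BlockPlacementStatusDefault(2, 2);
        System.out.println(ok.isPlacementPolicySatisfied()); // true
        System.out.println(ok.getErrorDescription());        // null

        // Replicas span only 1 rack while 2 are required: policy violated.
        BlockPlacementStatusDefault bad = new BlockPlacementStatusDefault(1, 2);
        System.out.println(bad.isPlacementPolicySatisfied()); // false
        // Prints "Block should be additionally replicated on 1 more rack(s)."
        System.out.println(bad.getErrorDescription());
      }
    }
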
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/resources/DatanodeWebHdfsMethods.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/resources/DatanodeWebHdfsMethods.java
index 973d0916b90..6e8d605d767 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/resources/DatanodeWebHdfsMethods.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/resources/DatanodeWebHdfsMethods.java
@@ -413,8 +413,15 @@ public class DatanodeWebHdfsMethods {
final long n = length.getValue() != null ?
Math.min(length.getValue(), in.getVisibleLength() - offset.getValue()) :
in.getVisibleLength() - offset.getValue();
- return Response.ok(new OpenEntity(in, n, dfsclient)).type(
- MediaType.APPLICATION_OCTET_STREAM).build();
+
+ /**
+ * Allow the Web UI to perform an AJAX request to get the data.
+ */
+ return Response.ok(new OpenEntity(in, n, dfsclient))
+ .type(MediaType.APPLICATION_OCTET_STREAM)
+ .header("Access-Control-Allow-Methods", "GET")
+ .header("Access-Control-Allow-Origin", "*")
+ .build();
}
case GETFILECHECKSUM:
{
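
One way to observe the effect of the added headers from a client's point of view is a plain HTTP probe; the datanode host, port and file path below are placeholders, not values taken from the patch.

    import java.net.HttpURLConnection;
    import java.net.URL;

    public class WebHdfsCorsCheckSketch {
      public static void main(String[] args) throws Exception {
        // Hypothetical datanode address and file path, for illustration only.
        URL url = new URL("http://datanode.example.com:50075/webhdfs/v1/tmp/f?op=OPEN");
        HttpURLConnection conn = (HttpURLConnection) url.openConnection();
        conn.setRequestMethod("GET");
        // With the headers added above, a browser-based client such as the new
        // explorer page is allowed to read the response cross-origin.
        System.out.println(conn.getHeaderField("Access-Control-Allow-Origin"));  // expected: *
        System.out.println(conn.getHeaderField("Access-Control-Allow-Methods")); // expected: GET
        conn.disconnect();
      }
    }
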
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java
index 14582dccd09..b933387a31b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java
@@ -50,6 +50,7 @@ import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementStatus;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
import org.apache.hadoop.hdfs.server.datanode.CachingStrategy;
import org.apache.hadoop.net.NetUtils;
@@ -374,9 +375,10 @@ public class NamenodeFsck {
locs.length + " replica(s).");
}
// verify block placement policy
- int missingRacks = BlockPlacementPolicy.getInstance(conf, null, networktopology).
- verifyBlockPlacement(path, lBlk, Math.min(2,targetFileReplication));
- if (missingRacks > 0) {
+ BlockPlacementStatus blockPlacementStatus =
+ BlockPlacementPolicy.getInstance(conf, null, networktopology).
+ verifyBlockPlacement(path, lBlk, targetFileReplication);
+ if (!blockPlacementStatus.isPlacementPolicySatisfied()) {
res.numMisReplicatedBlocks++;
misReplicatedPerFile++;
if (!showFiles) {
@@ -385,9 +387,7 @@ public class NamenodeFsck {
out.print(path + ": ");
}
out.println(" Replica placement policy is violated for " +
- block +
- ". Block should be additionally replicated on " +
- missingRacks + " more rack(s).");
+ block + ". " + blockPlacementStatus.getErrorDescription());
}
report.append(i + ". " + blkName + " len=" + block.getNumBytes());
if (locs.length == 0) {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.dust.html b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.dust.html
index 9924825ea55..e7bb5a2b123 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.dust.html
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.dust.html
@@ -47,7 +47,7 @@
-Browse the filesystemNameNode Logs
+Browse the filesystemNameNode Logs
@@ -56,7 +56,7 @@
- Security is {#nnstat}{#SecurityModeEnabled}on{:else}off{/SecurityModeEnabled}{/nnstat}.
+ Security is {#nnstat}{#SecurityEnabled}on{:else}off{/SecurityEnabled}{/nnstat}.
{#nn}{#Safemode}{.}{:else}Safemode is off.{/Safemode}{/nn}
+
+
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/explorer.js b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/explorer.js
new file mode 100644
index 00000000000..72d3c8d0495
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/explorer.js
@@ -0,0 +1,182 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+(function() {
+ "use strict";
+
+ // The chunk size of tailing the files, i.e., how many bytes will be shown
+ // in the preview.
+ var TAIL_CHUNK_SIZE = 32768;
+ var helpers = {
+ 'helper_to_permission': function(chunk, ctx, bodies, params) {
+ var p = ctx.current().permission;
+ var dir = ctx.current().type == 'DIRECTORY' ? 'd' : '-';
+ var symbols = [ '---', '--x', '-w-', '-wx', 'r--', 'r-x', 'rw-', 'rwx' ];
+ var sticky = p > 1000;
+
+ var res = "";
+ for (var i = 0; i < 3; ++i) {
+ res = symbols[(p % 10)] + res;
+ p = Math.floor(p / 10);
+ }
+
+ if (sticky) {
+ var exec = ((parms.perm % 10) & 1) == 1;
+ res[res.length - 1] = exec ? 't' : 'T';
+ }
+
+ chunk.write(dir + res);
+ return chunk;
+ }
+ };
+
+ var base = dust.makeBase(helpers);
+ var current_directory = "";
+
+ function show_err_msg(msg) {
+ $('#alert-panel-body').html(msg);
+ $('#alert-panel').show();
+ }
+
+ function network_error_handler(url) {
+ return function (jqxhr, text, err) {
+      var msg = '<p>Failed to retrieve data from ' + url + ', cause: ' + err + '</p>';
+      if (url.indexOf('/webhdfs/v1') === 0) {
+        msg += '<p>WebHDFS might be disabled. WebHDFS is required to browse the filesystem.</p>';
+      }
+      show_err_msg(msg);
+    };
+  }
+
+ function append_path(prefix, s) {
+ var l = prefix.length;
+ var p = l > 0 && prefix[l - 1] == '/' ? prefix.substring(0, l - 1) : prefix;
+ return p + '/' + s;
+ }
+
+ function get_response(data, type) {
+ return data[type] !== undefined ? data[type] : null;
+ }
+
+ function get_response_err_msg(data) {
+ var msg = data.RemoteException !== undefined ? data.RemoteException.message : "";
+ return msg;
+ }
+
+ function view_file_details(path, abs_path) {
+ function show_block_info(blocks) {
+ var menus = $('#file-info-blockinfo-list');
+ menus.empty();
+
+ menus.data("blocks", blocks);
+ menus.change(function() {
+ var d = $(this).data('blocks')[$(this).val()];
+ if (d === undefined) {
+ return;
+ }
+
+ dust.render('block-info', d, function(err, out) {
+ $('#file-info-blockinfo-body').html(out);
+ });
+
+ });
+ for (var i = 0; i < blocks.length; ++i) {
+        var item = $('<option value="' + i + '">Block ' + i + '</option>');
+ menus.append(item);
+ }
+ menus.change();
+ }
+
+ var url = '/webhdfs/v1' + abs_path + '?op=GET_BLOCK_LOCATIONS';
+ $.ajax({"url": url, "crossDomain": true}).done(function(data) {
+ var d = get_response(data, "LocatedBlocks");
+ if (d === null) {
+ show_err_msg(get_response_err_msg(data));
+ return;
+ }
+
+ $('#file-info-tail').hide();
+ $('#file-info-title').text("File information - " + path);
+
+ var download_url = '/webhdfs/v1' + abs_path + '/?op=OPEN';
+
+ $('#file-info-download').attr('href', download_url);
+ $('#file-info-preview').click(function() {
+ var offset = d.fileLength - TAIL_CHUNK_SIZE;
+ var url = offset > 0 ? download_url + '&offset=' + offset : download_url;
+ $.get(url, function(t) {
+ $('#file-info-preview-body').val(t);
+ $('#file-info-tail').show();
+ }, "text").error(network_error_handler(url));
+ });
+
+ if (d.fileLength > 0) {
+ show_block_info(d.locatedBlocks);
+ $('#file-info-blockinfo-panel').show();
+ } else {
+ $('#file-info-blockinfo-panel').hide();
+ }
+ $('#file-info').modal();
+ }).error(network_error_handler(url));
+ }
+
+ function browse_directory(dir) {
+ var url = '/webhdfs/v1' + dir + '?op=LISTSTATUS';
+ $.get(url, function(data) {
+ var d = get_response(data, "FileStatuses");
+ if (d === null) {
+ show_err_msg(get_response_err_msg(data));
+ return;
+ }
+
+ current_directory = dir;
+ $('#directory').val(dir);
+ dust.render('explorer', base.push(d), function(err, out) {
+ $('#panel').html(out);
+
+ $('.explorer-browse-links').click(function() {
+ var type = $(this).attr('inode-type');
+ var path = $(this).attr('inode-path');
+ var abs_path = append_path(current_directory, path);
+ if (type == 'DIRECTORY') {
+ browse_directory(abs_path);
+ } else {
+ view_file_details(path, abs_path);
+ }
+ });
+ });
+ }).error(network_error_handler(url));
+ }
+
+
+ function init() {
+ var templates = [
+ { 'name': 'explorer', 'url': 'explorer.dust.html'},
+ { 'name': 'block-info', 'url': 'explorer-block-info.dust.html'}
+ ];
+
+ load_templates(dust, templates, function () {
+ var b = function() { browse_directory($('#directory').val()); };
+ $('#btn-nav-directory').click(b);
+ browse_directory('/');
+ }, function (url, jqxhr, text, err) {
+ network_error_handler(url)(jqxhr, text, err);
+ });
+ }
+
+ init();
+})();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/HdfsNfsGateway.apt.vm b/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/HdfsNfsGateway.apt.vm
new file mode 100644
index 00000000000..c8de842510d
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/HdfsNfsGateway.apt.vm
@@ -0,0 +1,258 @@
+
+~~ Licensed under the Apache License, Version 2.0 (the "License");
+~~ you may not use this file except in compliance with the License.
+~~ You may obtain a copy of the License at
+~~
+~~ http://www.apache.org/licenses/LICENSE-2.0
+~~
+~~ Unless required by applicable law or agreed to in writing, software
+~~ distributed under the License is distributed on an "AS IS" BASIS,
+~~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+~~ See the License for the specific language governing permissions and
+~~ limitations under the License. See accompanying LICENSE file.
+
+ ---
+ Hadoop Distributed File System-${project.version} - HDFS NFS Gateway
+ ---
+ ---
+ ${maven.build.timestamp}
+
+HDFS NFS Gateway
+
+ \[ {{{./index.html}Go Back}} \]
+
+%{toc|section=1|fromDepth=0}
+
+* {Overview}
+
+ The NFS Gateway supports NFSv3 and allows HDFS to be mounted as part of the client's local file system.
+ Currently NFS Gateway supports and enables the following usage patterns:
+
+ * Users can browse the HDFS file system through their local file system
+ on NFSv3 client compatible operating systems.
+
+  * Users can download files from the HDFS file system on to their
+    local file system.
+
+ * Users can upload files from their local file system directly to the
+ HDFS file system.
+
+ * Users can stream data directly to HDFS through the mount point. File
+ append is supported but random write is not supported.
+
+  The NFS gateway machine needs everything required to run an HDFS client, such as the Hadoop JAR files and a HADOOP_CONF directory.
+  The NFS gateway can be on the same host as a DataNode, the NameNode, or any HDFS client.
+
+
+* {Configuration}
+
+  The NFS gateway can work with its default settings in most cases. However, it's
+  strongly recommended that users update a few configuration properties based on their use
+  cases. All the related configuration properties can be added or updated in hdfs-site.xml.
+
+  * If the client mounts the export with access time update allowed, make sure the following
+    property is not disabled in the configuration file. Only the NameNode needs to be restarted after
+    this property is changed. On some Unix systems, the user can disable access time update
+    by mounting the export with "noatime".
+
+----
+<property>
+  <name>dfs.access.time.precision</name>
+  <value>3600000</value>
+  <description>The access time for HDFS file is precise upto this value.
+    The default value is 1 hour. Setting a value of 0 disables
+    access times for HDFS.
+  </description>
+</property>
+----
+
+  * Users are expected to update the file dump directory. The NFS client often
+    reorders writes. Sequential writes can arrive at the NFS gateway in random
+    order. This directory is used to temporarily save out-of-order writes
+    before writing to HDFS. For each file, the out-of-order writes are dumped once
+    they accumulate in memory beyond a certain threshold (e.g., 1MB).
+    One needs to make sure the directory has enough
+    space. For example, if the application uploads 10 files of 100MB each,
+    it is recommended that this directory have roughly 1GB of space in case a
+    worst-case write reorder happens to every file. Only the NFS gateway needs to be restarted after
+    this property is updated.
+
+----
+<property>
+  <name>dfs.nfs3.dump.dir</name>
+  <value>/tmp/.hdfs-nfs</value>
+</property>
+----
+
+  * By default, the export can be mounted by any client. To better control access,
+    users can update the following property. The value string contains the machine name and
+    access privilege, separated by whitespace
+    characters. The machine name format can be a single host, wildcards, or IPv4 networks. The
+    access privilege uses rw or ro to specify read-write or read-only access of the machines to the exports. If the access
+    privilege is not provided, the default is read-only. Entries are separated by ";".
+    For example: "192.168.0.0/22 rw ; host*.example.com ; host1.test.org ro;". Only the NFS gateway needs to be restarted after
+    this property is updated.
+
+----
+<property>
+  <name>dfs.nfs.exports.allowed.hosts</name>
+  <value>* rw</value>
+</property>
+----
+
+  * Customize log settings. To get NFS debug trace, users can edit the log4j.properties file
+    to add the following. Note that debug trace, especially for ONCRPC, can be very verbose.
+
+ To change logging level:
+
+-----------------------------------------------
+ log4j.logger.org.apache.hadoop.hdfs.nfs=DEBUG
+-----------------------------------------------
+
+ To get more details of ONCRPC requests:
+
+-----------------------------------------------
+ log4j.logger.org.apache.hadoop.oncrpc=DEBUG
+-----------------------------------------------
+
+
+* {Start and stop NFS gateway service}
+
+  Three daemons are required to provide NFS service: rpcbind (or portmap), mountd and nfsd.
+  The NFS gateway process has both nfsd and mountd. It shares the HDFS root "/" as the
+  only export. It is recommended to use the portmap included in the NFS gateway package. Even
+  though the NFS gateway works with the portmap/rpcbind provided by most Linux distributions, the
+  portmap included in the package is needed on some Linux systems such as RHEL 6.2 due to an
+  {{{https://bugzilla.redhat.com/show_bug.cgi?id=731542}rpcbind bug}}. More detailed discussions can
+  be found in {{{https://issues.apache.org/jira/browse/HDFS-4763}HDFS-4763}}.
+
+ [[1]] Stop nfs/rpcbind/portmap services provided by the platform (commands can be different on various Unix platforms):
+
+-------------------------
+ service nfs stop
+
+ service rpcbind stop
+-------------------------
+
+
+ [[2]] Start package included portmap (needs root privileges):
+
+-------------------------
+ hadoop portmap
+
+ OR
+
+ hadoop-daemon.sh start portmap
+-------------------------
+
+ [[3]] Start mountd and nfsd.
+
+    No root privileges are required for this command. However, ensure that the user starting
+    the Hadoop cluster and the user starting the NFS gateway are the same.
+
+-------------------------
+ hadoop nfs3
+
+ OR
+
+ hadoop-daemon.sh start nfs3
+-------------------------
+
+ Note, if the hadoop-daemon.sh script starts the NFS gateway, its log can be found in the hadoop log folder.
+
+
+ [[4]] Stop NFS gateway services.
+
+-------------------------
+ hadoop-daemon.sh stop nfs3
+
+ hadoop-daemon.sh stop portmap
+-------------------------
+
+
+* {Verify validity of NFS related services}
+
+ [[1]] Execute the following command to verify if all the services are up and running:
+
+-------------------------
+ rpcinfo -p $nfs_server_ip
+-------------------------
+
+ You should see output similar to the following:
+
+-------------------------
+ program vers proto port
+
+ 100005 1 tcp 4242 mountd
+
+ 100005 2 udp 4242 mountd
+
+ 100005 2 tcp 4242 mountd
+
+ 100000 2 tcp 111 portmapper
+
+ 100000 2 udp 111 portmapper
+
+ 100005 3 udp 4242 mountd
+
+ 100005 1 udp 4242 mountd
+
+ 100003 3 tcp 2049 nfs
+
+ 100005 3 tcp 4242 mountd
+-------------------------
+
+ [[2]] Verify if the HDFS namespace is exported and can be mounted.
+
+-------------------------
+ showmount -e $nfs_server_ip
+-------------------------
+
+ You should see output similar to the following:
+
+-------------------------
+ Exports list on $nfs_server_ip :
+
+ / (everyone)
+-------------------------
+
+
+* {Mount the export “/”}
+
+  Currently NFS v3 only uses TCP as the transport protocol.
+  NLM is not supported, so the mount option "nolock" is needed. It's recommended to use a
+  hard mount. This is because, even after the client sends all data to
+  the NFS gateway, it may take the NFS gateway some extra time to transfer data to HDFS
+  when writes were reordered by the NFS client kernel.
+
+  If a soft mount has to be used, the user should give it a relatively
+  long timeout (at least no less than the default timeout on the host).
+
+ The users can mount the HDFS namespace as shown below:
+
+-------------------------------------------------------------------
+ mount -t nfs -o vers=3,proto=tcp,nolock $server:/ $mount_point
+-------------------------------------------------------------------
+
+  Then users can access HDFS as part of the local file system, except that
+  hard links and random writes are not yet supported.
+
+* {User authentication and mapping}
+
+  The NFS gateway in this release uses AUTH_UNIX-style authentication. When the user on the NFS client
+  accesses the mount point, the NFS client passes the UID to the NFS gateway.
+  The NFS gateway does a lookup to find the user name from the UID, and then passes the
+  user name to HDFS along with the HDFS requests.
+ For example, if the NFS client has current user as "admin", when the user accesses
+ the mounted directory, NFS gateway will access HDFS as user "admin". To access HDFS
+ as the user "hdfs", one needs to switch the current user to "hdfs" on the client system
+ when accessing the mounted directory.
+
+  The system administrator must ensure that the user on the NFS client host has the same
+  name and UID as that on the NFS gateway host. This is usually not a problem if
+  the same user management system (e.g., LDAP/NIS) is used to create and deploy users on
+  the HDFS nodes and the NFS client node. In case the user account is created manually on different hosts, one might need to
+  modify the UID (e.g., do "usermod -u 123 myusername") on either the NFS client or the NFS gateway host
+  in order to make it the same on both sides. More technical details of RPC AUTH_UNIX can be found
+  in the {{{http://tools.ietf.org/html/rfc1057}RPC specification}}.
+
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java
index 9aaeb74a1c3..bcebce4e201 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java
@@ -83,7 +83,6 @@ import org.apache.log4j.RollingFileAppender;
import org.junit.Test;
import com.google.common.collect.Sets;
-import org.mockito.Mockito;
import static org.mockito.Mockito.*;
/**
@@ -892,6 +891,80 @@ public class TestFsck {
}
}
}
+
+ /**
+ * Tests that the # of misreplaced replicas is correct
+ * @throws IOException
+ */
+ @Test
+ public void testFsckMisPlacedReplicas() throws IOException {
+ // Desired replication factor
+ final short REPL_FACTOR = 2;
+ // Number of replicas to actually start
+ short NUM_DN = 2;
+ // Number of blocks to write
+ final short NUM_BLOCKS = 3;
+ // Set a small-ish blocksize
+ final long blockSize = 512;
+
+ String [] racks = {"/rack1", "/rack1"};
+ String [] hosts = {"host1", "host2"};
+
+ Configuration conf = new Configuration();
+ conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, blockSize);
+
+ MiniDFSCluster cluster = null;
+ DistributedFileSystem dfs = null;
+
+ try {
+ // Startup a minicluster
+ cluster =
+ new MiniDFSCluster.Builder(conf).numDataNodes(NUM_DN).hosts(hosts)
+ .racks(racks).build();
+ assertNotNull("Failed Cluster Creation", cluster);
+ cluster.waitClusterUp();
+ dfs = (DistributedFileSystem) cluster.getFileSystem();
+ assertNotNull("Failed to get FileSystem", dfs);
+
+ // Create a file that will be intentionally under-replicated
+ final String pathString = new String("/testfile");
+ final Path path = new Path(pathString);
+ long fileLen = blockSize * NUM_BLOCKS;
+ DFSTestUtil.createFile(dfs, path, fileLen, REPL_FACTOR, 1);
+
+ // Create an under-replicated file
+ NameNode namenode = cluster.getNameNode();
+ NetworkTopology nettop = cluster.getNamesystem().getBlockManager()
+ .getDatanodeManager().getNetworkTopology();
+ // Add a new node on different rack, so previous blocks' replicas
+ // are considered to be misplaced
+ nettop.add(DFSTestUtil.getDatanodeDescriptor("/rack2", "/host3"));
+ NUM_DN++;
+
+      Map<String, String[]> pmap = new HashMap<String, String[]>();
+ Writer result = new StringWriter();
+ PrintWriter out = new PrintWriter(result, true);
+ InetAddress remoteAddress = InetAddress.getLocalHost();
+ NamenodeFsck fsck = new NamenodeFsck(conf, namenode, nettop, pmap, out,
+ NUM_DN, (short)REPL_FACTOR, remoteAddress);
+
+ // Run the fsck and check the Result
+ final HdfsFileStatus file =
+ namenode.getRpcServer().getFileInfo(pathString);
+ assertNotNull(file);
+ Result res = new Result(conf);
+ fsck.check(pathString, file, res);
+ // check misReplicatedBlock number.
+ assertEquals(res.numMisReplicatedBlocks, NUM_BLOCKS);
+ } finally {
+ if(dfs != null) {
+ dfs.close();
+ }
+ if(cluster != null) {
+ cluster.shutdown();
+ }
+ }
+ }
/** Test fsck with FileNotFound */
@Test
diff --git a/hadoop-project/src/site/site.xml b/hadoop-project/src/site/site.xml
index f6496b85f5e..1399e393f0d 100644
--- a/hadoop-project/src/site/site.xml
+++ b/hadoop-project/src/site/site.xml
@@ -80,6 +80,7 @@
+      <item name="HDFS NFS Gateway" href="hadoop-project-dist/hadoop-hdfs/HdfsNfsGateway.html"/>