HDFS-8581. ContentSummary on / skips further counts on yielding lock (contributed by J.Andreina)
(cherry picked from commit 4014ce5990)

parent 03f50de48d
commit cd256c1fda
CHANGES.txt
@@ -979,6 +979,9 @@ Release 2.8.0 - UNRELEASED
     HDFS-8939. Test(S)WebHdfsFileContextMainOperations failing on branch-2.
     (Chris Nauroth via jghoman)
 
+    HDFS-8581. ContentSummary on / skips further counts on yielding lock
+    (J.Andreina via vinayakumarb)
+
 Release 2.7.2 - UNRELEASED
 
   INCOMPATIBLE CHANGES
INodeDirectory.java
@@ -660,7 +660,7 @@ public class INodeDirectory extends INodeWithAdditionalFields
         continue;
       }
       // The locks were released and reacquired. Check parent first.
-      if (getParent() == null) {
+      if (!isRoot() && getParent() == null) {
        // Stop further counting and return whatever we have so far.
         break;
       }
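Why the one-line fix matters: the content-summary traversal periodically releases and reacquires the lock (the comment above the check), and afterwards it uses getParent() == null to detect that the directory was deleted in the meantime. The root is the one directory whose parent is legitimately null, so a summary on "/" tripped the guard after the first yield and stopped counting early. A minimal, self-contained sketch of the two guards (illustrative only; Dir stands in for INodeDirectory, and this is not the actual HDFS traversal code):

    /** Sketch of the post-yield re-check performed during a ContentSummary walk. */
    class YieldCheckSketch {
      interface Dir {
        boolean isRoot();
        Dir getParent(); // null for the root, and for a deleted directory
      }

      /** Pre-fix guard: also fires for the root, whose parent is always null. */
      static boolean abortBeforeFix(Dir d) {
        return d.getParent() == null;
      }

      /** Fixed guard: abort only when a non-root directory lost its parent. */
      static boolean abortAfterFix(Dir d) {
        return !d.isRoot() && d.getParent() == null;
      }
    }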
TestQuota.java
@@ -23,12 +23,14 @@ import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertTrue;
 
 import java.io.ByteArrayOutputStream;
+import java.io.IOException;
 import java.io.OutputStream;
 import java.io.PrintStream;
 import java.security.PrivilegedExceptionAction;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.ContentSummary;
+import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.StorageType;
@@ -943,4 +945,34 @@ public class TestQuota {
     assertTrue(errOutput.contains(StorageType.getTypesSupportingQuota()
         .toString()));
   }
+
+  /**
+   * File count on root , should return total value of files in Filesystem
+   * when one folder contains files more than "dfs.content-summary.limit".
+   */
+  @Test
+  public void testHugeFileCount() throws IOException {
+    MiniDFSCluster cluster = null;
+    Configuration conf = new Configuration();
+    conf.setInt("dfs.content-summary.limit", 4);
+    try {
+      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
+      DistributedFileSystem dfs = cluster.getFileSystem();
+      for (int i = 1; i <= 5; i++) {
+        FSDataOutputStream out =
+            dfs.create(new Path("/Folder1/" + "file" + i), (short) 1);
+        out.close();
+      }
+      FSDataOutputStream out = dfs.create(new Path("/Folder2/file6"), (short) 1);
+      out.close();
+      ContentSummary contentSummary = dfs.getContentSummary(new Path("/"));
+      assertEquals(6, contentSummary.getFileCount());
+    } finally {
+      if (cluster != null) {
+        cluster.shutdown();
+        cluster = null;
+      }
+    }
+  }
+
 }
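The test exercises the fix end to end: with dfs.content-summary.limit lowered to 4, counting the six files under "/" forces the traversal to yield the lock at least once inside /Folder1; before the fix, the count on the root stopped at that yield and reported fewer than 6 files. The yield-every-N-items pattern that the limit controls, sketched in plain Java (illustrative names and locking, not HDFS internals):

    import java.util.List;
    import java.util.concurrent.locks.ReentrantLock;

    /** Sketch of a count that yields its lock every "limit" items. */
    class YieldingCounter {
      private final ReentrantLock lock = new ReentrantLock();
      private final int limit; // plays the role of dfs.content-summary.limit

      YieldingCounter(int limit) { this.limit = limit; }

      long count(List<String> items) {
        long total = 0;
        int sinceYield = 0;
        lock.lock();
        try {
          for (String ignored : items) {
            total++;
            if (++sinceYield >= limit) {
              // Release and reacquire so other operations can make progress.
              // After this window the tree may have changed, which is why
              // HDFS re-validates the directory (the getParent() check above).
              lock.unlock();
              lock.lock();
              sinceYield = 0;
            }
          }
        } finally {
          lock.unlock();
        }
        return total;
      }
    }

Everything counted before a yield must be kept and the walk resumed; aborting at the yield point is what made the root count wrong.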