Merge r1578669 from trunk.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/branch-2@1578676 13f79535-47bb-0310-9956-ffa450edef68
Jing Zhao 2014-03-18 00:10:22 +00:00
parent 80f3726024
commit d611f42c37
37 changed files with 79 additions and 62 deletions

View File

@@ -165,6 +165,9 @@ Release 2.4.0 - UNRELEASED
     HDFS-6106. Reduce default for
     dfs.namenode.path.based.cache.refresh.interval.ms (cmccabe)

+    HDFS-6090. Use MiniDFSCluster.Builder instead of deprecated constructors.
+    (Akira AJISAKA via jing9)
+
   OPTIMIZATIONS

     HDFS-5790. LeaseManager.findPath is very slow when many leases need recovery
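Every file change below applies the same one-for-one rewrite: a call to a deprecated positional MiniDFSCluster constructor becomes a MiniDFSCluster.Builder chain that names only the options it sets. A minimal standalone sketch of the pattern (the wrapper class, two-datanode count, and try/finally shutdown are illustrative, not part of this commit):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.MiniDFSCluster;

    public class MiniDfsBuilderSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Before (deprecated): new MiniDFSCluster(conf, 2, true, null)
        //   -- positional arguments: conf, numDataNodes, format, racks.
        // After: unset Builder options keep their defaults, so only the
        // datanode count needs to be named here.
        MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
            .numDataNodes(2)
            .build();
        try {
          cluster.waitActive();  // block until the datanodes have registered
          System.out.println("cluster uri = " + cluster.getFileSystem().getUri());
        } finally {
          cluster.shutdown();
        }
      }
    }

Topology-aware call sites follow the same shape, adding .racks(...) and .hosts(...) (and .nameNodePort(...) or .startupOption(...) where the old constructor passed them), as the hunks below show.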

View File

@@ -523,7 +523,8 @@ static void runTestCache(int port) throws Exception {
     Configuration conf = new Configuration();
     MiniDFSCluster cluster = null;
     try {
-      cluster = new MiniDFSCluster(port, conf, 2, true, true, null, null);
+      cluster = new MiniDFSCluster.Builder(conf).nameNodePort(port)
+          .numDataNodes(2).build();
       URI uri = cluster.getFileSystem().getUri();
       LOG.info("uri=" + uri);

View File

@@ -146,7 +146,8 @@ protected void setUp() throws Exception {
       fileSystem = FileSystem.getLocal(new JobConf());
     }
     else {
-      dfsCluster = new MiniDFSCluster(new JobConf(), dataNodes, true, null);
+      dfsCluster = new MiniDFSCluster.Builder(new JobConf())
+          .numDataNodes(dataNodes).build();
       fileSystem = dfsCluster.getFileSystem();
     }
     if (localMR) {

View File

@@ -51,7 +51,7 @@ public void testJobShell() throws Exception {
     try {
       Configuration conf = new Configuration();
       //start the mini mr and dfs cluster.
-      dfs = new MiniDFSCluster(conf, 2 , true, null);
+      dfs = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
       fs = dfs.getFileSystem();
       FSDataOutputStream stream = fs.create(testFile);
       stream.write("teststring".getBytes());

View File

@@ -125,7 +125,7 @@ public void testWithDFS() throws IOException {
       JobConf conf = new JobConf();
       conf.set(JTConfig.JT_SYSTEM_DIR, "/tmp/custom/mapred/system");
-      dfs = new MiniDFSCluster(conf, 4, true, null);
+      dfs = new MiniDFSCluster.Builder(conf).numDataNodes(4).build();
       fileSys = dfs.getFileSystem();
       mr = new MiniMRCluster(taskTrackers, fileSys.getUri().toString(), 1, null, null, conf);

View File

@@ -141,7 +141,8 @@ public void testLazyOutput() throws Exception {
       Configuration conf = new Configuration();
       // Start the mini-MR and mini-DFS clusters
-      dfs = new MiniDFSCluster(conf, NUM_HADOOP_SLAVES, true, null);
+      dfs = new MiniDFSCluster.Builder(conf).numDataNodes(NUM_HADOOP_SLAVES)
+          .build();
       fileSys = dfs.getFileSystem();
       mr = new MiniMRCluster(NUM_HADOOP_SLAVES, fileSys.getUri().toString(), 1);

View File

@@ -44,11 +44,10 @@ public class TestMRCJCFileInputFormat extends TestCase {
   MiniDFSCluster dfs = null;

   private MiniDFSCluster newDFSCluster(JobConf conf) throws Exception {
-    return new MiniDFSCluster(conf, 4, true,
-                              new String[]{"/rack0", "/rack0",
-                                           "/rack1", "/rack1"},
-                              new String[]{"host0", "host1",
-                                           "host2", "host3"});
+    return new MiniDFSCluster.Builder(conf).numDataNodes(4)
+        .racks(new String[]{"/rack0", "/rack0", "/rack1", "/rack1"})
+        .hosts(new String[]{"host0", "host1", "host2", "host3"})
+        .build();
   }

   public void testLocality() throws Exception {
@@ -162,7 +161,7 @@ public void testMultiLevelInput() throws Exception {
     JobConf job = new JobConf(conf);
     job.setBoolean("dfs.replication.considerLoad", false);
-    dfs = new MiniDFSCluster(job, 1, true, rack1, hosts1);
+    dfs = new MiniDFSCluster.Builder(job).racks(rack1).hosts(hosts1).build();
     dfs.waitActive();
     String namenode = (dfs.getFileSystem()).getUri().getHost() + ":" +

View File

@@ -76,7 +76,8 @@ public void testMerge() throws Exception {
     try {
       Configuration conf = new Configuration();
       // Start the mini-MR and mini-DFS clusters
-      dfsCluster = new MiniDFSCluster(conf, NUM_HADOOP_DATA_NODES, true, null);
+      dfsCluster = new MiniDFSCluster.Builder(conf)
+          .numDataNodes(NUM_HADOOP_DATA_NODES).build();
       fileSystem = dfsCluster.getFileSystem();
       mrCluster = MiniMRClientClusterFactory.create(this.getClass(),
           NUM_HADOOP_DATA_NODES, conf);

View File

@@ -324,7 +324,7 @@ public void reduce(WritableComparable key, Iterator<Writable> values,
   @BeforeClass
   public static void setup() throws IOException {
     // create configuration, dfs, file system and mapred cluster
-    dfs = new MiniDFSCluster(conf, 1, true, null);
+    dfs = new MiniDFSCluster.Builder(conf).build();
     fileSys = dfs.getFileSystem();
     if (!(new File(MiniMRYarnCluster.APPJAR)).exists()) {

View File

@@ -169,7 +169,7 @@ public void testClassPath() throws IOException {
       final int jobTrackerPort = 60050;
       Configuration conf = new Configuration();
-      dfs = new MiniDFSCluster(conf, 1, true, null);
+      dfs = new MiniDFSCluster.Builder(conf).build();
       fileSys = dfs.getFileSystem();
       namenode = fileSys.getUri().toString();
       mr = new MiniMRCluster(taskTrackers, namenode, 3);
@@ -201,7 +201,7 @@ public void testExternalWritable()
       final int taskTrackers = 4;
       Configuration conf = new Configuration();
-      dfs = new MiniDFSCluster(conf, 1, true, null);
+      dfs = new MiniDFSCluster.Builder(conf).build();
       fileSys = dfs.getFileSystem();
       namenode = fileSys.getUri().toString();
       mr = new MiniMRCluster(taskTrackers, namenode, 3);

View File

@@ -38,7 +38,7 @@ public void testWithDFS() throws IOException {
     FileSystem fileSys = null;
     try {
       JobConf conf = new JobConf();
-      dfs = new MiniDFSCluster(conf, 1, true, null);
+      dfs = new MiniDFSCluster.Builder(conf).build();
       fileSys = dfs.getFileSystem();
       mr = new MiniMRCluster(2, fileSys.getUri().toString(), 4);
       MRCaching.setupCache("/cachedir", fileSys);

View File

@@ -75,7 +75,7 @@ public RunningJob run() throws IOException {
   @Before
   public void setUp() throws Exception {
-    dfs = new MiniDFSCluster(conf, 4, true, null);
+    dfs = new MiniDFSCluster.Builder(conf).numDataNodes(4).build();
     fs = DFS_UGI.doAs(new PrivilegedExceptionAction<FileSystem>() {
       public FileSystem run() throws IOException {

View File

@@ -92,8 +92,8 @@ private void testCachingAtLevel(int level) throws Exception {
       String rack2 = getRack(1, level);
       Configuration conf = new Configuration();
       // Run a datanode on host1 under /a/b/c/..../d1/e1/f1
-      dfs = new MiniDFSCluster(conf, 1, true, new String[] {rack1},
-                               new String[] {"host1.com"});
+      dfs = new MiniDFSCluster.Builder(conf).racks(new String[] {rack1})
+          .hosts(new String[] {"host1.com"}).build();
       dfs.waitActive();
       fileSys = dfs.getFileSystem();
       if (!fileSys.mkdirs(inDir)) {

View File

@@ -57,7 +57,7 @@ public static Test suite() {
     TestSetup setup = new TestSetup(mySuite) {
       protected void setUp() throws Exception {
         Configuration conf = new Configuration();
-        dfsCluster = new MiniDFSCluster(conf, 2, true, null);
+        dfsCluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
         mrCluster = new MiniMRCluster(2,
             dfsCluster.getFileSystem().getUri().toString(), 1);
       }

View File

@@ -106,7 +106,7 @@ public void testJobWithDFS() throws IOException {
       final int taskTrackers = 4;
       final int jobTrackerPort = 60050;
       Configuration conf = new Configuration();
-      dfs = new MiniDFSCluster(conf, 1, true, null);
+      dfs = new MiniDFSCluster.Builder(conf).build();
       fileSys = dfs.getFileSystem();
       namenode = fileSys.getUri().toString();
       mr = new MiniMRCluster(taskTrackers, namenode, 2);

View File

@@ -62,7 +62,7 @@ public static Test suite() {
     TestSetup setup = new TestSetup(new TestSuite(TestDatamerge.class)) {
       protected void setUp() throws Exception {
         Configuration conf = new Configuration();
-        cluster = new MiniDFSCluster(conf, 2, true, null);
+        cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
       }
       protected void tearDown() throws Exception {
         if (cluster != null) {

View File

@@ -39,9 +39,10 @@ public void testSplitting() throws Exception {
     JobConf conf = new JobConf();
     MiniDFSCluster dfs = null;
     try {
-      dfs = new MiniDFSCluster(conf, 4, true, new String[] { "/rack0",
-          "/rack0", "/rack1", "/rack1" }, new String[] { "host0", "host1",
-          "host2", "host3" });
+      dfs = new MiniDFSCluster.Builder(conf).numDataNodes(4)
+          .racks(new String[] { "/rack0", "/rack0", "/rack1", "/rack1" })
+          .hosts(new String[] { "host0", "host1", "host2", "host3" })
+          .build();
       FileSystem fs = dfs.getFileSystem();
       Path path = getPath("/foo/bar", fs);

View File

@@ -79,7 +79,7 @@ public void testPipes() throws IOException {
     try {
       final int numSlaves = 2;
      Configuration conf = new Configuration();
-      dfs = new MiniDFSCluster(conf, numSlaves, true, null);
+      dfs = new MiniDFSCluster.Builder(conf).numDataNodes(numSlaves).build();
       mr = new MiniMRCluster(numSlaves, dfs.getFileSystem().getUri().toString(), 1);
       writeInputFile(dfs.getFileSystem(), inputPath);
       runProgram(mr, dfs, wordCountSimple,

View File

@@ -151,8 +151,8 @@ private void sleepForever() {
   public void start() throws IOException, FileNotFoundException,
       URISyntaxException {
     if (!noDFS) {
-      dfs = new MiniDFSCluster(nnPort, conf, numDataNodes, true, true,
-          dfsOpts, null, null);
+      dfs = new MiniDFSCluster.Builder(conf).nameNodePort(nnPort)
+          .numDataNodes(numDataNodes).startupOption(dfsOpts).build();
       LOG.info("Started MiniDFSCluster -- namenode on port "
           + dfs.getNameNodePort());
     }

View File

@@ -131,7 +131,8 @@ public void testLazyOutput() throws Exception {
       Configuration conf = new Configuration();
       // Start the mini-MR and mini-DFS clusters
-      dfs = new MiniDFSCluster(conf, NUM_HADOOP_SLAVES, true, null);
+      dfs = new MiniDFSCluster.Builder(conf).numDataNodes(NUM_HADOOP_SLAVES)
+          .build();
       fileSys = dfs.getFileSystem();
       mr = new MiniMRCluster(NUM_HADOOP_SLAVES, fileSys.getUri().toString(), 1);

View File

@@ -313,7 +313,8 @@ public void testSplitPlacement() throws Exception {
      */
     Configuration conf = new Configuration();
     conf.setBoolean("dfs.replication.considerLoad", false);
-    dfs = new MiniDFSCluster(conf, 1, true, rack1, hosts1);
+    dfs = new MiniDFSCluster.Builder(conf).racks(rack1).hosts(hosts1)
+        .build();
     dfs.waitActive();
     fileSys = dfs.getFileSystem();
@@ -855,7 +856,8 @@ public void testSplitPlacementForCompressedFiles() throws Exception {
      */
     Configuration conf = new Configuration();
     conf.setBoolean("dfs.replication.considerLoad", false);
-    dfs = new MiniDFSCluster(conf, 1, true, rack1, hosts1);
+    dfs = new MiniDFSCluster.Builder(conf).racks(rack1).hosts(hosts1)
+        .build();
     dfs.waitActive();
     fileSys = dfs.getFileSystem();
@@ -1197,7 +1199,8 @@ public void testMissingBlocks() throws Exception {
     Configuration conf = new Configuration();
     conf.set("fs.hdfs.impl", MissingBlockFileSystem.class.getName());
     conf.setBoolean("dfs.replication.considerLoad", false);
-    dfs = new MiniDFSCluster(conf, 1, true, rack1, hosts1);
+    dfs = new MiniDFSCluster.Builder(conf).racks(rack1).hosts(hosts1)
+        .build();
     dfs.waitActive();
     namenode = (dfs.getFileSystem()).getUri().getHost() + ":" +

View File

@@ -37,9 +37,10 @@ public void testSplitting() throws Exception {
     Job job = Job.getInstance();
     MiniDFSCluster dfs = null;
     try {
-      dfs = new MiniDFSCluster(job.getConfiguration(), 4, true, new String[] { "/rack0",
-          "/rack0", "/rack1", "/rack1" }, new String[] { "host0", "host1",
-          "host2", "host3" });
+      dfs = new MiniDFSCluster.Builder(job.getConfiguration()).numDataNodes(4)
+          .racks(new String[] { "/rack0", "/rack0", "/rack1", "/rack1" })
+          .hosts(new String[] { "host0", "host1", "host2", "host3" })
+          .build();
       FileSystem fs = dfs.getFileSystem();
       Path path = getPath("/foo/bar", fs);

View File

@@ -45,7 +45,7 @@ public static Test suite() {
     TestSetup setup = new TestSetup(new TestSuite(TestJoinDatamerge.class)) {
       protected void setUp() throws Exception {
         Configuration conf = new Configuration();
-        cluster = new MiniDFSCluster(conf, 2, true, null);
+        cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
       }
       protected void tearDown() throws Exception {
         if (cluster != null) {

View File

@@ -50,7 +50,7 @@ public static Test suite() {
     TestSetup setup = new TestSetup(new TestSuite(TestJoinProperties.class)) {
       protected void setUp() throws Exception {
         Configuration conf = new Configuration();
-        cluster = new MiniDFSCluster(conf, 2, true, null);
+        cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
         base = cluster.getFileSystem().makeQualified(new Path("/nested"));
         src = generateSources(conf);
       }

View File

@@ -58,7 +58,8 @@ public class TestMRCredentials {
   public static void setUp() throws Exception {
     System.setProperty("hadoop.log.dir", "logs");
     Configuration conf = new Configuration();
-    dfsCluster = new MiniDFSCluster(conf, numSlaves, true, null);
+    dfsCluster = new MiniDFSCluster.Builder(conf).numDataNodes(numSlaves)
+        .build();
     jConf = new JobConf(conf);
     FileSystem.setDefaultUri(conf, dfsCluster.getFileSystem().getUri().toString());
     mrCluster = MiniMRClientClusterFactory.create(TestMRCredentials.class, 1, jConf);

View File

@@ -92,7 +92,7 @@ private void startCluster(Configuration conf) throws Exception {
         YarnConfiguration.DEFAULT_YARN_CROSS_PLATFORM_APPLICATION_CLASSPATH))
         + File.pathSeparator + classpathDir;
     conf.set(YarnConfiguration.YARN_APPLICATION_CLASSPATH, cp);
-    dfsCluster = new MiniDFSCluster(conf, 1, true, null);
+    dfsCluster = new MiniDFSCluster.Builder(conf).build();
     FileSystem fileSystem = dfsCluster.getFileSystem();
     fileSystem.mkdirs(new Path("/tmp"));
     fileSystem.mkdirs(new Path("/user"));

View File

@@ -70,7 +70,8 @@ protected void setUp() throws Exception {
     UserGroupInformation.createUserForTesting("u1", userGroups);
     UserGroupInformation.createUserForTesting("u2", new String[]{"gg"});
-    dfsCluster = new MiniDFSCluster(conf, dataNodes, true, null);
+    dfsCluster = new MiniDFSCluster.Builder(conf).numDataNodes(dataNodes)
+        .build();
     FileSystem fileSystem = dfsCluster.getFileSystem();
     fileSystem.mkdirs(new Path("/tmp"));
     fileSystem.mkdirs(new Path("/user"));

View File

@@ -57,7 +57,8 @@ protected void setUp() throws Exception {
     conf.set("dfs.permissions", "true");
     conf.set("hadoop.security.authentication", "simple");
-    dfsCluster = new MiniDFSCluster.Builder(conf).numDataNodes(dataNodes)
-    dfsCluster = new MiniDFSCluster(conf, dataNodes, true, null);
+    dfsCluster = new MiniDFSCluster.Builder(conf).numDataNodes(dataNodes)
+        .build();
     FileSystem fileSystem = dfsCluster.getFileSystem();
     fileSystem.mkdirs(new Path("/tmp"));
     fileSystem.mkdirs(new Path("/user"));

View File

@@ -43,7 +43,7 @@ public static Test suite() {
     TestSetup setup = new TestSetup(new TestSuite(TestDataJoin.class)) {
       protected void setUp() throws Exception {
         Configuration conf = new Configuration();
-        cluster = new MiniDFSCluster(conf, 2, true, null);
+        cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
       }
       protected void tearDown() throws Exception {
         if (cluster != null) {

View File

@@ -49,7 +49,7 @@ public class TestGlobbedCopyListing {
   @BeforeClass
   public static void setup() throws Exception {
-    cluster = new MiniDFSCluster(new Configuration(), 1, true, null);
+    cluster = new MiniDFSCluster.Builder(new Configuration()).build();
     createSourceData();
   }

View File

@@ -280,7 +280,7 @@ public void testCopyFromDfsToDfs() throws Exception {
     MiniDFSCluster cluster = null;
     try {
       Configuration conf = new Configuration();
-      cluster = new MiniDFSCluster(conf, 2, true, null);
+      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
       final FileSystem hdfs = cluster.getFileSystem();
       namenode = FileSystem.getDefaultUri(conf).toString();
       if (namenode.startsWith("hdfs://")) {
@@ -310,7 +310,7 @@ public void testEmptyDir() throws Exception {
     MiniDFSCluster cluster = null;
     try {
       Configuration conf = new Configuration();
-      cluster = new MiniDFSCluster(conf, 2, true, null);
+      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
       final FileSystem hdfs = cluster.getFileSystem();
       namenode = FileSystem.getDefaultUri(conf).toString();
       if (namenode.startsWith("hdfs://")) {
@@ -340,7 +340,7 @@ public void testCopyFromLocalToDfs() throws Exception {
     MiniDFSCluster cluster = null;
     try {
       Configuration conf = new Configuration();
-      cluster = new MiniDFSCluster(conf, 1, true, null);
+      cluster = new MiniDFSCluster.Builder(conf).build();
       final FileSystem hdfs = cluster.getFileSystem();
       final String namenode = hdfs.getUri().toString();
       if (namenode.startsWith("hdfs://")) {
@@ -369,7 +369,7 @@ public void testCopyFromDfsToLocal() throws Exception {
     try {
       Configuration conf = new Configuration();
       final FileSystem localfs = FileSystem.get(LOCAL_FS, conf);
-      cluster = new MiniDFSCluster(conf, 1, true, null);
+      cluster = new MiniDFSCluster.Builder(conf).build();
       final FileSystem hdfs = cluster.getFileSystem();
       final String namenode = FileSystem.getDefaultUri(conf).toString();
       if (namenode.startsWith("hdfs://")) {
@@ -396,7 +396,7 @@ public void testCopyDfsToDfsUpdateOverwrite() throws Exception {
     MiniDFSCluster cluster = null;
     try {
       Configuration conf = new Configuration();
-      cluster = new MiniDFSCluster(conf, 2, true, null);
+      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
       final FileSystem hdfs = cluster.getFileSystem();
       final String namenode = hdfs.getUri().toString();
       if (namenode.startsWith("hdfs://")) {
@@ -456,7 +456,7 @@ public void testCopyDfsToDfsUpdateWithSkipCRC() throws Exception {
     MiniDFSCluster cluster = null;
     try {
       Configuration conf = new Configuration();
-      cluster = new MiniDFSCluster(conf, 2, true, null);
+      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
       final FileSystem hdfs = cluster.getFileSystem();
       final String namenode = hdfs.getUri().toString();
@@ -614,7 +614,7 @@ public void testBasedir() throws Exception {
     MiniDFSCluster cluster = null;
     try {
       Configuration conf = new Configuration();
-      cluster = new MiniDFSCluster(conf, 2, true, null);
+      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
       final FileSystem hdfs = cluster.getFileSystem();
       namenode = FileSystem.getDefaultUri(conf).toString();
       if (namenode.startsWith("hdfs://")) {
@@ -639,7 +639,7 @@ public void testPreserveOption() throws Exception {
     Configuration conf = new Configuration();
     MiniDFSCluster cluster = null;
     try {
-      cluster = new MiniDFSCluster(conf, 2, true, null);
+      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
       String nnUri = FileSystem.getDefaultUri(conf).toString();
       FileSystem fs = FileSystem.get(URI.create(nnUri), conf);
@@ -791,7 +791,7 @@ public void testLimits() throws Exception {
     Configuration conf = new Configuration();
     MiniDFSCluster cluster = null;
     try {
-      cluster = new MiniDFSCluster(conf, 2, true, null);
+      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
       final String nnUri = FileSystem.getDefaultUri(conf).toString();
       final FileSystem fs = FileSystem.get(URI.create(nnUri), conf);
       final DistCpV1 distcp = new DistCpV1(conf);
@@ -899,7 +899,7 @@ public void testHftpAccessControl() throws Exception {
       //start cluster by DFS_UGI
       final Configuration dfsConf = new Configuration();
-      cluster = new MiniDFSCluster(dfsConf, 2, true, null);
+      cluster = new MiniDFSCluster.Builder(dfsConf).numDataNodes(2).build();
       cluster.waitActive();
       final String httpAdd = dfsConf.get("dfs.http.address");
@@ -955,7 +955,7 @@ public void testDelete() throws Exception {
     conf.setInt("fs.trash.interval", 60);
     MiniDFSCluster cluster = null;
     try {
-      cluster = new MiniDFSCluster(conf, 2, true, null);
+      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
       final URI nnURI = FileSystem.getDefaultUri(conf);
       final String nnUri = nnURI.toString();
       final FileSystem fs = FileSystem.get(URI.create(nnUri), conf);
@@ -1027,7 +1027,7 @@ public void testDeleteLocal() throws Exception {
     try {
       Configuration conf = new Configuration();
       final FileSystem localfs = FileSystem.get(LOCAL_FS, conf);
-      cluster = new MiniDFSCluster(conf, 1, true, null);
+      cluster = new MiniDFSCluster.Builder(conf).build();
       final FileSystem hdfs = cluster.getFileSystem();
       final String namenode = FileSystem.getDefaultUri(conf).toString();
       if (namenode.startsWith("hdfs://")) {
@@ -1060,7 +1060,7 @@ public void testGlobbing() throws Exception {
     MiniDFSCluster cluster = null;
     try {
       Configuration conf = new Configuration();
-      cluster = new MiniDFSCluster(conf, 2, true, null);
+      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
       final FileSystem hdfs = cluster.getFileSystem();
       namenode = FileSystem.getDefaultUri(conf).toString();
       if (namenode.startsWith("hdfs://")) {

View File

@@ -39,7 +39,8 @@ public class TestDumpTypedBytes {
   @Test
   public void testDumping() throws Exception {
     Configuration conf = new Configuration();
-    MiniDFSCluster cluster = new MiniDFSCluster(conf, 2, true, null);
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2)
+        .build();
     FileSystem fs = cluster.getFileSystem();
     PrintStream psBackup = System.out;
     ByteArrayOutputStream out = new ByteArrayOutputStream();

View File

@@ -54,7 +54,7 @@ public TestFileArgs() throws IOException
   {
     // Set up mini cluster
     conf = new Configuration();
-    dfs = new MiniDFSCluster(conf, 1, true, null);
+    dfs = new MiniDFSCluster.Builder(conf).build();
     fileSys = dfs.getFileSystem();
     namenode = fileSys.getUri().getAuthority();
     mr = new MiniMRCluster(1, namenode, 1);

View File

@@ -39,7 +39,8 @@ public class TestLoadTypedBytes {
   @Test
   public void testLoading() throws Exception {
     Configuration conf = new Configuration();
-    MiniDFSCluster cluster = new MiniDFSCluster(conf, 2, true, null);
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2)
+        .build();
     FileSystem fs = cluster.getFileSystem();
     ByteArrayOutputStream out = new ByteArrayOutputStream();

View File

@@ -65,7 +65,7 @@ public TestMultipleArchiveFiles() throws Exception {
     input = "HADOOP";
     expectedOutput = "HADOOP\t\nHADOOP\t\n";
     conf = new Configuration();
-    dfs = new MiniDFSCluster(conf, 1, true, null);
+    dfs = new MiniDFSCluster.Builder(conf).build();
     fileSys = dfs.getFileSystem();
     namenode = fileSys.getUri().getAuthority();
     mr = new MiniMRCluster(1, namenode, 1);

View File

@@ -69,7 +69,7 @@ public void testMultipleCachefiles() throws Exception
     MiniDFSCluster dfs = null;
     try{
       Configuration conf = new Configuration();
-      dfs = new MiniDFSCluster(conf, 1, true, null);
+      dfs = new MiniDFSCluster.Builder(conf).build();
       FileSystem fileSys = dfs.getFileSystem();
       String namenode = fileSys.getUri().toString();

View File

@@ -61,7 +61,7 @@ public void testSymLink() throws Exception
     MiniDFSCluster dfs = null;
     try {
       Configuration conf = new Configuration();
-      dfs = new MiniDFSCluster(conf, 1, true, null);
+      dfs = new MiniDFSCluster.Builder(conf).build();
       FileSystem fileSys = dfs.getFileSystem();
       String namenode = fileSys.getUri().toString();
       mr = new MiniMRCluster(1, namenode, 3);