diff --git a/hdfs/CHANGES.txt b/hdfs/CHANGES.txt
index 361a0dd6dd5..91e49be8288 100644
--- a/hdfs/CHANGES.txt
+++ b/hdfs/CHANGES.txt
@@ -560,6 +560,9 @@ Trunk (unreleased changes)
     HDFS-2153. Move DFSClientAdapter to test and fix some javac warnings in
     OfflineEditsViewerHelper. (szetszwo)
 
+    HDFS-2159. Deprecate DistributedFileSystem.getClient() and fixed the
+    deprecated warnings in DFSAdmin. (szetszwo)
+
   OPTIMIZATIONS
 
     HDFS-1458. Improve checkpoint performance by avoiding unnecessary image
diff --git a/hdfs/src/java/org/apache/hadoop/hdfs/DistributedFileSystem.java b/hdfs/src/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
index c4e916e68b7..3ff709b0ff7 100644
--- a/hdfs/src/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
+++ b/hdfs/src/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
@@ -532,6 +532,9 @@ public class DistributedFileSystem extends FileSystem {
     return "DFS[" + dfs + "]";
   }
 
+  /** @deprecated DFSClient should not be accessed directly. */
+  @InterfaceAudience.Private
+  @Deprecated
   public DFSClient getClient() {
     return dfs;
   }
@@ -624,9 +627,15 @@ public class DistributedFileSystem extends FileSystem {
     return new CorruptFileBlockIterator(dfs, path);
   }
 
-  /** Return statistics for each datanode. */
+  /** @return datanode statistics. */
   public DatanodeInfo[] getDataNodeStats() throws IOException {
-    return dfs.datanodeReport(DatanodeReportType.ALL);
+    return getDataNodeStats(DatanodeReportType.ALL);
+  }
+
+  /** @return datanode statistics for the given type. */
+  public DatanodeInfo[] getDataNodeStats(final DatanodeReportType type
+      ) throws IOException {
+    return dfs.datanodeReport(type);
   }
 
   /**
diff --git a/hdfs/src/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java b/hdfs/src/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
index e7901a9f02f..ffbf783f092 100644
--- a/hdfs/src/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
+++ b/hdfs/src/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
@@ -34,7 +34,6 @@ import org.apache.hadoop.fs.FsStatus;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.shell.Command;
 import org.apache.hadoop.fs.shell.CommandFormat;
-import org.apache.hadoop.hdfs.DFSClient;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
@@ -95,7 +94,7 @@ public class DFSAdmin extends FsShell {
     /** Constructor */
     ClearQuotaCommand(String[] args, int pos, FileSystem fs) {
       super(fs);
-      CommandFormat c = new CommandFormat(NAME, 1, Integer.MAX_VALUE);
+      CommandFormat c = new CommandFormat(1, Integer.MAX_VALUE);
       List<String> parameters = c.parse(args, pos);
       this.args = parameters.toArray(new String[parameters.size()]);
     }
@@ -140,7 +139,7 @@ public class DFSAdmin extends FsShell {
     /** Constructor */
     SetQuotaCommand(String[] args, int pos, FileSystem fs) {
       super(fs);
-      CommandFormat c = new CommandFormat(NAME, 2, Integer.MAX_VALUE);
+      CommandFormat c = new CommandFormat(2, Integer.MAX_VALUE);
       List<String> parameters = c.parse(args, pos);
       this.quota = Long.parseLong(parameters.remove(0));
       this.args = parameters.toArray(new String[parameters.size()]);
@@ -180,7 +179,7 @@ public class DFSAdmin extends FsShell {
     /** Constructor */
     ClearSpaceQuotaCommand(String[] args, int pos, FileSystem fs) {
       super(fs);
-      CommandFormat c = new CommandFormat(NAME, 1, Integer.MAX_VALUE);
+      CommandFormat c = new CommandFormat(1, Integer.MAX_VALUE);
       List<String> parameters = c.parse(args, pos);
       this.args = parameters.toArray(new String[parameters.size()]);
     }
@@ -228,7 +227,7 @@ public class DFSAdmin extends FsShell {
     /** Constructor */
     SetSpaceQuotaCommand(String[] args, int pos, FileSystem fs) {
       super(fs);
-      CommandFormat c = new CommandFormat(NAME, 2, Integer.MAX_VALUE);
+      CommandFormat c = new CommandFormat(2, Integer.MAX_VALUE);
       List<String> parameters = c.parse(args, pos);
       String str = parameters.remove(0).trim();
       quota = StringUtils.TraditionalBinaryPrefix.string2long(str);
@@ -327,10 +326,8 @@ public class DFSAdmin extends FsShell {
 
       System.out.println("-------------------------------------------------");
       
-      DatanodeInfo[] live = dfs.getClient().datanodeReport(
-                                                   DatanodeReportType.LIVE);
-      DatanodeInfo[] dead = dfs.getClient().datanodeReport(
-                                                   DatanodeReportType.DEAD);
+      DatanodeInfo[] live = dfs.getDataNodeStats(DatanodeReportType.LIVE);
+      DatanodeInfo[] dead = dfs.getDataNodeStats(DatanodeReportType.DEAD);
       System.out.println("Datanodes available: " + live.length +
                          " (" + (live.length + dead.length) + " total, " +
                          dead.length + " dead)\n");
@@ -691,9 +688,8 @@ public class DFSAdmin extends FsShell {
    */
   public int printTopology() throws IOException {
       DistributedFileSystem dfs = getDFS();
-      DFSClient client = dfs.getClient();
-      DatanodeInfo[] report = client.datanodeReport(DatanodeReportType.ALL);
-      
+      final DatanodeInfo[] report = dfs.getDataNodeStats();
+
       // Build a map of rack -> nodes from the datanode report
       HashMap<String, TreeSet<String>> tree = new HashMap<String, TreeSet<String>>();
       for(DatanodeInfo dni : report) {
diff --git a/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/DFSClientAdapter.java b/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/DFSClientAdapter.java
index 791ec6b33b3..20302217c12 100644
--- a/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/DFSClientAdapter.java
+++ b/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/DFSClientAdapter.java
@@ -23,10 +23,13 @@ import org.apache.hadoop.hdfs.protocol.ClientProtocol;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 
 public class DFSClientAdapter {
+  public static DFSClient getDFSClient(DistributedFileSystem dfs) {
+    return dfs.dfs;
+  }
 
-  public static void stopLeaseRenewer(DFSClient dfsClient) throws IOException {
+  public static void stopLeaseRenewer(DistributedFileSystem dfs) throws IOException {
     try {
-      dfsClient.leaserenewer.interruptAndJoin();
+      dfs.dfs.leaserenewer.interruptAndJoin();
     } catch (InterruptedException e) {
       throw new IOException(e);
     }
diff --git a/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/TestAbandonBlock.java b/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/TestAbandonBlock.java
index 27ff2c142ad..d06d9766c3d 100644
--- a/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/TestAbandonBlock.java
+++ b/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/TestAbandonBlock.java
@@ -71,7 +71,7 @@ public class TestAbandonBlock {
     fout.hflush();
 
     // Now abandon the last block
-    DFSClient dfsclient = ((DistributedFileSystem)fs).getClient();
+    DFSClient dfsclient = DFSClientAdapter.getDFSClient((DistributedFileSystem)fs);
     LocatedBlocks blocks = dfsclient.getNamenode().getBlockLocations(src, 0, 1);
     LocatedBlock b = blocks.getLastLocatedBlock();
     dfsclient.getNamenode().abandonBlock(b.getBlock(), src, dfsclient.clientName);
diff --git a/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/TestLeaseRecovery.java b/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/TestLeaseRecovery.java
index cb7ae2749b3..6e65ad2c51d 100644
--- a/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/TestLeaseRecovery.java
+++ b/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/TestLeaseRecovery.java
@@ -80,7 +80,7 @@ public class TestLeaseRecovery extends junit.framework.TestCase {
       String filestr = "/foo";
       Path filepath = new Path(filestr);
       DFSTestUtil.createFile(dfs, filepath, ORG_FILE_SIZE, REPLICATION_NUM, 0L);
-      assertTrue(dfs.dfs.exists(filestr));
+      assertTrue(dfs.exists(filepath));
       DFSTestUtil.waitReplication(dfs, filepath, REPLICATION_NUM);
 
       //get block info for the last block
diff --git a/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestInterDatanodeProtocol.java b/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestInterDatanodeProtocol.java
index 67e4412cd59..eb58f7f195a 100644
--- a/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestInterDatanodeProtocol.java
+++ b/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestInterDatanodeProtocol.java
@@ -25,6 +25,7 @@ import java.util.List;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.DFSClientAdapter;
 import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
@@ -84,10 +85,11 @@ public class TestInterDatanodeProtocol {
     String filestr = "/foo";
     Path filepath = new Path(filestr);
     DFSTestUtil.createFile(dfs, filepath, 1024L, (short)3, 0L);
-    assertTrue(dfs.getClient().exists(filestr));
+    assertTrue(dfs.exists(filepath));
 
     //get block info
-    LocatedBlock locatedblock = getLastLocatedBlock(dfs.getClient().getNamenode(), filestr);
+    LocatedBlock locatedblock = getLastLocatedBlock(
+        DFSClientAdapter.getDFSClient(dfs).getNamenode(), filestr);
     DatanodeInfo[] datanodeinfo = locatedblock.getLocations();
     assertTrue(datanodeinfo.length > 0);
 
@@ -236,7 +238,7 @@ public class TestInterDatanodeProtocol {
 
       //get block info
       final LocatedBlock locatedblock = getLastLocatedBlock(
-          dfs.getClient().getNamenode(), filestr);
+          DFSClientAdapter.getDFSClient(dfs).getNamenode(), filestr);
       final DatanodeInfo[] datanodeinfo = locatedblock.getLocations();
       Assert.assertTrue(datanodeinfo.length > 0);
 
diff --git a/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestTransferRbw.java b/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestTransferRbw.java
index 07c35e7c13f..fba411ab88d 100644
--- a/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestTransferRbw.java
+++ b/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestTransferRbw.java
@@ -25,6 +25,7 @@ import org.apache.commons.logging.LogFactory;
 import org.apache.commons.logging.impl.Log4JLogger;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.DFSClientAdapter;
 import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
@@ -32,7 +33,6 @@ import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.FSConstants.DatanodeReportType;
-import org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtocol;
 import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status;
 import org.apache.hadoop.hdfs.server.common.HdfsConstants.ReplicaState;
@@ -124,7 +124,7 @@ public class TestTransferRbw {
       final ExtendedBlock b = new ExtendedBlock(bpid, oldrbw.getBlockId(),
           oldrbw.getBytesAcked(), oldrbw.getGenerationStamp());
       final BlockOpResponseProto s = DFSTestUtil.transferRbw(
-          b, fs.getClient(), oldnodeinfo, newnodeinfo);
+          b, DFSClientAdapter.getDFSClient(fs), oldnodeinfo, newnodeinfo);
       Assert.assertEquals(Status.SUCCESS, s.getStatus());
     }
 
diff --git a/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/OfflineEditsViewerHelper.java b/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/OfflineEditsViewerHelper.java
index 059592522e4..7ca4524f99b 100644
--- a/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/OfflineEditsViewerHelper.java
+++ b/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/OfflineEditsViewerHelper.java
@@ -221,7 +221,7 @@ public class OfflineEditsViewerHelper {
     // OP_REASSIGN_LEASE 22
     String filePath = "/hard-lease-recovery-test";
     byte[] bytes = "foo-bar-baz".getBytes();
-    DFSClientAdapter.stopLeaseRenewer(dfs.getClient());
+    DFSClientAdapter.stopLeaseRenewer(dfs);
     FSDataOutputStream leaseRecoveryPath = dfs.create(new Path(filePath));
     leaseRecoveryPath.write(bytes);
     leaseRecoveryPath.hflush();
diff --git a/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestBlockUnderConstruction.java b/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestBlockUnderConstruction.java
index b283b8a33fa..cc1781490a1 100644
--- a/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestBlockUnderConstruction.java
+++ b/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestBlockUnderConstruction.java
@@ -27,6 +27,7 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.BlockLocation;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.DFSClientAdapter;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
@@ -72,7 +73,7 @@ public class TestBlockUnderConstruction {
     // wait until the block is allocated by DataStreamer
     BlockLocation[] locatedBlocks;
     while(blocksAfter <= blocksBefore) {
-      locatedBlocks = hdfs.getClient().getBlockLocations(
+      locatedBlocks = DFSClientAdapter.getDFSClient(hdfs).getBlockLocations(
          file.toString(), 0L, BLOCK_SIZE*NUM_BLOCKS);
       blocksAfter = locatedBlocks == null ? 0 : locatedBlocks.length;
     }
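
Usage sketch (illustrative, not part of the patch): after this change, application code can request datanode statistics directly from DistributedFileSystem instead of going through the now-deprecated getClient(). The Configuration/FileSystem setup and the ListLiveDatanodes class name below are assumptions for illustration; only getDataNodeStats(DatanodeReportType) comes from this patch.

    import java.io.IOException;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.hdfs.DistributedFileSystem;
    import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
    import org.apache.hadoop.hdfs.protocol.FSConstants.DatanodeReportType;

    public class ListLiveDatanodes {
      public static void main(String[] args) throws IOException {
        // Assumes the default file system configured here is an HDFS cluster.
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.get(conf);
        if (!(fs instanceof DistributedFileSystem)) {
          throw new IllegalStateException("Not an HDFS file system: " + fs.getUri());
        }
        DistributedFileSystem dfs = (DistributedFileSystem) fs;

        // Before: dfs.getClient().datanodeReport(DatanodeReportType.LIVE)  (deprecated path)
        // After:  the typed overload added to DistributedFileSystem by this patch.
        DatanodeInfo[] live = dfs.getDataNodeStats(DatanodeReportType.LIVE);
        for (DatanodeInfo dn : live) {
          System.out.println(dn.getName());
        }
      }
    }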