HDFS-2159. Deprecate DistributedFileSystem.getClient() and fix the deprecation warnings in DFSAdmin.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1147359 13f79535-47bb-0310-9956-ffa450edef68
commit c163455df4
parent c093580824
Author: Tsz-wo Sze
Date:   2011-07-16 05:12:40 +00:00

10 changed files with 39 additions and 25 deletions
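In short, this patch deprecates DistributedFileSystem.getClient(), adds a public DistributedFileSystem.getDataNodeStats(DatanodeReportType) overload, and routes test-only access to the internal DFSClient through DFSClientAdapter. A minimal caller-side sketch of the new overload, assuming a reachable HDFS cluster; the class name and namenode URI below are placeholders and are not part of this patch:

import java.io.IOException;
import java.net.URI;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.FSConstants.DatanodeReportType;

public class DatanodeStatsExample {
  public static void main(String[] args) throws IOException {
    Configuration conf = new Configuration();
    // Placeholder namenode address; point this at a real cluster.
    DistributedFileSystem dfs = (DistributedFileSystem) FileSystem.get(
        URI.create("hdfs://localhost:9000/"), conf);

    // Before this patch: dfs.getClient().datanodeReport(DatanodeReportType.LIVE)
    // After: ask the file system directly, per report type.
    DatanodeInfo[] live = dfs.getDataNodeStats(DatanodeReportType.LIVE);
    DatanodeInfo[] dead = dfs.getDataNodeStats(DatanodeReportType.DEAD);
    // The no-argument form keeps its old behavior and reports all datanodes.
    DatanodeInfo[] all = dfs.getDataNodeStats();

    System.out.println("Datanodes available: " + live.length
        + " (" + all.length + " total, " + dead.length + " dead)");
    dfs.close();
  }
}

DFSAdmin.report() and printTopology() in the hunks below are rewritten against exactly these two overloads.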


@@ -560,6 +560,9 @@ Trunk (unreleased changes)
     HDFS-2153. Move DFSClientAdapter to test and fix some javac warnings in
     OfflineEditsViewerHelper. (szetszwo)

+    HDFS-2159. Deprecate DistributedFileSystem.getClient() and fixed the
+    deprecated warnings in DFSAdmin.  (szetszwo)
+
   OPTIMIZATIONS

     HDFS-1458. Improve checkpoint performance by avoiding unnecessary image


@@ -532,6 +532,9 @@ public class DistributedFileSystem extends FileSystem {
     return "DFS[" + dfs + "]";
   }

+  /** @deprecated DFSClient should not be accessed directly. */
+  @InterfaceAudience.Private
+  @Deprecated
   public DFSClient getClient() {
     return dfs;
   }
@@ -624,9 +627,15 @@ public class DistributedFileSystem extends FileSystem {
     return new CorruptFileBlockIterator(dfs, path);
   }

-  /** Return statistics for each datanode. */
+  /** @return datanode statistics. */
   public DatanodeInfo[] getDataNodeStats() throws IOException {
-    return dfs.datanodeReport(DatanodeReportType.ALL);
+    return getDataNodeStats(DatanodeReportType.ALL);
+  }
+
+  /** @return datanode statistics for the given type. */
+  public DatanodeInfo[] getDataNodeStats(final DatanodeReportType type
+      ) throws IOException {
+    return dfs.datanodeReport(type);
   }

   /**


@@ -34,7 +34,6 @@ import org.apache.hadoop.fs.FsStatus;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.shell.Command;
 import org.apache.hadoop.fs.shell.CommandFormat;
-import org.apache.hadoop.hdfs.DFSClient;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
@@ -95,7 +94,7 @@ public class DFSAdmin extends FsShell {
     /** Constructor */
     ClearQuotaCommand(String[] args, int pos, FileSystem fs) {
       super(fs);
-      CommandFormat c = new CommandFormat(NAME, 1, Integer.MAX_VALUE);
+      CommandFormat c = new CommandFormat(1, Integer.MAX_VALUE);
       List<String> parameters = c.parse(args, pos);
       this.args = parameters.toArray(new String[parameters.size()]);
     }
@@ -140,7 +139,7 @@ public class DFSAdmin extends FsShell {
     /** Constructor */
     SetQuotaCommand(String[] args, int pos, FileSystem fs) {
       super(fs);
-      CommandFormat c = new CommandFormat(NAME, 2, Integer.MAX_VALUE);
+      CommandFormat c = new CommandFormat(2, Integer.MAX_VALUE);
       List<String> parameters = c.parse(args, pos);
       this.quota = Long.parseLong(parameters.remove(0));
       this.args = parameters.toArray(new String[parameters.size()]);
@@ -180,7 +179,7 @@ public class DFSAdmin extends FsShell {
     /** Constructor */
     ClearSpaceQuotaCommand(String[] args, int pos, FileSystem fs) {
       super(fs);
-      CommandFormat c = new CommandFormat(NAME, 1, Integer.MAX_VALUE);
+      CommandFormat c = new CommandFormat(1, Integer.MAX_VALUE);
       List<String> parameters = c.parse(args, pos);
       this.args = parameters.toArray(new String[parameters.size()]);
     }
@@ -228,7 +227,7 @@ public class DFSAdmin extends FsShell {
     /** Constructor */
     SetSpaceQuotaCommand(String[] args, int pos, FileSystem fs) {
       super(fs);
-      CommandFormat c = new CommandFormat(NAME, 2, Integer.MAX_VALUE);
+      CommandFormat c = new CommandFormat(2, Integer.MAX_VALUE);
       List<String> parameters = c.parse(args, pos);
       String str = parameters.remove(0).trim();
       quota = StringUtils.TraditionalBinaryPrefix.string2long(str);
@@ -327,10 +326,8 @@ public class DFSAdmin extends FsShell {
     System.out.println("-------------------------------------------------");

-    DatanodeInfo[] live = dfs.getClient().datanodeReport(
-                                                 DatanodeReportType.LIVE);
-    DatanodeInfo[] dead = dfs.getClient().datanodeReport(
-                                                 DatanodeReportType.DEAD);
+    DatanodeInfo[] live = dfs.getDataNodeStats(DatanodeReportType.LIVE);
+    DatanodeInfo[] dead = dfs.getDataNodeStats(DatanodeReportType.DEAD);
     System.out.println("Datanodes available: " + live.length +
                        " (" + (live.length + dead.length) + " total, " +
                        dead.length + " dead)\n");
@@ -691,8 +688,7 @@ public class DFSAdmin extends FsShell {
    */
   public int printTopology() throws IOException {
     DistributedFileSystem dfs = getDFS();
-    DFSClient client = dfs.getClient();
-    DatanodeInfo[] report = client.datanodeReport(DatanodeReportType.ALL);
+    final DatanodeInfo[] report = dfs.getDataNodeStats();

     // Build a map of rack -> nodes from the datanode report
     HashMap<String, TreeSet<String> > tree = new HashMap<String, TreeSet<String>>();


@@ -23,10 +23,13 @@ import org.apache.hadoop.hdfs.protocol.ClientProtocol;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;

 public class DFSClientAdapter {
-  public static void stopLeaseRenewer(DFSClient dfsClient) throws IOException {
+  public static DFSClient getDFSClient(DistributedFileSystem dfs) {
+    return dfs.dfs;
+  }
+
+  public static void stopLeaseRenewer(DistributedFileSystem dfs) throws IOException {
     try {
-      dfsClient.leaserenewer.interruptAndJoin();
+      dfs.dfs.leaserenewer.interruptAndJoin();
     } catch (InterruptedException e) {
       throw new IOException(e);
     }
   }
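For test code, the deprecated getClient() call is replaced by the DFSClientAdapter helpers shown above. A condensed sketch of the pattern the test hunks below adopt; the class and method names here are illustrative only, and the DistributedFileSystem is assumed to come from a running MiniDFSCluster:

import java.io.IOException;

import org.apache.hadoop.hdfs.DFSClient;
import org.apache.hadoop.hdfs.DFSClientAdapter;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;

public class DFSClientAdapterUsage {
  // dfs is assumed to come from a MiniDFSCluster in a real test.
  static void inspectLastBlock(DistributedFileSystem dfs, String src) throws IOException {
    // Instead of the deprecated dfs.getClient():
    DFSClient client = DFSClientAdapter.getDFSClient(dfs);
    LocatedBlocks blocks = client.getNamenode().getBlockLocations(src, 0, 1);
    LocatedBlock last = blocks.getLastLocatedBlock();
    System.out.println("last block of " + src + ": " + last.getBlock());

    // stopLeaseRenewer() now takes the file system itself rather than a DFSClient.
    DFSClientAdapter.stopLeaseRenewer(dfs);
  }
}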


@@ -71,7 +71,7 @@ public class TestAbandonBlock {
     fout.hflush();

     // Now abandon the last block
-    DFSClient dfsclient = ((DistributedFileSystem)fs).getClient();
+    DFSClient dfsclient = DFSClientAdapter.getDFSClient((DistributedFileSystem)fs);
     LocatedBlocks blocks = dfsclient.getNamenode().getBlockLocations(src, 0, 1);
     LocatedBlock b = blocks.getLastLocatedBlock();
     dfsclient.getNamenode().abandonBlock(b.getBlock(), src, dfsclient.clientName);


@@ -80,7 +80,7 @@ public class TestLeaseRecovery extends junit.framework.TestCase {
     String filestr = "/foo";
     Path filepath = new Path(filestr);
     DFSTestUtil.createFile(dfs, filepath, ORG_FILE_SIZE, REPLICATION_NUM, 0L);
-    assertTrue(dfs.dfs.exists(filestr));
+    assertTrue(dfs.exists(filepath));
     DFSTestUtil.waitReplication(dfs, filepath, REPLICATION_NUM);

     //get block info for the last block


@@ -25,6 +25,7 @@ import java.util.List;

 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.DFSClientAdapter;
 import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
@@ -84,10 +85,11 @@ public class TestInterDatanodeProtocol {
     String filestr = "/foo";
     Path filepath = new Path(filestr);
     DFSTestUtil.createFile(dfs, filepath, 1024L, (short)3, 0L);
-    assertTrue(dfs.getClient().exists(filestr));
+    assertTrue(dfs.exists(filepath));

     //get block info
-    LocatedBlock locatedblock = getLastLocatedBlock(dfs.getClient().getNamenode(), filestr);
+    LocatedBlock locatedblock = getLastLocatedBlock(
+        DFSClientAdapter.getDFSClient(dfs).getNamenode(), filestr);
     DatanodeInfo[] datanodeinfo = locatedblock.getLocations();
     assertTrue(datanodeinfo.length > 0);
@@ -236,7 +238,7 @@ public class TestInterDatanodeProtocol {

     //get block info
     final LocatedBlock locatedblock = getLastLocatedBlock(
-        dfs.getClient().getNamenode(), filestr);
+        DFSClientAdapter.getDFSClient(dfs).getNamenode(), filestr);
     final DatanodeInfo[] datanodeinfo = locatedblock.getLocations();
     Assert.assertTrue(datanodeinfo.length > 0);


@@ -25,6 +25,7 @@ import org.apache.commons.logging.LogFactory;
 import org.apache.commons.logging.impl.Log4JLogger;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.DFSClientAdapter;
 import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
@@ -32,7 +33,6 @@ import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.FSConstants.DatanodeReportType;
-import org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtocol;
 import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status;
 import org.apache.hadoop.hdfs.server.common.HdfsConstants.ReplicaState;
@@ -124,7 +124,7 @@ public class TestTransferRbw {

       final ExtendedBlock b = new ExtendedBlock(bpid, oldrbw.getBlockId(), oldrbw.getBytesAcked(),
           oldrbw.getGenerationStamp());
       final BlockOpResponseProto s = DFSTestUtil.transferRbw(
-          b, fs.getClient(), oldnodeinfo, newnodeinfo);
+          b, DFSClientAdapter.getDFSClient(fs), oldnodeinfo, newnodeinfo);
       Assert.assertEquals(Status.SUCCESS, s.getStatus());
     }


@@ -221,7 +221,7 @@ public class OfflineEditsViewerHelper {
     // OP_REASSIGN_LEASE 22
     String filePath = "/hard-lease-recovery-test";
     byte[] bytes = "foo-bar-baz".getBytes();
-    DFSClientAdapter.stopLeaseRenewer(dfs.getClient());
+    DFSClientAdapter.stopLeaseRenewer(dfs);
     FSDataOutputStream leaseRecoveryPath = dfs.create(new Path(filePath));
     leaseRecoveryPath.write(bytes);
     leaseRecoveryPath.hflush();


@@ -27,6 +27,7 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.BlockLocation;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.DFSClientAdapter;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
@@ -72,7 +73,7 @@ public class TestBlockUnderConstruction {
     // wait until the block is allocated by DataStreamer
     BlockLocation[] locatedBlocks;
     while(blocksAfter <= blocksBefore) {
-      locatedBlocks = hdfs.getClient().getBlockLocations(
+      locatedBlocks = DFSClientAdapter.getDFSClient(hdfs).getBlockLocations(
           file.toString(), 0L, BLOCK_SIZE*NUM_BLOCKS);
       blocksAfter = locatedBlocks == null ? 0 : locatedBlocks.length;
     }