From 590df942532787eda007e933a9766b9de20c7200 Mon Sep 17 00:00:00 2001 From: Suresh Srinivas Date: Tue, 25 Mar 2014 05:56:53 +0000 Subject: [PATCH] HDFS-6125. Merge 1581242 from trunk git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/branch-2@1581243 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 2 ++ .../contrib/bkjournal/EditLogLedgerMetadata.java | 2 +- .../hadoop/hdfs/server/datanode/DataStorage.java | 2 +- .../hdfs/server/namenode/ClusterJspHelper.java | 2 +- .../hadoop/hdfs/server/namenode/FSDirectory.java | 2 +- .../hadoop/hdfs/server/namenode/FSEditLogOp.java | 2 +- .../hdfs/server/namenode/FSNamesystem.java | 2 +- .../server/namenode/FileChecksumServlets.java | 2 +- .../hdfs/server/namenode/FileDataServlet.java | 2 +- .../ha/ConfiguredFailoverProxyProvider.java | 2 +- .../snapshot/FSImageFormatPBSnapshot.java | 2 +- .../hdfs/tools/DelegationTokenFetcher.java | 3 +-- .../org/apache/hadoop/hdfs/web/JsonUtil.java | 2 +- .../org/apache/hadoop/hdfs/FileAppendTest4.java | 2 +- .../hadoop/hdfs/TestBalancerBandwidth.java | 2 +- .../hadoop/hdfs/TestBlockMissingException.java | 2 +- .../hadoop/hdfs/TestClientReportBadBlock.java | 2 +- .../org/apache/hadoop/hdfs/TestDFSMkdirs.java | 2 +- .../org/apache/hadoop/hdfs/TestDFSShell.java | 11 +++++------ .../hadoop/hdfs/TestDFSUpgradeFromImage.java | 2 +- .../org/apache/hadoop/hdfs/TestDecommission.java | 2 +- .../hadoop/hdfs/TestDistributedFileSystem.java | 4 ++-- .../org/apache/hadoop/hdfs/TestFileAppend3.java | 2 +- .../hadoop/hdfs/TestFileCreationEmpty.java | 2 +- .../hdfs/TestFileLengthOnClusterRestart.java | 2 +- .../java/org/apache/hadoop/hdfs/TestHFlush.java | 8 ++++---- .../apache/hadoop/hdfs/TestLeaseRecovery.java | 2 +- .../apache/hadoop/hdfs/TestMiniDFSCluster.java | 2 +- .../hadoop/hdfs/TestMissingBlocksAlert.java | 4 ++-- .../org/apache/hadoop/hdfs/TestPipelines.java | 2 +- .../hdfs/TestReplaceDatanodeOnFailure.java | 4 ++-- .../TestClientProtocolWithDelegationToken.java | 2 +- .../blockmanagement/TestHeartbeatHandling.java | 16 ++++++++-------- .../server/datanode/TestCachingStrategy.java | 8 ++++---- .../server/datanode/TestDataNodeMetrics.java | 2 +- .../hdfs/server/datanode/TestTransferRbw.java | 2 +- .../namenode/OfflineEditsViewerHelper.java | 2 +- .../namenode/TestBlockUnderConstruction.java | 2 +- .../TestCheckPointForSecurityTokens.java | 2 +- .../server/namenode/TestFSPermissionChecker.java | 2 +- .../hadoop/hdfs/server/namenode/TestFsck.java | 8 ++++---- .../hdfs/server/namenode/TestHDFSConcat.java | 2 +- .../namenode/TestNNStorageRetentionManager.java | 2 +- .../hdfs/server/namenode/TestSaveNamespace.java | 2 +- .../namenode/TestSecurityTokenEditLog.java | 4 ++-- .../hadoop/hdfs/server/namenode/TestStartup.java | 2 +- .../server/namenode/ha/TestRetryCacheWithHA.java | 2 +- .../TestNNMetricFilesInGetListingOps.java | 2 +- .../namenode/metrics/TestNameNodeMetrics.java | 2 +- .../snapshot/TestRenameWithSnapshots.java | 4 ++-- .../server/namenode/snapshot/TestSnapshot.java | 2 +- .../hadoop/hdfs/util/TestLightWeightHashSet.java | 2 +- .../hdfs/util/TestLightWeightLinkedSet.java | 2 +- .../hdfs/web/TestByteRangeInputStream.java | 2 +- .../org/apache/hadoop/hdfs/web/TestJsonUtil.java | 4 ++-- 55 files changed, 82 insertions(+), 82 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt index 798b3ed3cf4..fa3430b0966 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt +++ 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt @@ -11,6 +11,8 @@ Release 2.5.0 - UNRELEASED HDFS-6007. Update documentation about short-circuit local reads (iwasakims via cmccabe) + HDFS-6125. Cleanup unnecessary cast in HDFS code base. (suresh) + OPTIMIZATIONS BUG FIXES diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/main/java/org/apache/hadoop/contrib/bkjournal/EditLogLedgerMetadata.java b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/main/java/org/apache/hadoop/contrib/bkjournal/EditLogLedgerMetadata.java index 6aa87e7142b..039cb3a6122 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/main/java/org/apache/hadoop/contrib/bkjournal/EditLogLedgerMetadata.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/main/java/org/apache/hadoop/contrib/bkjournal/EditLogLedgerMetadata.java @@ -203,7 +203,7 @@ public class EditLogLedgerMetadata { hash = hash * 31 + (int) ledgerId; hash = hash * 31 + (int) firstTxId; hash = hash * 31 + (int) lastTxId; - hash = hash * 31 + (int) dataLayoutVersion; + hash = hash * 31 + dataLayoutVersion; return hash; } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java index c6ea6a2edd8..6f32f926b57 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java @@ -143,7 +143,7 @@ public class DataStorage extends Storage { */ public String getTrashDirectoryForBlockFile(String bpid, File blockFile) { if (trashEnabledBpids.contains(bpid)) { - return ((BlockPoolSliceStorage) getBPStorage(bpid)).getTrashDirectory(blockFile); + return getBPStorage(bpid).getTrashDirectory(blockFile); } return null; } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ClusterJspHelper.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ClusterJspHelper.java index e2e71e40065..2d67426ba82 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ClusterJspHelper.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ClusterJspHelper.java @@ -308,7 +308,7 @@ class ClusterJspHelper { // Inner map of attribute name to value Map innerMap = entry.getValue(); if (innerMap != null) { - if (((String) innerMap.get("adminState")) + if (innerMap.get("adminState") .equals(AdminStates.DECOMMISSIONED.toString())) { nn.liveDecomCount++; } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java index 055b7064bcd..8210495888d 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java @@ -2396,7 +2396,7 @@ public class FSDirectory implements Closeable { */ public final void addToInodeMap(INode inode) { if (inode instanceof INodeWithAdditionalFields) { - inodeMap.put((INodeWithAdditionalFields)inode); + inodeMap.put(inode); } } diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java index 7a824230072..3e90b99e563 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java @@ -3507,7 +3507,7 @@ public abstract class FSEditLogOp { @Override void readFields(DataInputStream in, int logVersion) throws IOException { - AclEditLogProto p = AclEditLogProto.parseDelimitedFrom((DataInputStream)in); + AclEditLogProto p = AclEditLogProto.parseDelimitedFrom(in); if (p == null) { throw new IOException("Failed to read fields from SetAclOp"); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java index c508e2f98ca..decd6bb78c9 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java @@ -3911,7 +3911,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats, assert hasReadLock(); final BlockCollection bc = blockUC.getBlockCollection(); if (bc == null || !(bc instanceof INodeFile) - || !((INodeFile) bc).isUnderConstruction()) { + || !bc.isUnderConstruction()) { return false; } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileChecksumServlets.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileChecksumServlets.java index 4e0ec8f4550..d77010d8850 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileChecksumServlets.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileChecksumServlets.java @@ -55,7 +55,7 @@ public class FileChecksumServlets { HttpServletRequest request, NameNode nn) throws IOException { final String hostname = host instanceof DatanodeInfo - ? ((DatanodeInfo)host).getHostName() : host.getIpAddr(); + ? 
host.getHostName() : host.getIpAddr(); final String scheme = request.getScheme(); int port = host.getInfoPort(); if ("https".equals(scheme)) { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileDataServlet.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileDataServlet.java index 1655c4781dc..d6f36c21639 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileDataServlet.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileDataServlet.java @@ -56,7 +56,7 @@ public class FileDataServlet extends DfsServlet { final DatanodeID host = pickSrcDatanode(blks, status, conf); final String hostname; if (host instanceof DatanodeInfo) { - hostname = ((DatanodeInfo)host).getHostName(); + hostname = host.getHostName(); } else { hostname = host.getIpAddr(); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/ConfiguredFailoverProxyProvider.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/ConfiguredFailoverProxyProvider.java index 608cd00080e..84ed633874d 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/ConfiguredFailoverProxyProvider.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/ConfiguredFailoverProxyProvider.java @@ -128,7 +128,7 @@ public class ConfiguredFailoverProxyProvider implements throw new RuntimeException(e); } } - return new ProxyInfo((T)current.namenode, current.address.toString()); + return new ProxyInfo(current.namenode, current.address.toString()); } @Override diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/FSImageFormatPBSnapshot.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/FSImageFormatPBSnapshot.java index 2a7b242ceb3..4f0e507d9c1 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/FSImageFormatPBSnapshot.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/FSImageFormatPBSnapshot.java @@ -414,7 +414,7 @@ public class FSImageFormatPBSnapshot { rb.setLastSnapshotId(((WithName) ref).getLastSnapshotId()).setName( ByteString.copyFrom(ref.getLocalNameBytes())); } else if (ref instanceof DstReference) { - rb.setDstSnapshotId(((DstReference) ref).getDstSnapshotId()); + rb.setDstSnapshotId(ref.getDstSnapshotId()); } return rb; } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DelegationTokenFetcher.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DelegationTokenFetcher.java index 385cbb346da..feaff694556 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DelegationTokenFetcher.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DelegationTokenFetcher.java @@ -346,8 +346,7 @@ public class DelegationTokenFetcher { Class.forName(exceptionClass).asSubclass(Exception.class); // we are interested in constructor with String arguments java.lang.reflect.Constructor constructor = - (java.lang.reflect.Constructor) - ec.getConstructor (new Class[] {String.class}); + ec.getConstructor (new Class[] {String.class}); // create an instance e = (IOException) 
constructor.newInstance (exceptionMsg); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java index afae1a3e28f..cc31bfe61b0 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java @@ -317,7 +317,7 @@ public class JsonUtil { if (value == null) { return defaultValue; } - return (long) (Long) value; + return (Long) value; } private static String getString(Map m, String key, diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/FileAppendTest4.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/FileAppendTest4.java index 29306dc704a..486024474f5 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/FileAppendTest4.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/FileAppendTest4.java @@ -62,7 +62,7 @@ public class FileAppendTest4 { conf = new HdfsConfiguration(); init(conf); cluster = new MiniDFSCluster.Builder(conf).numDataNodes(DATANODE_NUM).build(); - fs = (DistributedFileSystem)cluster.getFileSystem(); + fs = cluster.getFileSystem(); } @AfterClass diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBalancerBandwidth.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBalancerBandwidth.java index 3f29932c1a5..29869b1b0ce 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBalancerBandwidth.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBalancerBandwidth.java @@ -50,7 +50,7 @@ public class TestBalancerBandwidth { try { cluster.waitActive(); - DistributedFileSystem fs = (DistributedFileSystem) cluster.getFileSystem(); + DistributedFileSystem fs = cluster.getFileSystem(); ArrayList datanodes = cluster.getDataNodes(); // Ensure value from the configuration is reflected in the datanodes. 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockMissingException.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockMissingException.java index a885ff4b13d..132ed3f4b4f 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockMissingException.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockMissingException.java @@ -54,7 +54,7 @@ public class TestBlockMissingException { try { dfs = new MiniDFSCluster.Builder(conf).numDataNodes(NUM_DATANODES).build(); dfs.waitActive(); - fileSys = (DistributedFileSystem)dfs.getFileSystem(); + fileSys = dfs.getFileSystem(); Path file1 = new Path("/user/dhruba/raidtest/file1"); createOldFile(fileSys, file1, 1, numBlocks, blockSize); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestClientReportBadBlock.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestClientReportBadBlock.java index 8d9ede019f5..6023a493b18 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestClientReportBadBlock.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestClientReportBadBlock.java @@ -77,7 +77,7 @@ public class TestClientReportBadBlock { cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDataNodes) .build(); cluster.waitActive(); - dfs = (DistributedFileSystem) cluster.getFileSystem(); + dfs = cluster.getFileSystem(); buffersize = conf.getInt(CommonConfigurationKeys.IO_FILE_BUFFER_SIZE_KEY, 4096); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSMkdirs.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSMkdirs.java index e516d99f975..e19f3281e20 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSMkdirs.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSMkdirs.java @@ -92,7 +92,7 @@ public class TestDFSMkdirs { public void testMkdir() throws IOException { Configuration conf = new HdfsConfiguration(); MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build(); - DistributedFileSystem dfs = (DistributedFileSystem) cluster.getFileSystem(); + DistributedFileSystem dfs = cluster.getFileSystem(); try { // Create a dir in root dir, should succeed assertTrue(dfs.mkdir(new Path("/mkdir-" + Time.now()), diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java index 5c4aca7f29c..e94972d03f0 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java @@ -657,8 +657,7 @@ public class TestDFSShell { // Test a simple codec. Extension based detection. We use // Bzip2 cause its non-native. - CompressionCodec codec = (CompressionCodec) - ReflectionUtils.newInstance(BZip2Codec.class, conf); + CompressionCodec codec = ReflectionUtils.newInstance(BZip2Codec.class, conf); String extension = codec.getDefaultExtension(); Path p = new Path(root, "file." 
+ extension); OutputStream fout = new DataOutputStream(codec.createOutputStream( @@ -785,7 +784,7 @@ public class TestDFSShell { public void testCount() throws Exception { Configuration conf = new HdfsConfiguration(); MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build(); - DistributedFileSystem dfs = (DistributedFileSystem)cluster.getFileSystem(); + DistributedFileSystem dfs = cluster.getFileSystem(); FsShell shell = new FsShell(); shell.setConf(conf); @@ -1488,7 +1487,7 @@ public class TestDFSShell { try { cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).format(true) .build(); - dfs = (DistributedFileSystem)cluster.getFileSystem(); + dfs = cluster.getFileSystem(); mkdir(dfs, root); dfs.copyFromLocalFile(false, false, new Path(localf.getPath()), remotef); @@ -1517,7 +1516,7 @@ public class TestDFSShell { // Start the cluster again, but do not reformat, so prior files remain. cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).format(false) .build(); - dfs = (DistributedFileSystem)cluster.getFileSystem(); + dfs = cluster.getFileSystem(); assertEquals(null, runner.run(1)); String corruptedcontent = runner.run(0, "-ignoreCrc"); @@ -1541,7 +1540,7 @@ public class TestDFSShell { public void testLsr() throws Exception { final Configuration conf = new HdfsConfiguration(); MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build(); - DistributedFileSystem dfs = (DistributedFileSystem)cluster.getFileSystem(); + DistributedFileSystem dfs = cluster.getFileSystem(); try { final String root = createTree(dfs, "lsr"); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUpgradeFromImage.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUpgradeFromImage.java index 4b87eb16769..1e1f668f210 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUpgradeFromImage.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUpgradeFromImage.java @@ -443,7 +443,7 @@ public class TestDFSUpgradeFromImage { .clusterId("testClusterId"); cluster = bld.build(); cluster.waitActive(); - DistributedFileSystem dfs = (DistributedFileSystem)cluster.getFileSystem(); + DistributedFileSystem dfs = cluster.getFileSystem(); DFSClient dfsClient = dfs.dfs; //Safemode will be off only after upgrade is complete. Wait for it. 
while ( dfsClient.setSafeMode(HdfsConstants.SafeModeAction.SAFEMODE_GET) ) { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommission.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommission.java index 073179a495f..25cec12e23a 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommission.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommission.java @@ -156,7 +156,7 @@ public class TestDecommission { assertTrue("Not HDFS:"+fileSys.getUri(), fileSys instanceof DistributedFileSystem); HdfsDataInputStream dis = (HdfsDataInputStream) - ((DistributedFileSystem)fileSys).open(name); + fileSys.open(name); Collection dinfo = dis.getAllBlocks(); for (LocatedBlock blk : dinfo) { // for each block int hasdown = 0; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java index a0198bb7335..eb8a0137f54 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java @@ -479,7 +479,7 @@ public class TestDistributedFileSystem { current.getShortUserName() + "x", new String[]{"user"}); try { - ((DistributedFileSystem) hdfs).getFileChecksum(new Path( + hdfs.getFileChecksum(new Path( "/test/TestNonExistingFile")); fail("Expecting FileNotFoundException"); } catch (FileNotFoundException e) { @@ -490,7 +490,7 @@ public class TestDistributedFileSystem { try { Path path = new Path("/test/TestExistingDir/"); hdfs.mkdirs(path); - ((DistributedFileSystem) hdfs).getFileChecksum(path); + hdfs.getFileChecksum(path); fail("Expecting FileNotFoundException"); } catch (FileNotFoundException e) { assertTrue("Not throwing the intended exception message", e.getMessage() diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend3.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend3.java index a2ab1edda2d..9213bb094a8 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend3.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend3.java @@ -74,7 +74,7 @@ public class TestFileAppend3 { conf.setInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, 512); buffersize = conf.getInt(CommonConfigurationKeys.IO_FILE_BUFFER_SIZE_KEY, 4096); cluster = new MiniDFSCluster.Builder(conf).numDataNodes(DATANODE_NUM).build(); - fs = (DistributedFileSystem)cluster.getFileSystem(); + fs = cluster.getFileSystem(); } @AfterClass diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCreationEmpty.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCreationEmpty.java index 785af2373d6..fbada206bdb 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCreationEmpty.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCreationEmpty.java @@ -62,7 +62,7 @@ public class TestFileCreationEmpty { MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(DATANODE_NUM).build(); try { cluster.waitActive(); - DistributedFileSystem dfs = (DistributedFileSystem)cluster.getFileSystem(); + DistributedFileSystem dfs = 
cluster.getFileSystem(); // create a new file. TestFileCreation.createFile(dfs, new Path("/foo"), DATANODE_NUM); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileLengthOnClusterRestart.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileLengthOnClusterRestart.java index 8bafee67f85..c8420ca82a8 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileLengthOnClusterRestart.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileLengthOnClusterRestart.java @@ -44,7 +44,7 @@ public class TestFileLengthOnClusterRestart { HdfsDataInputStream in = null; try { Path path = new Path("/tmp/TestFileLengthOnClusterRestart", "test"); - DistributedFileSystem dfs = (DistributedFileSystem) cluster + DistributedFileSystem dfs = cluster .getFileSystem(); FSDataOutputStream out = dfs.create(path); int fileLength = 1030; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHFlush.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHFlush.java index dfe45f86d0c..7b4d2bb9911 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHFlush.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHFlush.java @@ -104,7 +104,7 @@ public class TestHFlush { MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes( 2).build(); DistributedFileSystem fileSystem = - (DistributedFileSystem)cluster.getFileSystem(); + cluster.getFileSystem(); try { Path path = new Path(fName); @@ -198,7 +198,7 @@ public class TestHFlush { .numDataNodes(replicas).build(); // Make sure we work with DFS in order to utilize all its functionality DistributedFileSystem fileSystem = - (DistributedFileSystem)cluster.getFileSystem(); + cluster.getFileSystem(); FSDataInputStream is; try { @@ -278,7 +278,7 @@ public class TestHFlush { MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(DATANODE_NUM).build(); try { - DistributedFileSystem fs = (DistributedFileSystem)cluster.getFileSystem(); + DistributedFileSystem fs = cluster.getFileSystem(); byte[] fileContents = AppendTestUtil.initBuffer(fileLen); @@ -328,7 +328,7 @@ public class TestHFlush { MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(DATANODE_NUM).build(); try { - DistributedFileSystem fs = (DistributedFileSystem)cluster.getFileSystem(); + DistributedFileSystem fs = cluster.getFileSystem(); // create a new file. 
FSDataOutputStream stm = AppendTestUtil.createFile(fs, p, DATANODE_NUM); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecovery.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecovery.java index 91c9ba80b04..3328d8e80eb 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecovery.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecovery.java @@ -89,7 +89,7 @@ public class TestLeaseRecovery { cluster.waitActive(); //create a file - DistributedFileSystem dfs = (DistributedFileSystem)cluster.getFileSystem(); + DistributedFileSystem dfs = cluster.getFileSystem(); String filestr = "/foo"; Path filepath = new Path(filestr); DFSTestUtil.createFile(dfs, filepath, ORG_FILE_SIZE, REPLICATION_NUM, 0L); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMiniDFSCluster.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMiniDFSCluster.java index 84678da2d40..cf29d97007c 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMiniDFSCluster.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMiniDFSCluster.java @@ -109,7 +109,7 @@ public class TestMiniDFSCluster { conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, c4Path); MiniDFSCluster cluster4 = new MiniDFSCluster.Builder(conf).build(); try { - DistributedFileSystem dfs = (DistributedFileSystem) cluster4.getFileSystem(); + DistributedFileSystem dfs = cluster4.getFileSystem(); dfs.setSafeMode(HdfsConstants.SafeModeAction.SAFEMODE_ENTER); cluster4.shutdown(); } finally { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMissingBlocksAlert.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMissingBlocksAlert.java index 5516ced5fe0..09fac4946e1 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMissingBlocksAlert.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMissingBlocksAlert.java @@ -62,8 +62,8 @@ public class TestMissingBlocksAlert { cluster.waitActive(); final BlockManager bm = cluster.getNamesystem().getBlockManager(); - DistributedFileSystem dfs = - (DistributedFileSystem) cluster.getFileSystem(); + DistributedFileSystem dfs = + cluster.getFileSystem(); // create a normal file DFSTestUtil.createFile(dfs, new Path("/testMissingBlocksAlert/file1"), diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestPipelines.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestPipelines.java index 209a0d5599a..e0bcb0ff671 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestPipelines.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestPipelines.java @@ -63,7 +63,7 @@ public class TestPipelines { @Before public void startUpCluster() throws IOException { cluster = new MiniDFSCluster.Builder(conf).numDataNodes(REPL_FACTOR).build(); - fs = (DistributedFileSystem) cluster.getFileSystem(); + fs = cluster.getFileSystem(); } @After diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReplaceDatanodeOnFailure.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReplaceDatanodeOnFailure.java index ab8df438dd9..a2908674c5c 100644 --- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReplaceDatanodeOnFailure.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReplaceDatanodeOnFailure.java @@ -122,7 +122,7 @@ public class TestReplaceDatanodeOnFailure { ).racks(racks).numDataNodes(REPLICATION).build(); try { - final DistributedFileSystem fs = (DistributedFileSystem)cluster.getFileSystem(); + final DistributedFileSystem fs = cluster.getFileSystem(); final Path dir = new Path(DIR); final SlowWriter[] slowwriters = new SlowWriter[10]; @@ -246,7 +246,7 @@ public class TestReplaceDatanodeOnFailure { ).numDataNodes(1).build(); try { - final DistributedFileSystem fs = (DistributedFileSystem)cluster.getFileSystem(); + final DistributedFileSystem fs = cluster.getFileSystem(); final Path f = new Path(DIR, "testAppend"); { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/TestClientProtocolWithDelegationToken.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/TestClientProtocolWithDelegationToken.java index ef885280d73..10dc65f9d89 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/TestClientProtocolWithDelegationToken.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/TestClientProtocolWithDelegationToken.java @@ -102,7 +102,7 @@ public class TestClientProtocolWithDelegationToken { public Object run() throws Exception { ClientProtocol proxy = null; try { - proxy = (ClientProtocol) RPC.getProxy(ClientProtocol.class, + proxy = RPC.getProxy(ClientProtocol.class, ClientProtocol.versionID, addr, conf); proxy.getServerDefaults(); } finally { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestHeartbeatHandling.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestHeartbeatHandling.java index 4b27aa68b3c..510f1596825 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestHeartbeatHandling.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestHeartbeatHandling.java @@ -184,9 +184,9 @@ public class TestHeartbeatHandling { DatanodeInfo[] recoveringNodes = recoveryCommand.getRecoveringBlocks() .toArray(new BlockRecoveryCommand.RecoveringBlock[0])[0].getLocations(); assertEquals(3, recoveringNodes.length); - assertEquals(recoveringNodes[0], (DatanodeInfo)dd1); - assertEquals(recoveringNodes[1], (DatanodeInfo)dd2); - assertEquals(recoveringNodes[2], (DatanodeInfo)dd3); + assertEquals(recoveringNodes[0], dd1); + assertEquals(recoveringNodes[1], dd2); + assertEquals(recoveringNodes[2], dd3); // Test with one stale node. dd1.setLastUpdate(System.currentTimeMillis()); @@ -206,8 +206,8 @@ public class TestHeartbeatHandling { .toArray(new BlockRecoveryCommand.RecoveringBlock[0])[0].getLocations(); assertEquals(2, recoveringNodes.length); // dd2 is skipped. - assertEquals(recoveringNodes[0], (DatanodeInfo)dd1); - assertEquals(recoveringNodes[1], (DatanodeInfo)dd3); + assertEquals(recoveringNodes[0], dd1); + assertEquals(recoveringNodes[1], dd3); // Test with all stale node. dd1.setLastUpdate(System.currentTimeMillis() - 60 * 1000); @@ -228,9 +228,9 @@ public class TestHeartbeatHandling { // Only dd1 is included since it heart beated and hence its not stale // when the list of recovery blocks is constructed. 
assertEquals(3, recoveringNodes.length); - assertEquals(recoveringNodes[0], (DatanodeInfo)dd1); - assertEquals(recoveringNodes[1], (DatanodeInfo)dd2); - assertEquals(recoveringNodes[2], (DatanodeInfo)dd3); + assertEquals(recoveringNodes[0], dd1); + assertEquals(recoveringNodes[1], dd2); + assertEquals(recoveringNodes[2], dd3); } } finally { namesystem.writeUnlock(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestCachingStrategy.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestCachingStrategy.java index 73fdd73b171..542daa9d899 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestCachingStrategy.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestCachingStrategy.java @@ -79,8 +79,8 @@ public class TestCachingStrategy { LOG.debug("got fadvise(offset=" + offset + ", len=" + len + ",flags=" + flags + ")"); if (flags == NativeIO.POSIX.POSIX_FADV_DONTNEED) { - for (int i = 0; i < (int)len; i++) { - dropped[(int)(offset + i)] = true; + for (int i = 0; i < len; i++) { + dropped[(offset + i)] = true; } } } @@ -165,7 +165,7 @@ public class TestCachingStrategy { } byte buf[] = new byte[8196]; while (length > 0) { - int amt = (length > buf.length) ? (int)buf.length : (int)length; + int amt = (length > buf.length) ? buf.length : (int)length; fos.write(buf, 0, amt); length -= amt; } @@ -189,7 +189,7 @@ public class TestCachingStrategy { } byte buf[] = new byte[8196]; while (length > 0) { - int amt = (length > buf.length) ? (int)buf.length : (int)length; + int amt = (length > buf.length) ? buf.length : (int)length; int ret = fis.read(buf, 0, amt); if (ret == -1) { return totalRead; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMetrics.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMetrics.java index 4921f0307eb..9b90d41198d 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMetrics.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMetrics.java @@ -103,7 +103,7 @@ public class TestDataNodeMetrics { MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build(); try { cluster.waitActive(); - DistributedFileSystem fs = (DistributedFileSystem) cluster.getFileSystem(); + DistributedFileSystem fs = cluster.getFileSystem(); Path testFile = new Path("/testFlushNanosMetric.txt"); FSDataOutputStream fout = fs.create(testFile); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestTransferRbw.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestTransferRbw.java index 55a87bd8ce4..36d595bb253 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestTransferRbw.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestTransferRbw.java @@ -78,7 +78,7 @@ public class TestTransferRbw { ).numDataNodes(REPLICATION).build(); try { cluster.waitActive(); - final DistributedFileSystem fs = (DistributedFileSystem)cluster.getFileSystem(); + final DistributedFileSystem fs = cluster.getFileSystem(); //create a file, write some data and leave it open. 
final Path p = new Path("/foo"); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/OfflineEditsViewerHelper.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/OfflineEditsViewerHelper.java index d2d24a01ad7..b5ba108af83 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/OfflineEditsViewerHelper.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/OfflineEditsViewerHelper.java @@ -126,7 +126,7 @@ public class OfflineEditsViewerHelper { private CheckpointSignature runOperations() throws IOException { LOG.info("Creating edits by performing fs operations"); // no check, if it's not it throws an exception which is what we want - DistributedFileSystem dfs = (DistributedFileSystem) cluster.getFileSystem(); + DistributedFileSystem dfs = cluster.getFileSystem(); DFSTestUtil.runOperations(cluster, dfs, cluster.getConfiguration(0), dfs.getDefaultBlockSize(), 0); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestBlockUnderConstruction.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestBlockUnderConstruction.java index 27490a2e8d3..5448e7a885e 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestBlockUnderConstruction.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestBlockUnderConstruction.java @@ -56,7 +56,7 @@ public class TestBlockUnderConstruction { Configuration conf = new HdfsConfiguration(); cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build(); cluster.waitActive(); - hdfs = (DistributedFileSystem)cluster.getFileSystem(); + hdfs = cluster.getFileSystem(); } @AfterClass diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckPointForSecurityTokens.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckPointForSecurityTokens.java index 8536a2cb66e..9401d076d5a 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckPointForSecurityTokens.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckPointForSecurityTokens.java @@ -71,7 +71,7 @@ public class TestCheckPointForSecurityTokens { DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_ALWAYS_USE_KEY, true); cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDatanodes).build(); cluster.waitActive(); - fs = (DistributedFileSystem)(cluster.getFileSystem()); + fs = cluster.getFileSystem(); FSNamesystem namesystem = cluster.getNamesystem(); String renewer = UserGroupInformation.getLoginUser().getUserName(); Token token1 = namesystem diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSPermissionChecker.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSPermissionChecker.java index fd812794623..6acc5d47d49 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSPermissionChecker.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSPermissionChecker.java @@ -372,7 +372,7 @@ public class TestFSPermissionChecker { private void addAcl(INodeWithAdditionalFields inode, AclEntry... 
acl) throws IOException { - AclStorage.updateINodeAcl((INodeWithAdditionalFields)inode, + AclStorage.updateINodeAcl(inode, Arrays.asList(acl), Snapshot.CURRENT_STATE_ID); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java index a6dd4fea1b1..5a0c3559da9 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java @@ -464,7 +464,7 @@ public class TestFsck { len = (int)(in.getFileLength() % blockSize); if (len == 0) len = blockBuffer.length; } - IOUtils.readFully(in, blockBuffer, 0, (int)len); + IOUtils.readFully(in, blockBuffer, 0, len); int startIdx = blockIdx * blockSize; for (int i = 0; i < len; i++) { if (initialContents[startIdx + i] != blockBuffer[i]) { @@ -851,7 +851,7 @@ public class TestFsck { new MiniDFSCluster.Builder(conf).numDataNodes(NUM_REPLICAS).build(); assertNotNull("Failed Cluster Creation", cluster); cluster.waitClusterUp(); - dfs = (DistributedFileSystem) cluster.getFileSystem(); + dfs = cluster.getFileSystem(); assertNotNull("Failed to get FileSystem", dfs); // Create a file that will be intentionally under-replicated @@ -923,7 +923,7 @@ public class TestFsck { .racks(racks).build(); assertNotNull("Failed Cluster Creation", cluster); cluster.waitClusterUp(); - dfs = (DistributedFileSystem) cluster.getFileSystem(); + dfs = cluster.getFileSystem(); assertNotNull("Failed to get FileSystem", dfs); // Create a file that will be intentionally under-replicated @@ -946,7 +946,7 @@ public class TestFsck { PrintWriter out = new PrintWriter(result, true); InetAddress remoteAddress = InetAddress.getLocalHost(); NamenodeFsck fsck = new NamenodeFsck(conf, namenode, nettop, pmap, out, - NUM_DN, (short)REPL_FACTOR, remoteAddress); + NUM_DN, REPL_FACTOR, remoteAddress); // Run the fsck and check the Result final HdfsFileStatus file = diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestHDFSConcat.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestHDFSConcat.java index 91c0513cb52..6d1f4521f8b 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestHDFSConcat.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestHDFSConcat.java @@ -70,7 +70,7 @@ public class TestHDFSConcat { cluster = new MiniDFSCluster.Builder(conf).numDataNodes(REPL_FACTOR).build(); assertNotNull("Failed Cluster Creation", cluster); cluster.waitClusterUp(); - dfs = (DistributedFileSystem) cluster.getFileSystem(); + dfs = cluster.getFileSystem(); assertNotNull("Failed to get FileSystem", dfs); nn = cluster.getNameNodeRpc(); assertNotNull("Failed to get NameNode", nn); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNNStorageRetentionManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNNStorageRetentionManager.java index b3acc7532bd..8f08ef39f87 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNNStorageRetentionManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNNStorageRetentionManager.java @@ -361,7 +361,7 @@ 
public class TestNNStorageRetentionManager { public Void answer(InvocationOnMock invocation) throws Throwable { Object[] args = invocation.getArguments(); journalSet.selectInputStreams((Collection)args[0], - (long)((Long)args[1]), (boolean)((Boolean)args[2])); + (Long)args[1], (Boolean)args[2]); return null; } }).when(mockLog).selectInputStreams(Mockito.anyCollection(), diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSaveNamespace.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSaveNamespace.java index 87ea06129a6..2d8e82f9dc5 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSaveNamespace.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSaveNamespace.java @@ -609,7 +609,7 @@ public class TestSaveNamespace { MiniDFSCluster cluster = new MiniDFSCluster.Builder(new Configuration()) .numDataNodes(1).build(); cluster.waitActive(); - DistributedFileSystem fs = (DistributedFileSystem) cluster.getFileSystem(); + DistributedFileSystem fs = cluster.getFileSystem(); try { cluster.getNamesystem().leaseManager.addLease("me", "/non-existent"); fs.setSafeMode(SafeModeAction.SAFEMODE_ENTER); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSecurityTokenEditLog.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSecurityTokenEditLog.java index 9352b4a6a15..5aa19bba359 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSecurityTokenEditLog.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSecurityTokenEditLog.java @@ -191,9 +191,9 @@ public class TestSecurityTokenEditLog { Token token1 = fsn.getDelegationToken(renewer); Token token2 = fsn.getDelegationToken(renewer); DelegationTokenIdentifier ident1 = - (DelegationTokenIdentifier)token1.decodeIdentifier(); + token1.decodeIdentifier(); DelegationTokenIdentifier ident2 = - (DelegationTokenIdentifier)token2.decodeIdentifier(); + token2.decodeIdentifier(); // verify we got the tokens verify(log, times(1)).logGetDelegationToken(eq(ident1), anyLong()); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStartup.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStartup.java index 625e22ce282..fa24856c840 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStartup.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStartup.java @@ -212,7 +212,7 @@ public class TestStartup { private void corruptNameNodeFiles() throws IOException { // now corrupt/delete the directrory List nameDirs = (List)FSNamesystem.getNamespaceDirs(config); - List nameEditsDirs = (List)FSNamesystem.getNamespaceEditsDirs(config); + List nameEditsDirs = FSNamesystem.getNamespaceEditsDirs(config); // get name dir and its length, then delete and recreate the directory File dir = new File(nameDirs.get(0).getPath()); // has only one diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRetryCacheWithHA.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRetryCacheWithHA.java index 6a7734dc3af..16fbd74abbd 100644 --- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRetryCacheWithHA.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRetryCacheWithHA.java @@ -954,7 +954,7 @@ public class TestRetryCacheWithHA { boolean checkNamenodeBeforeReturn() throws Exception { for (int i = 0; i < CHECKTIMES; i++) { RemoteIterator iter = dfs.listCachePools(); - if (iter.hasNext() && (long)iter.next().getInfo().getLimit() == 99) { + if (iter.hasNext() && iter.next().getInfo().getLimit() == 99) { return true; } Thread.sleep(1000); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/metrics/TestNNMetricFilesInGetListingOps.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/metrics/TestNNMetricFilesInGetListingOps.java index 88ec5962e49..47a893dfd2c 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/metrics/TestNNMetricFilesInGetListingOps.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/metrics/TestNNMetricFilesInGetListingOps.java @@ -57,7 +57,7 @@ public class TestNNMetricFilesInGetListingOps { cluster = new MiniDFSCluster.Builder(CONF).build(); cluster.waitActive(); cluster.getNameNode(); - fs = (DistributedFileSystem) cluster.getFileSystem(); + fs = cluster.getFileSystem(); } @After diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/metrics/TestNameNodeMetrics.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/metrics/TestNameNodeMetrics.java index b18df9b7e59..fc56c16fbf0 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/metrics/TestNameNodeMetrics.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/metrics/TestNameNodeMetrics.java @@ -105,7 +105,7 @@ public class TestNameNodeMetrics { cluster.waitActive(); namesystem = cluster.getNamesystem(); bm = namesystem.getBlockManager(); - fs = (DistributedFileSystem) cluster.getFileSystem(); + fs = cluster.getFileSystem(); } @After diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestRenameWithSnapshots.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestRenameWithSnapshots.java index 72f8cbe55da..3318ad9cb63 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestRenameWithSnapshots.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestRenameWithSnapshots.java @@ -1592,7 +1592,7 @@ public class TestRenameWithSnapshots { INode barNode = fsdir.getINode4Write(bar.toString()); assertTrue(barNode.getClass() == INodeFile.class); assertSame(fooNode, barNode.getParent()); - List diffList = ((INodeDirectorySnapshottable) dir1Node) + List diffList = dir1Node .getDiffs().asList(); assertEquals(1, diffList.size()); DirectoryDiff diff = diffList.get(0); @@ -1666,7 +1666,7 @@ public class TestRenameWithSnapshots { INode fooNode = childrenList.get(0); assertTrue(fooNode.asDirectory().isWithSnapshot()); assertSame(dir1Node, fooNode.getParent()); - List diffList = ((INodeDirectorySnapshottable) dir1Node) + List diffList = dir1Node .getDiffs().asList(); assertEquals(1, diffList.size()); DirectoryDiff diff = diffList.get(0); 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshot.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshot.java index 4a871642542..d71424b33e5 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshot.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshot.java @@ -490,7 +490,7 @@ public class TestSnapshot { // ... // Modification create = new FileCreation( - node.fileList.get(node.nullFileIndex), hdfs, (int) BLOCKSIZE); + node.fileList.get(node.nullFileIndex), hdfs, BLOCKSIZE); Modification delete = new FileDeletion( node.fileList.get((node.nullFileIndex + 1) % node.fileList.size()), hdfs); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestLightWeightHashSet.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestLightWeightHashSet.java index 3c9f340b038..bb274834c3f 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestLightWeightHashSet.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestLightWeightHashSet.java @@ -421,7 +421,7 @@ public class TestLightWeightHashSet{ assertEquals(NUM - 10, array2.length); for (int i = 0; i < array2.length; i++) { - assertTrue(sub2.contains((Integer) array2[i])); + assertTrue(sub2.contains(array2[i])); } LOG.info("Test other - DONE"); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestLightWeightLinkedSet.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestLightWeightLinkedSet.java index a5106e4f97a..e8b365a429c 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestLightWeightLinkedSet.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestLightWeightLinkedSet.java @@ -358,7 +358,7 @@ public class TestLightWeightLinkedSet { Object[] array2 = set.toArray(); assertEquals(NUM, array2.length); for (int i = 0; i < array2.length; i++) { - assertTrue(list.contains((Integer) array2[i])); + assertTrue(list.contains(array2[i])); } LOG.info("Test capacity - DONE"); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestByteRangeInputStream.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestByteRangeInputStream.java index b4221c0249b..9e93166c398 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestByteRangeInputStream.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestByteRangeInputStream.java @@ -103,7 +103,7 @@ public static class MockHttpURLConnection extends HttpURLConnection { doReturn(new MockHttpURLConnection(ospy.getURL())).when(ospy) .openConnection(); HftpFileSystem.RangeHeaderUrlOpener rspy = spy( - new HftpFileSystem.RangeHeaderUrlOpener(factory, (URL) null)); + new HftpFileSystem.RangeHeaderUrlOpener(factory, null)); doReturn(new MockHttpURLConnection(rspy.getURL())).when(rspy) .openConnection(); ByteRangeInputStream is = new HftpFileSystem.RangeHeaderInputStream(ospy, rspy); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestJsonUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestJsonUtil.java index 
2bce30f0ca5..d161a1d353a 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestJsonUtil.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestJsonUtil.java @@ -124,8 +124,8 @@ public class TestJsonUtil { // The encoded result should contain name, ipAddr and xferPort. Map r = JsonUtil.toJsonMap(di); - Assert.assertEquals(name, (String)r.get("name")); - Assert.assertEquals("127.0.0.1", (String)r.get("ipAddr")); + Assert.assertEquals(name, r.get("name")); + Assert.assertEquals("127.0.0.1", r.get("ipAddr")); // In this test, it is Integer instead of Long since json was not actually // involved in constructing the map. Assert.assertEquals(1004, (int)(Integer)r.get("xferPort"));
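
For context on the pattern this patch applies (illustration only, not part of the patch): a minimal, self-contained Java sketch of a redundant cast versus a necessary one. The classes below are hypothetical stand-ins, not Hadoop types; they mirror the recurring MiniDFSCluster.getFileSystem() case in the diff above, where the declared return type is already the specific subclass, so the cast adds nothing and is dropped.

// Illustrative sketch only -- hypothetical classes, not part of the HDFS-6125 patch.
class FileSystemBase {
}

class DistributedFs extends FileSystemBase {
  void doDistributedThing() {
    System.out.println("distributed operation");
  }
}

class MiniCluster {
  // Declared return type is already the subclass, so callers need no cast.
  DistributedFs getFileSystem() {
    return new DistributedFs();
  }

  // Declared return type is the base class, so callers that need subclass
  // methods still have to cast (after an instanceof check in real code).
  FileSystemBase getGenericFileSystem() {
    return new DistributedFs();
  }
}

public class CastCleanupExample {
  public static void main(String[] args) {
    MiniCluster cluster = new MiniCluster();

    // Redundant cast -- the kind removed throughout the patch above;
    // it compiles, but the static type already matches.
    DistributedFs fs1 = (DistributedFs) cluster.getFileSystem();

    // Cleaned-up form, identical behavior.
    DistributedFs fs2 = cluster.getFileSystem();

    // Still-necessary cast, because the static type here is the base class.
    DistributedFs fs3 = (DistributedFs) cluster.getGenericFileSystem();

    fs1.doDistributedThing();
    fs2.doDistributedThing();
    fs3.doDistributedThing();
  }
}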