From b9bf1a9391f0619d75604a5601763c2fab1ae1c5 Mon Sep 17 00:00:00 2001
From: Colin Patrick Mccabe
Date: Tue, 8 Sep 2015 18:12:47 -0700
Subject: [PATCH] HDFS-8860. Remove unused Replica copyOnWrite code (Lei
 (Eddy) Xu via Colin P. McCabe)

(cherry picked from commit a153b9601ad8628fdd608d8696310ca8c1f58ff0)
---
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt    |  2 +
 .../server/datanode/FinalizedReplica.java      | 15 +---
 .../hdfs/server/datanode/ReplicaInfo.java      | 82 -------------------
 .../server/datanode/ReplicaUnderRecovery.java  | 10 ---
 .../datanode/ReplicaWaitingToBeRecovered.java  | 15 +---
 .../fsdataset/impl/FsDatasetImpl.java          |  3 -
 .../apache/hadoop/hdfs/TestFileAppend.java     | 72 ----------------
 .../server/datanode/DataNodeTestUtils.java     |  5 --
 .../fsdataset/impl/FsDatasetTestUtil.java      |  6 --
 .../fsdataset/impl/TestDatanodeRestart.java    | 72 ----------------
 10 files changed, 4 insertions(+), 278 deletions(-)

diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 5114e8abdb8..75c0087859e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -555,6 +555,8 @@ Release 2.8.0 - UNRELEASED
     HDFS-9019. Adding informative message to sticky bit permission denied
     exception. (xyao)
 
+    HDFS-8860. Remove unused Replica copyOnWrite code (Lei (Eddy) Xu via Colin P. McCabe)
+
     OPTIMIZATIONS
 
     HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/FinalizedReplica.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/FinalizedReplica.java
index cc3287406a2..8daeb51e0df 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/FinalizedReplica.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/FinalizedReplica.java
@@ -27,7 +27,6 @@ import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi;
  * This class describes a replica that has been finalized.
  */
 public class FinalizedReplica extends ReplicaInfo {
-  private boolean unlinked;      // copy-on-write done for block
 
   /**
    * Constructor
@@ -58,7 +57,6 @@ public class FinalizedReplica extends ReplicaInfo {
    */
   public FinalizedReplica(FinalizedReplica from) {
     super(from);
-    this.unlinked = from.isUnlinked();
   }
 
   @Override  // ReplicaInfo
@@ -66,16 +64,6 @@ public class FinalizedReplica extends ReplicaInfo {
     return ReplicaState.FINALIZED;
   }
 
-  @Override  // ReplicaInfo
-  public boolean isUnlinked() {
-    return unlinked;
-  }
-
-  @Override  // ReplicaInfo
-  public void setUnlinked() {
-    unlinked = true;
-  }
-
   @Override
   public long getVisibleLength() {
     return getNumBytes();       // all bytes are visible
@@ -98,7 +86,6 @@ public class FinalizedReplica extends ReplicaInfo {
 
   @Override
   public String toString() {
-    return super.toString()
-        + "\n  unlinked          =" + unlinked;
+    return super.toString();
   }
 }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaInfo.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaInfo.java
index 136d8a93bce..31b14faa63f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaInfo.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaInfo.java
@@ -196,22 +196,6 @@ abstract public class ReplicaInfo extends Block implements Replica {
     return new ReplicaDirInfo(currentDir.getAbsolutePath(), hasSubdirs);
   }
 
-  /**
-   * check if this replica has already been unlinked.
-   * @return true if the replica has already been unlinked
-   *         or no need to be detached; false otherwise
-   */
-  public boolean isUnlinked() {
-    return true;                // no need to be unlinked
-  }
-
-  /**
-   * set that this replica is unlinked
-   */
-  public void setUnlinked() {
-    // no need to be unlinked
-  }
-
   /**
    * Number of bytes reserved for this replica on disk.
    */
@@ -229,72 +213,6 @@ abstract public class ReplicaInfo extends Block implements Replica {
     return 0;
   }
 
-  /**
-   * Copy specified file into a temporary file. Then rename the
-   * temporary file to the original name. This will cause any
-   * hardlinks to the original file to be removed. The temporary
-   * files are created in the same directory. The temporary files will
-   * be recovered (especially on Windows) on datanode restart.
-   */
-  private void unlinkFile(File file, Block b) throws IOException {
-    File tmpFile = DatanodeUtil.createTmpFile(b, DatanodeUtil.getUnlinkTmpFile(file));
-    try {
-      FileInputStream in = new FileInputStream(file);
-      try {
-        FileOutputStream out = new FileOutputStream(tmpFile);
-        try {
-          IOUtils.copyBytes(in, out, 16*1024);
-        } finally {
-          out.close();
-        }
-      } finally {
-        in.close();
-      }
-      if (file.length() != tmpFile.length()) {
-        throw new IOException("Copy of file " + file + " size " + file.length()+
-                              " into file " + tmpFile +
-                              " resulted in a size of " + tmpFile.length());
-      }
-      FileUtil.replaceFile(tmpFile, file);
-    } catch (IOException e) {
-      boolean done = tmpFile.delete();
-      if (!done) {
-        DataNode.LOG.info("detachFile failed to delete temporary file " +
-                          tmpFile);
-      }
-      throw e;
-    }
-  }
-
-  /**
-   * Remove a hard link by copying the block to a temporary place and
-   * then moving it back
-   * @param numLinks number of hard links
-   * @return true if copy is successful;
-   *         false if it is already detached or no need to be detached
-   * @throws IOException if there is any copy error
-   */
-  public boolean unlinkBlock(int numLinks) throws IOException {
-    if (isUnlinked()) {
-      return false;
-    }
-    File file = getBlockFile();
-    if (file == null || getVolume() == null) {
-      throw new IOException("detachBlock:Block not found. " + this);
-    }
-    File meta = getMetaFile();
-
-    if (HardLink.getLinkCount(file) > numLinks) {
-      DataNode.LOG.info("CopyOnWrite for block " + this);
-      unlinkFile(file, this);
-    }
-    if (HardLink.getLinkCount(meta) > numLinks) {
-      unlinkFile(meta, this);
-    }
-    setUnlinked();
-    return true;
-  }
-
   @Override  //Object
   public String toString() {
     return getClass().getSimpleName()
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaUnderRecovery.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaUnderRecovery.java
index 2cd8a01218e..558ee217534 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaUnderRecovery.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaUnderRecovery.java
@@ -85,16 +85,6 @@ public class ReplicaUnderRecovery extends ReplicaInfo {
   public ReplicaInfo getOriginalReplica() {
     return original;
   }
-
-  @Override //ReplicaInfo
-  public boolean isUnlinked() {
-    return original.isUnlinked();
-  }
-
-  @Override //ReplicaInfo
-  public void setUnlinked() {
-    original.setUnlinked();
-  }
 
   @Override //ReplicaInfo
   public ReplicaState getState() {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaWaitingToBeRecovered.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaWaitingToBeRecovered.java
index 26ab3dbe243..220649d1eba 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaWaitingToBeRecovered.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaWaitingToBeRecovered.java
@@ -33,7 +33,6 @@ import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi;
  * lease recovery.
  */
 public class ReplicaWaitingToBeRecovered extends ReplicaInfo {
-  private boolean unlinked;      // copy-on-write done for block
 
   /**
    * Constructor
@@ -64,7 +63,6 @@ public class ReplicaWaitingToBeRecovered extends ReplicaInfo {
    */
   public ReplicaWaitingToBeRecovered(ReplicaWaitingToBeRecovered from) {
     super(from);
-    this.unlinked = from.isUnlinked();
   }
 
   @Override //ReplicaInfo
@@ -72,16 +70,6 @@ public class ReplicaWaitingToBeRecovered extends ReplicaInfo {
     return ReplicaState.RWR;
   }
 
-  @Override //ReplicaInfo
-  public boolean isUnlinked() {
-    return unlinked;
-  }
-
-  @Override //ReplicaInfo
-  public void setUnlinked() {
-    unlinked = true;
-  }
-
   @Override //ReplicaInfo
   public long getVisibleLength() {
     return -1;  //no bytes are visible
@@ -104,7 +92,6 @@ public class ReplicaWaitingToBeRecovered extends ReplicaInfo {
 
   @Override
   public String toString() {
-    return super.toString()
-        + "\n  unlinked=" + unlinked;
+    return super.toString();
   }
 }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
index 4034ef890f6..7c77ad8a4d0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
@@ -1114,8 +1114,6 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
       throws IOException {
     // If the block is cached, start uncaching it.
     cacheManager.uncacheBlock(bpid, replicaInfo.getBlockId());
-    // unlink the finalized replica
-    replicaInfo.unlinkBlock(1);
 
     // construct a RBW replica with the new GS
     File blkfile = replicaInfo.getBlockFile();
@@ -2485,7 +2483,6 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
           + ", rur=" + rur);
     }
     if (rur.getNumBytes() > newlength) {
-      rur.unlinkBlock(1);
       truncateBlock(blockFile, metaFile, rur.getNumBytes(), newlength);
       if(!copyOnTruncate) {
         // update RUR with the new length
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend.java
index b604965cf99..be1f985046e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend.java
@@ -109,78 +109,6 @@ public class TestFileAppend{
         expected, "Read 1", false);
   }
 
-  /**
-   * Test that copy on write for blocks works correctly
-   * @throws IOException an exception might be thrown
-   */
-  @Test
-  public void testCopyOnWrite() throws IOException {
-    Configuration conf = new HdfsConfiguration();
-    if (simulatedStorage) {
-      SimulatedFSDataset.setFactory(conf);
-    }
-    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
-    FileSystem fs = cluster.getFileSystem();
-    InetSocketAddress addr = new InetSocketAddress("localhost",
-                                                   cluster.getNameNodePort());
-    DFSClient client = new DFSClient(addr, conf);
-    try {
-
-      // create a new file, write to it and close it.
-      //
-      Path file1 = new Path("/filestatus.dat");
-      FSDataOutputStream stm = AppendTestUtil.createFile(fs, file1, 1);
-      writeFile(stm);
-      stm.close();
-
-      // Get a handle to the datanode
-      DataNode[] dn = cluster.listDataNodes();
-      assertTrue("There should be only one datanode but found " + dn.length,
-                  dn.length == 1);
-
-      LocatedBlocks locations = client.getNamenode().getBlockLocations(
-                                  file1.toString(), 0, Long.MAX_VALUE);
-      List<LocatedBlock> blocks = locations.getLocatedBlocks();
-
-      //
-      // Create hard links for a few of the blocks
-      //
-      for (int i = 0; i < blocks.size(); i = i + 2) {
-        ExtendedBlock b = blocks.get(i).getBlock();
-        final File f = DataNodeTestUtils.getFile(dn[0],
-            b.getBlockPoolId(), b.getLocalBlock().getBlockId());
-        File link = new File(f.toString() + ".link");
-        System.out.println("Creating hardlink for File " + f + " to " + link);
-        HardLink.createHardLink(f, link);
-      }
-
-      //
-      // Detach all blocks. This should remove hardlinks (if any)
-      //
-      for (int i = 0; i < blocks.size(); i++) {
-        ExtendedBlock b = blocks.get(i).getBlock();
-        System.out.println("testCopyOnWrite detaching block " + b);
-        assertTrue("Detaching block " + b + " should have returned true",
-            DataNodeTestUtils.unlinkBlock(dn[0], b, 1));
-      }
-
-      // Since the blocks were already detached earlier, these calls should
-      // return false
-      //
-      for (int i = 0; i < blocks.size(); i++) {
-        ExtendedBlock b = blocks.get(i).getBlock();
-        System.out.println("testCopyOnWrite detaching block " + b);
-        assertTrue("Detaching block " + b + " should have returned false",
-            !DataNodeTestUtils.unlinkBlock(dn[0], b, 1));
-      }
-
-    } finally {
-      client.close();
-      fs.close();
-      cluster.shutdown();
-    }
-  }
-
   /**
    * Test a simple flush on a simple HDFS file.
    * @throws IOException an exception might be thrown
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/DataNodeTestUtils.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/DataNodeTestUtils.java
index 2f9a3e5d93f..b4071de7b13 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/DataNodeTestUtils.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/DataNodeTestUtils.java
@@ -151,11 +151,6 @@ public class DataNodeTestUtils {
       throws IOException {
     return FsDatasetTestUtil.getMetaFile(dn.getFSDataset(), bpid, b);
   }
-
-  public static boolean unlinkBlock(DataNode dn, ExtendedBlock bk, int numLinks
-      ) throws IOException {
-    return FsDatasetTestUtil.unlinkBlock(dn.getFSDataset(), bk, numLinks);
-  }
 
   public static long getPendingAsyncDeletions(DataNode dn) {
     return FsDatasetTestUtil.getPendingAsyncDeletions(dn.getFSDataset());
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetTestUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetTestUtil.java
index 7ac9b65db2a..164385ebe92 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetTestUtil.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetTestUtil.java
@@ -54,12 +54,6 @@ public class FsDatasetTestUtil {
     return FsDatasetUtil.getMetaFile(getBlockFile(fsd, bpid, b), b
         .getGenerationStamp());
   }
-
-  public static boolean unlinkBlock(FsDatasetSpi<?> fsd,
-      ExtendedBlock block, int numLinks) throws IOException {
-    final ReplicaInfo info = ((FsDatasetImpl)fsd).getReplicaInfo(block);
-    return info.unlinkBlock(numLinks);
-  }
 
   public static ReplicaInfo fetchReplicaInfo (final FsDatasetSpi<?> fsd,
       final String bpid, final long blockId) {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestDatanodeRestart.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestDatanodeRestart.java
index 4516696f1fe..8bbac9f2f0f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestDatanodeRestart.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestDatanodeRestart.java
@@ -143,79 +143,7 @@ public class TestDatanodeRestart {
     }
   }
 
-  // test recovering unlinked tmp replicas
-  @Test public void testRecoverReplicas() throws Exception {
-    Configuration conf = new HdfsConfiguration();
-    conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 1024L);
-    conf.setInt(HdfsClientConfigKeys.DFS_CLIENT_WRITE_PACKET_SIZE_KEY, 512);
-    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
-    cluster.waitActive();
-    try {
-      FileSystem fs = cluster.getFileSystem();
-      for (int i=0; i<4; i++) {
-        Path fileName = new Path("/test"+i);
-        DFSTestUtil.createFile(fs, fileName, 1, (short)1, 0L);
-        DFSTestUtil.waitReplication(fs, fileName, (short)1);
-      }
-      String bpid = cluster.getNamesystem().getBlockPoolId();
-      DataNode dn = cluster.getDataNodes().get(0);
-      Iterator<ReplicaInfo> replicasItor =
-          dataset(dn).volumeMap.replicas(bpid).iterator();
-      ReplicaInfo replica = replicasItor.next();
-      createUnlinkTmpFile(replica, true, true); // rename block file
-      createUnlinkTmpFile(replica, false, true); // rename meta file
-      replica = replicasItor.next();
-      createUnlinkTmpFile(replica, true, false); // copy block file
-      createUnlinkTmpFile(replica, false, false); // copy meta file
-      replica = replicasItor.next();
-      createUnlinkTmpFile(replica, true, true); // rename block file
-      createUnlinkTmpFile(replica, false, false); // copy meta file
-
-      cluster.restartDataNodes();
-      cluster.waitActive();
-      dn = cluster.getDataNodes().get(0);
-
-      // check volumeMap: 4 finalized replica
-      Collection<ReplicaInfo> replicas = dataset(dn).volumeMap.replicas(bpid);
-      Assert.assertEquals(4, replicas.size());
-      replicasItor = replicas.iterator();
-      while (replicasItor.hasNext()) {
-        Assert.assertEquals(ReplicaState.FINALIZED,
-            replicasItor.next().getState());
-      }
-    } finally {
-      cluster.shutdown();
-    }
-  }
-
   private static FsDatasetImpl dataset(DataNode dn) {
     return (FsDatasetImpl)DataNodeTestUtils.getFSDataset(dn);
   }
-
-  private static void createUnlinkTmpFile(ReplicaInfo replicaInfo,
-      boolean changeBlockFile,
-      boolean isRename) throws IOException {
-    File src;
-    if (changeBlockFile) {
-      src = replicaInfo.getBlockFile();
-    } else {
-      src = replicaInfo.getMetaFile();
-    }
-    File dst = DatanodeUtil.getUnlinkTmpFile(src);
-    if (isRename) {
-      src.renameTo(dst);
-    } else {
-      FileInputStream in = new FileInputStream(src);
-      try {
-        FileOutputStream out = new FileOutputStream(dst);
-        try {
-          IOUtils.copyBytes(in, out, 1);
-        } finally {
-          out.close();
-        }
-      } finally {
-        in.close();
-      }
-    }
-  }
 }