HDFS-7716. Add a test for BlockGroup support in FSImage. Contributed by Takuya Fukudome

Tsz-Wo Nicholas Sze 2015-03-25 19:01:03 +09:00 committed by Zhe Zhang
parent ea2e60fbcc
commit 9d1175b8fb
2 changed files with 58 additions and 1 deletion


@@ -1,4 +1,8 @@
BREAKDOWN OF HDFS-7285 SUBTASKS AND RELATED JIRAS

HDFS-7347. Configurable erasure coding policy for individual files and
directories (Zhe Zhang via vinayakumarb)
HDFS-7716. Add a test for BlockGroup support in FSImage.
(Takuya Fukudome via szetszwo)


@@ -19,6 +19,7 @@ package org.apache.hadoop.hdfs.server.namenode;
import static org.junit.Assert.assertArrayEquals;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertTrue;
import java.io.File;
@@ -31,7 +32,12 @@ import java.io.ByteArrayInputStream;
import java.io.IOException;
import java.util.EnumSet;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguous;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoStriped;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
import org.junit.Assert;
@@ -46,6 +52,7 @@ import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSOutputStream;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
@@ -378,4 +385,50 @@ public class TestFSImage {
      FileUtil.fullyDelete(dfsDir);
    }
  }
  /**
   * Ensure that FSImage supports BlockGroup.
   */
  @Test
  public void testSupportBlockGroup() throws IOException {
    final short GROUP_SIZE = HdfsConstants.NUM_DATA_BLOCKS +
        HdfsConstants.NUM_PARITY_BLOCKS;
    final int BLOCK_SIZE = 8 * 1024 * 1024;
    Configuration conf = new HdfsConfiguration();
    conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCK_SIZE);
    MiniDFSCluster cluster = null;
    try {
      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(GROUP_SIZE)
          .build();
      cluster.waitActive();
      DistributedFileSystem fs = cluster.getFileSystem();
      fs.setStoragePolicy(new Path("/"), HdfsConstants.EC_STORAGE_POLICY_NAME);
      Path file = new Path("/striped");
      FSDataOutputStream out = fs.create(file);
      byte[] bytes = DFSTestUtil.generateSequentialBytes(0, BLOCK_SIZE);
      out.write(bytes);
      out.close();

      // Save the namespace and restart the NameNode so that the file is
      // reloaded from the FSImage rather than replayed from the edit log.
      fs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
      fs.saveNamespace();
      fs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
      cluster.restartNameNodes();
      fs = cluster.getFileSystem();
      assertTrue(fs.exists(file));

      // check the information of striped blocks
      FSNamesystem fsn = cluster.getNamesystem();
      INodeFile inode = fsn.dir.getINode(file.toString()).asFile();
      FileWithStripedBlocksFeature sb = inode.getStripedBlocksFeature();
      assertNotNull(sb);
      BlockInfoStriped[] blks = sb.getBlocks();
      assertEquals(1, blks.length);
      assertTrue(blks[0].isStriped());
      assertEquals(HdfsConstants.NUM_DATA_BLOCKS, blks[0].getDataBlockNum());
      assertEquals(HdfsConstants.NUM_PARITY_BLOCKS, blks[0].getParityBlockNum());
    } finally {
      if (cluster != null) {
        cluster.shutdown();
      }
    }
  }
}
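
For readers skimming the assertions: the single block group the test expects follows from simple striping arithmetic. The sketch below is illustrative only and is not part of the patch; the 6-data/3-parity figures are assumptions standing in for the branch's default schema, since the test itself only relies on HdfsConstants.NUM_DATA_BLOCKS and NUM_PARITY_BLOCKS.

// Illustrative sketch (not from the patch): why one 8 MB write yields a
// single block group under an assumed 6-data/3-parity striping schema.
public class BlockGroupMath {
  public static void main(String[] args) {
    final int numDataBlocks = 6;    // assumption for HdfsConstants.NUM_DATA_BLOCKS
    final int numParityBlocks = 3;  // assumption for HdfsConstants.NUM_PARITY_BLOCKS
    final long blockSize = 8L * 1024 * 1024;  // matches BLOCK_SIZE in the test

    // A full block group carries numDataBlocks * blockSize bytes of user
    // data; the parity blocks add redundancy, not extra user capacity.
    final long groupDataCapacity = numDataBlocks * blockSize;

    // The test writes exactly one blockSize of data.
    final long fileLength = blockSize;

    // Ceiling division: block groups needed to hold the file.
    final long groups = (fileLength + groupDataCapacity - 1) / groupDataCapacity;

    System.out.println("datanodes needed (GROUP_SIZE) = "
        + (numDataBlocks + numParityBlocks));        // 9 under these assumptions
    System.out.println("block groups = " + groups);  // 1, matching assertEquals(1, blks.length)
  }
}

Because one block group holds NUM_DATA_BLOCKS full blocks of user data, any schema with at least one data block stores a single blockSize write in one group, so assertEquals(1, blks.length) holds regardless of the exact constants. To run just this case, something like mvn test -Dtest=TestFSImage#testSupportBlockGroup from the hadoop-hdfs module should work, assuming a standard Surefire setup.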