HDFS-1522. Combine two BLOCK_FILE_PREFIX constants into one. Contributed by Dongming Liang.

Author: Dongming Liang
Date: 2015-03-04 17:47:05 -08:00
Committed by: Konstantin V Shvachko
Parent: 1749094fab
Commit: ada545d3b7

11 changed files with 19 additions and 12 deletions
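In short, the commit drops the package-private DataStorage.BLOCK_FILE_PREFIX and the scattered "blk_" string literals in favor of the one public constant, Block.BLOCK_FILE_PREFIX. Below is a minimal, self-contained sketch of the resulting idiom; the Block class here is a stand-in for org.apache.hadoop.hdfs.protocol.Block (which also defines METADATA_EXTENSION = ".meta"), and the class name BlockFilePrefixSketch is hypothetical.

    import java.io.File;
    import java.io.FilenameFilter;

    // Stand-in for org.apache.hadoop.hdfs.protocol.Block: after this commit,
    // the single place that defines the block-file name prefix.
    class Block {
      public static final String BLOCK_FILE_PREFIX = "blk_";
      public static final String METADATA_EXTENSION = ".meta";
    }

    public class BlockFilePrefixSketch {
      public static void main(String[] args) {
        // The pattern repeated across the touched files: select block data
        // files by prefix, excluding their ".meta" checksum companions.
        FilenameFilter blockDataFiles = new FilenameFilter() {
          @Override
          public boolean accept(File dir, String name) {
            return name.startsWith(Block.BLOCK_FILE_PREFIX)
                && !name.endsWith(Block.METADATA_EXTENSION);
          }
        };

        System.out.println(blockDataFiles.accept(new File("."), "blk_1073741825"));           // true
        System.out.println(blockDataFiles.accept(new File("."), "blk_1073741825_1001.meta")); // false
        System.out.println(blockDataFiles.accept(new File("."), "subdir0"));                  // false
      }
    }

Centralizing the prefix in Block means a rename touches one constant instead of a dozen literals, which is the whole point of the patch below.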

CHANGES.txt

@@ -401,6 +401,9 @@ Release 2.7.0 - UNRELEASED
     HDFS-7535. Utilize Snapshot diff report for distcp. (jing9)

+    HDFS-1522. Combine two BLOCK_FILE_PREFIX constants into one.
+    (Dongming Liang via shv)
+
   OPTIMIZATIONS

     HDFS-7454. Reduce memory footprint for AclEntries in NameNode.

RemoteBlockReader.java

@@ -30,6 +30,7 @@ import org.apache.hadoop.fs.FSInputChecker;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.ReadOption;
 import org.apache.hadoop.hdfs.net.Peer;
+import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil;
@@ -351,7 +352,8 @@ public class RemoteBlockReader extends FSInputChecker implements BlockReader {
       long startOffset, long firstChunkOffset, long bytesToRead, Peer peer,
       DatanodeID datanodeID, PeerCache peerCache) {
     // Path is used only for printing block and file information in debug
-    super(new Path("/blk_" + blockId + ":" + bpid + ":of:"+ file)/*too non path-like?*/,
+    super(new Path("/" + Block.BLOCK_FILE_PREFIX + blockId +
+        ":" + bpid + ":of:"+ file)/*too non path-like?*/,
           1, verifyChecksum,
           checksum.getChecksumSize() > 0? checksum : null,
           checksum.getBytesPerChecksum(),

DataStorage.java

@@ -83,7 +83,6 @@ import java.util.concurrent.Future;
 public class DataStorage extends Storage {
   public final static String BLOCK_SUBDIR_PREFIX = "subdir";
-  final static String BLOCK_FILE_PREFIX = "blk_";
   final static String COPY_FILE_PREFIX = "dncp_";
   final static String STORAGE_DIR_DETACHED = "detach";
   public final static String STORAGE_DIR_RBW = "rbw";
@@ -1250,7 +1249,7 @@ public class DataStorage extends Storage {
     String[] blockNames = from.list(new java.io.FilenameFilter() {
       @Override
       public boolean accept(File dir, String name) {
-        return name.startsWith(BLOCK_FILE_PREFIX);
+        return name.startsWith(Block.BLOCK_FILE_PREFIX);
       }
     });

DirectoryScanner.java

@@ -633,7 +633,7 @@ public class DirectoryScanner implements Runnable {
           continue;
         }
         if (!Block.isBlockFilename(files[i])) {
-          if (isBlockMetaFile("blk_", files[i].getName())) {
+          if (isBlockMetaFile(Block.BLOCK_FILE_PREFIX, files[i].getName())) {
             long blockId = Block.getBlockId(files[i].getName());
             verifyFileLocation(files[i].getParentFile(), bpFinalizedDir,
                 blockId);

FsVolumeImpl.java

@@ -430,7 +430,8 @@ public class FsVolumeImpl implements FsVolumeSpi {
       @Override
       public boolean accept(File dir, String name) {
-        return !name.endsWith(".meta") && name.startsWith("blk_");
+        return !name.endsWith(".meta") &&
+            name.startsWith(Block.BLOCK_FILE_PREFIX);
       }
     }

NamenodeFsck.java

@@ -299,7 +299,7 @@ public class NamenodeFsck implements DataEncryptionKeyFactory {
     out.println(sb.toString());
     sb.append(" for blockIds: \n");
     for (String blk: blocks) {
-      if(blk == null || !blk.contains("blk_")) {
+      if(blk == null || !blk.contains(Block.BLOCK_FILE_PREFIX)) {
         out.println("Incorrect blockId format: " + blk);
         continue;
       }

MiniDFSCluster.java

@@ -2567,8 +2567,8 @@ public class MiniDFSCluster {
       return null;
     }
     for (File f : files) {
-      if (f.getName().startsWith("blk_") && f.getName().endsWith(
-          Block.METADATA_EXTENSION)) {
+      if (f.getName().startsWith(Block.BLOCK_FILE_PREFIX) &&
+          f.getName().endsWith(Block.METADATA_EXTENSION)) {
         results.add(f);
       } else if (f.isDirectory()) {
         List<File> subdirResults = getAllBlockMetadataFiles(f);

TestCrcCorruption.java

@@ -35,6 +35,7 @@ import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.DFSClientFaultInjector;
+import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.io.IOUtils;
 import org.junit.Before;
@@ -176,7 +177,7 @@ public class TestCrcCorruption {
     assertTrue("Blocks do not exist in data-dir", (blocks != null) && (blocks.length > 0));
     int num = 0;
     for (int idx = 0; idx < blocks.length; idx++) {
-      if (blocks[idx].getName().startsWith("blk_") &&
+      if (blocks[idx].getName().startsWith(Block.BLOCK_FILE_PREFIX) &&
           blocks[idx].getName().endsWith(".meta")) {
         num++;
         if (num % 3 == 0) {

TestFileCorruption.java

@@ -77,7 +77,7 @@ public class TestFileCorruption {
     File[] blocks = data_dir.listFiles();
     assertTrue("Blocks do not exist in data-dir", (blocks != null) && (blocks.length > 0));
     for (int idx = 0; idx < blocks.length; idx++) {
-      if (!blocks[idx].getName().startsWith("blk_")) {
+      if (!blocks[idx].getName().startsWith(Block.BLOCK_FILE_PREFIX)) {
        continue;
       }
       System.out.println("Deliberately removing file "+blocks[idx].getName());

TestReplication.java

@@ -46,6 +46,7 @@ import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
+import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.server.datanode.SimulatedFSDataset;
 import org.apache.hadoop.util.Time;
 import org.junit.Test;
@@ -520,7 +521,7 @@ public class TestReplication {
     String blockFile = null;
     File[] listFiles = participatedNodeDirs.listFiles();
     for (File file : listFiles) {
-      if (file.getName().startsWith("blk_")
+      if (file.getName().startsWith(Block.BLOCK_FILE_PREFIX)
           && !file.getName().endsWith("meta")) {
         blockFile = file.getName();
         for (File file1 : nonParticipatedNodeDirs) {

TestDataNodeVolumeFailure.java

@@ -319,7 +319,7 @@ public class TestDataNodeVolumeFailure {
   private boolean deteteBlocks(File dir) {
     File [] fileList = dir.listFiles();
     for(File f : fileList) {
-      if(f.getName().startsWith("blk_")) {
+      if(f.getName().startsWith(Block.BLOCK_FILE_PREFIX)) {
         if(!f.delete())
           return false;