HDFS-8546. Use try with resources in DataStorage and Storage.
(cherry picked from commit 1403b84b12)

parent ba406b723d
commit 84fdd4a3a0
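The change itself: hand-written try/finally cleanup of RandomAccessFile, FileOutputStream and FileLock objects in Storage and DataStorage is replaced with Java 7 try-with-resources blocks. As a rough before/after sketch of the pattern (illustrative only, not code from this patch; the class and method names below are made up):

import java.io.FileOutputStream;
import java.io.IOException;
import java.io.RandomAccessFile;

public class TryWithResourcesSketch {
  // Before: resources are closed by hand in a finally block.
  static void writeBefore(java.io.File f, byte[] data) throws IOException {
    RandomAccessFile file = new RandomAccessFile(f, "rws");
    FileOutputStream out = null;
    try {
      out = new FileOutputStream(file.getFD());
      out.write(data);
    } finally {
      if (out != null) {
        out.close();
      }
      file.close();
    }
  }

  // After: the compiler generates the close() calls, in reverse
  // declaration order (out before file).
  static void writeAfter(java.io.File f, byte[] data) throws IOException {
    try (RandomAccessFile file = new RandomAccessFile(f, "rws");
        FileOutputStream out = new FileOutputStream(file.getFD())) {
      out.write(data);
    }
  }
}

Besides being shorter, the generated cleanup keeps the body's exception as the primary one and attaches any failure from close() as a suppressed exception, which the hand-written finally blocks did not do.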
@@ -338,6 +338,8 @@ Release 2.8.0 - UNRELEASED
 
     HDFS-8665. Fix replication check in DFSTestUtils#waitForReplication. (wang)
 
+    HDFS-8546. Use try with resources in DataStorage and Storage. (wang)
+
   OPTIMIZATIONS
 
     HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than
@@ -709,6 +709,7 @@ public abstract class Storage extends StorageInfo {
     try {
       res = file.getChannel().tryLock();
       if (null == res) {
+        LOG.error("Unable to acquire file lock on path " + lockF.toString());
         throw new OverlappingFileLockException();
       }
       file.write(jvmName.getBytes(Charsets.UTF_8));
@@ -971,35 +972,28 @@ public abstract class Storage extends StorageInfo {
   public void writeProperties(File to, StorageDirectory sd) throws IOException {
     Properties props = new Properties();
     setPropertiesFromFields(props, sd);
-    writeProperties(to, sd, props);
+    writeProperties(to, props);
   }
 
-  public static void writeProperties(File to, StorageDirectory sd,
-      Properties props) throws IOException {
-    RandomAccessFile file = new RandomAccessFile(to, "rws");
-    FileOutputStream out = null;
-    try {
+  public static void writeProperties(File to, Properties props)
+      throws IOException {
+    try (RandomAccessFile file = new RandomAccessFile(to, "rws");
+        FileOutputStream out = new FileOutputStream(file.getFD())) {
       file.seek(0);
-      out = new FileOutputStream(file.getFD());
       /*
        * If server is interrupted before this line,
        * the version file will remain unchanged.
       */
       props.store(out, null);
       /*
        * Now the new fields are flushed to the head of the file, but file
        * length can still be larger then required and therefore the file can
        * contain whole or corrupted fields from its old contents in the end.
        * If server is interrupted here and restarted later these extra fields
        * either should not effect server behavior or should be handled
        * by the server correctly.
        */
       file.setLength(out.getChannel().position());
-    } finally {
-      if (out != null) {
-        out.close();
-      }
-      file.close();
     }
   }
 
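A detail the new writeProperties leans on: resources declared in a try-with-resources header are closed in reverse declaration order, so the FileOutputStream built from file.getFD() is closed before the RandomAccessFile it was derived from, matching the order the removed finally block used by hand. A self-contained sketch of that ordering guarantee (the Tracked class here is hypothetical, not part of Hadoop):

import java.io.Closeable;

public class CloseOrderSketch {
  // A tiny Closeable that reports when it is closed.
  static final class Tracked implements Closeable {
    private final String name;
    Tracked(String name) { this.name = name; }
    @Override public void close() { System.out.println("closed " + name); }
  }

  public static void main(String[] args) {
    // Prints "closed inner" before "closed outer": resources are closed
    // in reverse declaration order.
    try (Tracked outer = new Tracked("outer");
        Tracked inner = new Tracked("inner")) {
      System.out.println("body");
    }
  }
}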
@@ -44,17 +44,15 @@ import org.apache.hadoop.hdfs.server.common.Storage;
 import org.apache.hadoop.hdfs.server.common.StorageInfo;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
 import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
-import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.io.nativeio.NativeIO;
 import org.apache.hadoop.util.Daemon;
 import org.apache.hadoop.util.DiskChecker;
 
 import java.io.File;
-import java.io.FileInputStream;
-import java.io.FileOutputStream;
 import java.io.IOException;
 import java.io.RandomAccessFile;
 import java.nio.channels.FileLock;
+import java.nio.channels.OverlappingFileLockException;
 import java.util.ArrayList;
 import java.util.Collection;
 import java.util.Collections;
@@ -82,7 +80,6 @@ import java.util.concurrent.Future;
 public class DataStorage extends Storage {
 
   public final static String BLOCK_SUBDIR_PREFIX = "subdir";
-  final static String COPY_FILE_PREFIX = "dncp_";
   final static String STORAGE_DIR_DETACHED = "detach";
   public final static String STORAGE_DIR_RBW = "rbw";
   public final static String STORAGE_DIR_FINALIZED = "finalized";
@@ -614,20 +611,22 @@ public class DataStorage extends Storage {
   @Override
   public boolean isPreUpgradableLayout(StorageDirectory sd) throws IOException {
     File oldF = new File(sd.getRoot(), "storage");
-    if (!oldF.exists())
+    if (!oldF.exists()) {
       return false;
+    }
     // check the layout version inside the storage file
     // Lock and Read old storage file
-    RandomAccessFile oldFile = new RandomAccessFile(oldF, "rws");
-    FileLock oldLock = oldFile.getChannel().tryLock();
-    try {
+    try (RandomAccessFile oldFile = new RandomAccessFile(oldF, "rws");
+        FileLock oldLock = oldFile.getChannel().tryLock()) {
+      if (null == oldLock) {
+        LOG.error("Unable to acquire file lock on path " + oldF.toString());
+        throw new OverlappingFileLockException();
+      }
       oldFile.seek(0);
       int oldVersion = oldFile.readInt();
-      if (oldVersion < LAST_PRE_UPGRADE_LAYOUT_VERSION)
+      if (oldVersion < LAST_PRE_UPGRADE_LAYOUT_VERSION) {
         return false;
-    } finally {
-      oldLock.release();
-      oldFile.close();
+      }
     }
     return true;
   }
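For the new null check above: FileChannel.tryLock() returns null when another process already holds the lock, and throws OverlappingFileLockException only when an overlapping lock is held within the same JVM; a null resource in a try-with-resources header is simply skipped at close time, so the code still has to detect the null itself. A minimal standalone sketch of the same pattern (the readVersion helper is hypothetical, not from the patch):

import java.io.File;
import java.io.IOException;
import java.io.RandomAccessFile;
import java.nio.channels.FileLock;
import java.nio.channels.OverlappingFileLockException;

public class TryLockSketch {
  // Lock a file, then read an int from its head. The FileLock is released
  // automatically when the try block exits (FileLock is AutoCloseable).
  static int readVersion(File storageFile) throws IOException {
    try (RandomAccessFile raf = new RandomAccessFile(storageFile, "rws");
        FileLock lock = raf.getChannel().tryLock()) {
      if (lock == null) {
        // Held by another process: tryLock() returned null instead of throwing.
        throw new OverlappingFileLockException();
      }
      raf.seek(0);
      return raf.readInt();
    }
  }
}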
@@ -1218,23 +1217,8 @@ public class DataStorage extends Storage {
       return;
     }
     if (!from.isDirectory()) {
-      if (from.getName().startsWith(COPY_FILE_PREFIX)) {
-        FileInputStream in = new FileInputStream(from);
-        try {
-          FileOutputStream out = new FileOutputStream(to);
-          try {
-            IOUtils.copyBytes(in, out, 16*1024);
-            hl.linkStats.countPhysicalFileCopies++;
-          } finally {
-            out.close();
-          }
-        } finally {
-          in.close();
-        }
-      } else {
-        HardLink.createHardLink(from, to);
-        hl.linkStats.countSingleLinks++;
-      }
+      HardLink.createHardLink(from, to);
+      hl.linkStats.countSingleLinks++;
       return;
     }
     // from is a directory
@@ -1285,8 +1269,7 @@ public class DataStorage extends Storage {
     String[] otherNames = from.list(new java.io.FilenameFilter() {
         @Override
         public boolean accept(File dir, String name) {
-          return name.startsWith(BLOCK_SUBDIR_PREFIX)
-            || name.startsWith(COPY_FILE_PREFIX);
+          return name.startsWith(BLOCK_SUBDIR_PREFIX);
         }
       });
     for(int i = 0; i < otherNames.length; i++)