HDFS-5784. Reserve space in edit log header and fsimage header for feature flag section (cmccabe)
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1558974 13f79535-47bb-0310-9956-ffa450edef68
parent ccee5b3e44
commit a7ec44d41b
CHANGES.txt
@@ -522,6 +522,9 @@ Release 2.4.0 - UNRELEASED
     as a collection of storages (see breakdown of tasks below for features and
     contributors).
 
+    HDFS-5784. reserve space in edit log header and fsimage header for feature
+    flag section (cmccabe)
+
   IMPROVEMENTS
 
     HDFS-5267. Remove volatile from LightWeightHashSet. (Junping Du via llu)
org/apache/hadoop/hdfs/protocol/LayoutFlags.java (new file)
@@ -0,0 +1,64 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.protocol;
+
+import java.io.DataInputStream;
+import java.io.DataOutputStream;
+import java.io.IOException;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+
+import com.google.common.base.Joiner;
+import com.google.common.base.Preconditions;
+import com.google.common.collect.ImmutableSet;
+import com.google.common.collect.Sets;
+
+/**
+ * LayoutFlags represent features which the FSImage and edit logs can either
+ * support or not, independently of layout version.
+ *
+ * Note: all flags starting with 'test' are reserved for unit test purposes.
+ */
+@InterfaceAudience.Private
+public class LayoutFlags {
+  /**
+   * Load a LayoutFlags object from a stream.
+   *
+   * @param in            The stream to read from.
+   * @throws IOException
+   */
+  public static LayoutFlags read(DataInputStream in)
+      throws IOException {
+    int length = in.readInt();
+    if (length < 0) {
+      throw new IOException("The length of the feature flag section " +
+          "was negative at " + length + " bytes.");
+    } else if (length > 0) {
+      throw new IOException("Found feature flags which we can't handle. " +
+          "Please upgrade your software.");
+    }
+    return new LayoutFlags();
+  }
+
+  private LayoutFlags() {
+  }
+
+  public static void write(DataOutputStream out) throws IOException {
+    out.writeInt(0);
+  }
+}
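For readers tracing the new on-disk format: below is a minimal, standalone sketch (not part of the commit; the class name LayoutFlagsDemo and the literal -51 are illustrative) of the header layout this reserves. DataOutputStream.writeInt emits four big-endian bytes, so the empty flags section is exactly 00 00 00 00 on disk.

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;

public class LayoutFlagsDemo {
  public static void main(String[] args) throws IOException {
    ByteArrayOutputStream bos = new ByteArrayOutputStream();
    DataOutputStream out = new DataOutputStream(bos);
    out.writeInt(-51);  // layout version (illustrative value)
    out.writeInt(0);    // empty flags section, as LayoutFlags.write does
    out.flush();

    byte[] header = bos.toByteArray();  // 8 bytes: version + flags length
    DataInputStream in = new DataInputStream(new ByteArrayInputStream(header));
    int version = in.readInt();   // -51
    int flagsLen = in.readInt();  // 0; a positive value means flags this
                                  // software cannot handle (see read() above)
    System.out.println("version=" + version + ", flagsLen=" + flagsLen);
  }
}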
LayoutVersion.java
@@ -111,7 +111,8 @@ public class LayoutVersion {
         + "the new block instead of the entire block list"),
     CACHING(-49, "Support for cache pools and path-based caching"),
     ADD_DATANODE_AND_STORAGE_UUIDS(-50, "Replace StorageID with DatanodeUuid."
-        + " Use distinct StorageUuid per storage directory.");
+        + " Use distinct StorageUuid per storage directory."),
+    ADD_LAYOUT_FLAGS(-51, "Add support for layout flags.");
 
     final int lv;
     final int ancestorLV;
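The gating check used throughout this commit is LayoutVersion.supports(Feature.ADD_LAYOUT_FLAGS, v). HDFS layout versions are negative and decrease as features are added, so a feature introduced at -51 is supported by any on-disk version at or below -51. A standalone sketch of that comparison (class and method names here are made up for illustration):

public class SupportsSketch {
  // Feature layout version for ADD_LAYOUT_FLAGS, per the enum entry above.
  static final int ADD_LAYOUT_FLAGS_LV = -51;

  // "Newer" means more negative: -51 and below carry the flags section.
  static boolean supportsAddLayoutFlags(int onDiskVersion) {
    return onDiskVersion <= ADD_LAYOUT_FLAGS_LV;
  }

  public static void main(String[] args) {
    System.out.println(supportsAddLayoutFlags(-50)); // false: no flags section
    System.out.println(supportsAddLayoutFlags(-51)); // true: flags section present
  }
}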
EditLogFileInputStream.java
@@ -34,6 +34,9 @@ import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
+import org.apache.hadoop.hdfs.protocol.LayoutFlags;
+import org.apache.hadoop.hdfs.protocol.LayoutVersion;
+import org.apache.hadoop.hdfs.protocol.LayoutVersion.Feature;
 import org.apache.hadoop.hdfs.server.common.Storage;
 import org.apache.hadoop.hdfs.server.namenode.TransferFsImage.HttpGetFailedException;
 import org.apache.hadoop.hdfs.web.URLConnectionFactory;
@@ -146,6 +149,14 @@ public class EditLogFileInputStream extends EditLogInputStream {
       } catch (EOFException eofe) {
         throw new LogHeaderCorruptException("No header found in log");
       }
+      if (LayoutVersion.supports(Feature.ADD_LAYOUT_FLAGS, logVersion)) {
+        try {
+          LayoutFlags.read(dataIn);
+        } catch (EOFException eofe) {
+          throw new LogHeaderCorruptException("EOF while reading layout " +
+              "flags from log");
+        }
+      }
       reader = new FSEditLogOp.Reader(dataIn, tracker, logVersion);
       reader.setMaxOpSize(maxOpSize);
       state = State.OPEN;
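Note that LayoutFlags.read(dataIn) runs before the FSEditLogOp.Reader is constructed, leaving the stream positioned at the first opcode; a truncated flags section therefore surfaces as a corrupt header rather than a corrupt operation.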
EditLogFileOutputStream.java
@@ -32,6 +32,7 @@ import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
+import org.apache.hadoop.hdfs.protocol.LayoutFlags;
 import org.apache.hadoop.io.IOUtils;
 
 import com.google.common.annotations.VisibleForTesting;
@@ -132,6 +133,7 @@ public class EditLogFileOutputStream extends EditLogOutputStream {
   @VisibleForTesting
   public static void writeHeader(DataOutputStream out) throws IOException {
     out.writeInt(HdfsConstants.LAYOUT_VERSION);
+    LayoutFlags.write(out);
   }
 
   @Override
FSImageFormat.java
@@ -48,6 +48,7 @@ import org.apache.hadoop.fs.permission.PermissionStatus;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.LayoutVersion;
 import org.apache.hadoop.hdfs.protocol.LayoutVersion.Feature;
+import org.apache.hadoop.hdfs.protocol.LayoutFlags;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoUnderConstruction;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
@@ -261,6 +262,9 @@ public class FSImageFormat {
       }
       boolean supportSnapshot = LayoutVersion.supports(Feature.SNAPSHOT,
           imgVersion);
+      if (LayoutVersion.supports(Feature.ADD_LAYOUT_FLAGS, imgVersion)) {
+        LayoutFlags.read(in);
+      }
 
       // read namespaceID: first appeared in version -2
       in.readInt();
@@ -990,6 +994,7 @@ public class FSImageFormat {
       DataOutputStream out = new DataOutputStream(fos);
       try {
         out.writeInt(HdfsConstants.LAYOUT_VERSION);
+        LayoutFlags.write(out);
         // We use the non-locked version of getNamespaceInfo here since
         // the coordinating thread of saveNamespace already has read-locked
         // the namespace for us. If we attempt to take another readlock
ImageLoaderCurrent.java
@@ -28,6 +28,7 @@ import java.util.Map;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo.AdminStates;
+import org.apache.hadoop.hdfs.protocol.LayoutFlags;
 import org.apache.hadoop.hdfs.protocol.LayoutVersion;
 import org.apache.hadoop.hdfs.protocol.LayoutVersion.Feature;
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
@@ -126,7 +127,7 @@ class ImageLoaderCurrent implements ImageLoader {
       new SimpleDateFormat("yyyy-MM-dd HH:mm");
   private static int[] versions = { -16, -17, -18, -19, -20, -21, -22, -23,
       -24, -25, -26, -27, -28, -30, -31, -32, -33, -34, -35, -36, -37, -38, -39,
-      -40, -41, -42, -43, -44, -45, -46, -47, -48, -49, -50 };
+      -40, -41, -42, -43, -44, -45, -46, -47, -48, -49, -50, -51 };
   private int imageVersion = 0;
 
   private final Map<Long, Boolean> subtreeMap = new HashMap<Long, Boolean>();
@@ -157,6 +158,9 @@ class ImageLoaderCurrent implements ImageLoader {
       imageVersion = in.readInt();
       if( !canLoadVersion(imageVersion))
         throw new IOException("Cannot process fslayout version " + imageVersion);
+      if (LayoutVersion.supports(Feature.ADD_LAYOUT_FLAGS, imageVersion)) {
+        LayoutFlags.read(in);
+      }
 
       v.visit(ImageElement.IMAGE_VERSION, imageVersion);
       v.visit(ImageElement.NAMESPACE_ID, in.readInt());
TestJournalNode.java
@@ -191,6 +191,7 @@ public class TestJournalNode {
         "/getJournal?segmentTxId=1&jid=" + journalId));
     byte[] expected = Bytes.concat(
         Ints.toByteArray(HdfsConstants.LAYOUT_VERSION),
+        (new byte[] { 0, 0, 0, 0 }), // layout flags section
         EDITS_DATA);
 
     assertArrayEquals(expected, retrievedViaHttp);
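The four zero bytes spliced into the expected stream are exactly what LayoutFlags.write(out) emits (a single writeInt(0)): the empty layout flags section.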
TestFSEditLogLoader.java
@@ -377,8 +377,9 @@ public class TestFSEditLogLoader {
     File testDir = new File(TEST_DIR, "testValidateEmptyEditLog");
     SortedMap<Long, Long> offsetToTxId = Maps.newTreeMap();
     File logFile = prepareUnfinalizedTestEditLog(testDir, 0, offsetToTxId);
-    // Truncate the file so that there is nothing except the header
-    truncateFile(logFile, 4);
+    // Truncate the file so that there is nothing except the header and
+    // layout flags section.
+    truncateFile(logFile, 8);
     EditLogValidation validation =
         EditLogFileInputStream.validateEditLog(logFile);
     assertTrue(!validation.hasCorruptHeader());
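The truncation offset grows from 4 to 8 bytes because the header now carries the 4-byte layout version followed by the 4-byte flags-section length (compare the LayoutFlagsDemo sketch above).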
editsStored (binary file, not shown)
editsStored.xml
@@ -1,6 +1,6 @@
 <?xml version="1.0" encoding="UTF-8"?>
 <EDITS>
-  <EDITS_VERSION>-50</EDITS_VERSION>
+  <EDITS_VERSION>-51</EDITS_VERSION>
   <RECORD>
     <OPCODE>OP_START_LOG_SEGMENT</OPCODE>
     <DATA>