HDFS-5784. Reserve space in edit log header and fsimage header for feature flag section (cmccabe)

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1558974 13f79535-47bb-0310-9956-ffa450edef68
Colin McCabe 2014-01-17 01:32:05 +00:00
parent ccee5b3e44
commit a7ec44d41b
11 changed files with 97 additions and 5 deletions

View File

@@ -522,6 +522,9 @@ Release 2.4.0 - UNRELEASED
     as a collection of storages (see breakdown of tasks below for features and
     contributors).
 
+    HDFS-5784. reserve space in edit log header and fsimage header for feature
+    flag section (cmccabe)
+
   IMPROVEMENTS
 
     HDFS-5267. Remove volatile from LightWeightHashSet. (Junping Du via llu)

View File

@@ -0,0 +1,64 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hdfs.protocol;

import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;

import org.apache.hadoop.classification.InterfaceAudience;

import com.google.common.base.Joiner;
import com.google.common.base.Preconditions;
import com.google.common.collect.ImmutableSet;
import com.google.common.collect.Sets;

/**
 * LayoutFlags represent features which the FSImage and edit logs can either
 * support or not, independently of layout version.
 *
 * Note: all flags starting with 'test' are reserved for unit test purposes.
 */
@InterfaceAudience.Private
public class LayoutFlags {
  /**
   * Load a LayoutFlags object from a stream.
   *
   * @param in            The stream to read from.
   * @throws IOException
   */
  public static LayoutFlags read(DataInputStream in)
      throws IOException {
    int length = in.readInt();
    if (length < 0) {
      throw new IOException("The length of the feature flag section " +
          "was negative at " + length + " bytes.");
    } else if (length > 0) {
      throw new IOException("Found feature flags which we can't handle. " +
          "Please upgrade your software.");
    }
    return new LayoutFlags();
  }

  private LayoutFlags() {
  }

  public static void write(DataOutputStream out) throws IOException {
    out.writeInt(0);
  }
}

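For reference, here is a minimal round-trip sketch of the wire format the new class defines (the driver class below and its name are hypothetical; only LayoutFlags comes from this commit). write() emits a single big-endian four-byte section length, always zero for now, and read() accepts only a zero length, rejecting negative lengths as corruption and positive lengths as flags from newer software:

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;

import org.apache.hadoop.hdfs.protocol.LayoutFlags;

public class LayoutFlagsRoundTrip {          // hypothetical test driver
  public static void main(String[] args) throws IOException {
    ByteArrayOutputStream bos = new ByteArrayOutputStream();
    LayoutFlags.write(new DataOutputStream(bos));
    byte[] section = bos.toByteArray();      // { 0, 0, 0, 0 }: empty section

    // Reading an empty section back succeeds and yields a LayoutFlags object.
    LayoutFlags.read(new DataInputStream(new ByteArrayInputStream(section)));

    // A positive length simulates flags written by newer software: read()
    // throws, telling the operator to upgrade rather than misparse the file.
    DataInputStream newer = new DataInputStream(
        new ByteArrayInputStream(new byte[] { 0, 0, 0, 1 }));
    try {
      LayoutFlags.read(newer);
    } catch (IOException e) {
      System.out.println("rejected as expected: " + e.getMessage());
    }
  }
}

Reserving the length word now is the whole point of the commit: future versions can put real flags in the section without breaking readers that already know to expect it.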
View File

@@ -111,7 +111,8 @@ public static enum Feature {
         + "the new block instead of the entire block list"),
     CACHING(-49, "Support for cache pools and path-based caching"),
     ADD_DATANODE_AND_STORAGE_UUIDS(-50, "Replace StorageID with DatanodeUuid."
-        + " Use distinct StorageUuid per storage directory.");
+        + " Use distinct StorageUuid per storage directory."),
+    ADD_LAYOUT_FLAGS(-51, "Add support for layout flags.");
 
     final int lv;
     final int ancestorLV;

View File

@@ -34,6 +34,9 @@
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
+import org.apache.hadoop.hdfs.protocol.LayoutFlags;
+import org.apache.hadoop.hdfs.protocol.LayoutVersion;
+import org.apache.hadoop.hdfs.protocol.LayoutVersion.Feature;
 import org.apache.hadoop.hdfs.server.common.Storage;
 import org.apache.hadoop.hdfs.server.namenode.TransferFsImage.HttpGetFailedException;
 import org.apache.hadoop.hdfs.web.URLConnectionFactory;

@@ -146,6 +149,14 @@ private void init() throws LogHeaderCorruptException, IOException {
     } catch (EOFException eofe) {
       throw new LogHeaderCorruptException("No header found in log");
     }
+    if (LayoutVersion.supports(Feature.ADD_LAYOUT_FLAGS, logVersion)) {
+      try {
+        LayoutFlags.read(dataIn);
+      } catch (EOFException eofe) {
+        throw new LogHeaderCorruptException("EOF while reading layout " +
+            "flags from log");
+      }
+    }
     reader = new FSEditLogOp.Reader(dataIn, tracker, logVersion);
     reader.setMaxOpSize(maxOpSize);
     state = State.OPEN;

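The read path above only consumes the flags section when the log's version is new enough, so logs written before ADD_LAYOUT_FLAGS still parse byte-for-byte as before. As a rough illustration, for a linear feature history the LayoutVersion.supports() gate reduces to a version comparison like the following (illustrative helper, not the real implementation, which consults a per-version feature map):

public class VersionGateSketch {
  // Hypothetical stand-in for LayoutVersion.supports(Feature.ADD_LAYOUT_FLAGS, v):
  // layout versions are negative and grow more negative as features are added,
  // so a log at version v carries the flags section iff v <= -51.
  static boolean hasLayoutFlags(int logVersion) {
    return logVersion <= -51;
  }

  public static void main(String[] args) {
    System.out.println(hasLayoutFlags(-50));  // false: pre-flags log, no section
    System.out.println(hasLayoutFlags(-51));  // true: four-byte section expected
  }
}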
View File

@@ -32,6 +32,7 @@
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
+import org.apache.hadoop.hdfs.protocol.LayoutFlags;
 import org.apache.hadoop.io.IOUtils;
 
 import com.google.common.annotations.VisibleForTesting;

@@ -132,6 +133,7 @@ public void create() throws IOException {
   @VisibleForTesting
   public static void writeHeader(DataOutputStream out) throws IOException {
     out.writeInt(HdfsConstants.LAYOUT_VERSION);
+    LayoutFlags.write(out);
   }
 
   @Override

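With LayoutFlags.write() appended, the edit log header written by writeHeader() grows from four bytes to eight, which is also why the test later in this commit truncates at offset 8 instead of 4. A sketch of the resulting bytes, assuming LAYOUT_VERSION is -51 after this change (the class below is illustrative, not part of the commit):

import java.io.ByteArrayOutputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import java.util.Arrays;

public class EditLogHeaderSketch {
  public static void main(String[] args) throws IOException {
    ByteArrayOutputStream bos = new ByteArrayOutputStream();
    DataOutputStream out = new DataOutputStream(bos);
    out.writeInt(-51);  // HdfsConstants.LAYOUT_VERSION as of this change
    out.writeInt(0);    // what LayoutFlags.write(out) emits: empty flags section
    System.out.println(Arrays.toString(bos.toByteArray()));
    // prints: [-1, -1, -1, -51, 0, 0, 0, 0]  -- an 8-byte header
  }
}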
View File

@@ -48,6 +48,7 @@
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.LayoutVersion;
 import org.apache.hadoop.hdfs.protocol.LayoutVersion.Feature;
+import org.apache.hadoop.hdfs.protocol.LayoutFlags;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoUnderConstruction;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;

@@ -261,6 +262,9 @@ void load(File curFile) throws IOException {
       }
       boolean supportSnapshot = LayoutVersion.supports(Feature.SNAPSHOT,
           imgVersion);
+      if (LayoutVersion.supports(Feature.ADD_LAYOUT_FLAGS, imgVersion)) {
+        LayoutFlags.read(in);
+      }
 
       // read namespaceID: first appeared in version -2
       in.readInt();

@@ -990,6 +994,7 @@ void save(File newFile, FSImageCompression compression) throws IOException {
       DataOutputStream out = new DataOutputStream(fos);
       try {
         out.writeInt(HdfsConstants.LAYOUT_VERSION);
+        LayoutFlags.write(out);
         // We use the non-locked version of getNamespaceInfo here since
         // the coordinating thread of saveNamespace already has read-locked
         // the namespace for us. If we attempt to take another readlock

View File

@@ -28,6 +28,7 @@
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo.AdminStates;
+import org.apache.hadoop.hdfs.protocol.LayoutFlags;
 import org.apache.hadoop.hdfs.protocol.LayoutVersion;
 import org.apache.hadoop.hdfs.protocol.LayoutVersion.Feature;
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;

@@ -126,7 +127,7 @@ class ImageLoaderCurrent implements ImageLoader {
       new SimpleDateFormat("yyyy-MM-dd HH:mm");
   private static int[] versions = { -16, -17, -18, -19, -20, -21, -22, -23,
       -24, -25, -26, -27, -28, -30, -31, -32, -33, -34, -35, -36, -37, -38, -39,
-      -40, -41, -42, -43, -44, -45, -46, -47, -48, -49, -50 };
+      -40, -41, -42, -43, -44, -45, -46, -47, -48, -49, -50, -51 };
   private int imageVersion = 0;
 
   private final Map<Long, Boolean> subtreeMap = new HashMap<Long, Boolean>();

@@ -157,6 +158,9 @@ public void loadImage(DataInputStream in, ImageVisitor v,
       imageVersion = in.readInt();
       if( !canLoadVersion(imageVersion))
         throw new IOException("Cannot process fslayout version " + imageVersion);
+      if (LayoutVersion.supports(Feature.ADD_LAYOUT_FLAGS, imageVersion)) {
+        LayoutFlags.read(in);
+      }
 
       v.visit(ImageElement.IMAGE_VERSION, imageVersion);
       v.visit(ImageElement.NAMESPACE_ID, in.readInt());

View File

@@ -191,6 +191,7 @@ public void testHttpServer() throws Exception {
         "/getJournal?segmentTxId=1&jid=" + journalId));
     byte[] expected = Bytes.concat(
         Ints.toByteArray(HdfsConstants.LAYOUT_VERSION),
+        (new byte[] { 0, 0, 0, 0 }), // layout flags section
         EDITS_DATA);
     assertArrayEquals(expected, retrievedViaHttp);

View File

@@ -377,8 +377,9 @@ public void testValidateEmptyEditLog() throws IOException {
     File testDir = new File(TEST_DIR, "testValidateEmptyEditLog");
     SortedMap<Long, Long> offsetToTxId = Maps.newTreeMap();
     File logFile = prepareUnfinalizedTestEditLog(testDir, 0, offsetToTxId);
-    // Truncate the file so that there is nothing except the header
-    truncateFile(logFile, 4);
+    // Truncate the file so that there is nothing except the header and
+    // layout flags section.
+    truncateFile(logFile, 8);
     EditLogValidation validation =
         EditLogFileInputStream.validateEditLog(logFile);
     assertTrue(!validation.hasCorruptHeader());

View File

@@ -1,6 +1,6 @@
 <?xml version="1.0" encoding="UTF-8"?>
 <EDITS>
-  <EDITS_VERSION>-50</EDITS_VERSION>
+  <EDITS_VERSION>-51</EDITS_VERSION>
   <RECORD>
     <OPCODE>OP_START_LOG_SEGMENT</OPCODE>
     <DATA>