HDFS-5752. Add a new DFSAdmin command to query, start and finalize rolling upgrade.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/HDFS-5535@1558299 13f79535-47bb-0310-9956-ffa450edef68
Author: Tsz-wo Sze
Date: 2014-01-15 05:56:17 +00:00
Parent: 1c7aa44d62
Commit: 5fe73a986e
14 changed files with 397 additions and 26 deletions


@@ -4,3 +4,6 @@ HDFS-5535 subtasks:
jing9)
HDFS-5645. Support upgrade marker in editlog streams. (szetszwo)
HDFS-5752. Add a new DFSAdmin command to query, start and finalize rolling
upgrade. (szetszwo)
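For a quick sense of the new subcommand, here is a minimal sketch (not part of this commit; it mirrors the TestRollingUpgrade test at the end) that drives it programmatically. The class name is illustrative, and a reachable cluster is assumed in the default Configuration:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.tools.DFSAdmin;
import org.apache.hadoop.util.ToolRunner;

public class RollingUpgradeStarter {
  public static void main(String[] args) throws Exception {
    // Roughly equivalent to the shell form: hdfs dfsadmin -rollingUpgrade start
    final int exitCode = ToolRunner.run(new DFSAdmin(new Configuration()),
        new String[] {"-rollingUpgrade", "start"});
    System.exit(exitCode);
  }
}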

DFSClient.java

@@ -124,11 +124,13 @@ import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.protocol.HdfsBlocksMetadata;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.RollingUpgradeAction;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
import org.apache.hadoop.hdfs.protocol.NSQuotaExceededException;
import org.apache.hadoop.hdfs.protocol.RollingUpgradeInfo;
import org.apache.hadoop.hdfs.protocol.SnapshotAccessControlException;
import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport;
import org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus;
@@ -2450,6 +2452,10 @@ public class DFSClient implements java.io.Closeable {
namenode.finalizeUpgrade();
}
RollingUpgradeInfo rollingUpgrade(RollingUpgradeAction action) throws IOException {
return namenode.rollingUpgrade(action);
}
/**
*/
@Deprecated

DistributedFileSystem.java

@@ -59,6 +59,7 @@ import org.apache.hadoop.hdfs.client.HdfsAdmin;
import org.apache.hadoop.hdfs.client.HdfsDataInputStream;
import org.apache.hadoop.hdfs.client.HdfsDataOutputStream;
import org.apache.hadoop.hdfs.protocol.CacheDirectiveEntry;
import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo;
import org.apache.hadoop.hdfs.protocol.CachePoolEntry;
import org.apache.hadoop.hdfs.protocol.CachePoolInfo;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
@@ -66,11 +67,12 @@ import org.apache.hadoop.hdfs.protocol.DirectoryListing;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.RollingUpgradeAction;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
import org.apache.hadoop.hdfs.protocol.HdfsLocatedFileStatus;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo;
import org.apache.hadoop.hdfs.protocol.RollingUpgradeInfo;
import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport;
import org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus;
import org.apache.hadoop.hdfs.security.token.block.InvalidBlockTokenException;
@@ -991,6 +993,14 @@ public class DistributedFileSystem extends FileSystem {
dfs.finalizeUpgrade();
}
/**
* Rolling upgrade: start/finalize/query.
*/
public RollingUpgradeInfo rollingUpgrade(RollingUpgradeAction action)
throws IOException {
return dfs.rollingUpgrade(action);
}
/*
* Requests the namenode to dump data structures into the specified
* file.
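A hedged sketch of calling the new API directly; note that at this commit only the START action is handled on the NameNode side (QUERY and FINALIZE still throw UnsupportedActionException, see NameNodeRpcServer below). The class name is illustrative, and fs.defaultFS is assumed to point at an HDFS cluster:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.RollingUpgradeAction;
import org.apache.hadoop.hdfs.protocol.RollingUpgradeInfo;

public class StartUpgrade {
  public static void main(String[] args) throws Exception {
    final DistributedFileSystem dfs =
        (DistributedFileSystem) FileSystem.get(new Configuration());
    // START logs an upgrade marker in the edit log and reports the start time.
    final RollingUpgradeInfo info =
        dfs.rollingUpgrade(RollingUpgradeAction.START);
    System.out.println(info);
  }
}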

ClientProtocol.java

@@ -36,6 +36,7 @@ import org.apache.hadoop.fs.ParentNotDirectoryException;
import org.apache.hadoop.fs.UnresolvedLinkException;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.RollingUpgradeAction;
import org.apache.hadoop.hdfs.security.token.block.DataEncryptionKey;
import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenSelector;
@@ -760,6 +761,15 @@ public interface ClientProtocol {
@Idempotent
public void finalizeUpgrade() throws IOException;
/**
* Rolling upgrade operations.
* @param action either query, start or finalize.
* @return rolling upgrade information.
*/
@Idempotent
public RollingUpgradeInfo rollingUpgrade(RollingUpgradeAction action)
throws IOException;
/**
* @return CorruptFileBlocks, containing a list of corrupt files (with
* duplicates if there is more than one corrupt block in a file)

HdfsConstants.java

@@ -17,6 +17,9 @@
*/
package org.apache.hadoop.hdfs.protocol;
import java.util.HashMap;
import java.util.Map;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSConfigKeys;
@@ -78,6 +81,24 @@ public class HdfsConstants {
SAFEMODE_LEAVE, SAFEMODE_ENTER, SAFEMODE_GET;
}
public static enum RollingUpgradeAction {
QUERY, START, FINALIZE;
private static final Map<String, RollingUpgradeAction> MAP
= new HashMap<String, RollingUpgradeAction>();
static {
MAP.put("", QUERY);
for(RollingUpgradeAction a : values()) {
MAP.put(a.name(), a);
}
}
/** Convert the given String to a RollingUpgradeAction. */
public static RollingUpgradeAction fromString(String s) {
return MAP.get(s.toUpperCase());
}
}
// type of the datanode report
public static enum DatanodeReportType {
ALL, LIVE, DEAD
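The map above is what gives the command its defaulting behavior; a small sketch of fromString (class name illustrative):

import org.apache.hadoop.hdfs.protocol.HdfsConstants.RollingUpgradeAction;

public class ActionLookup {
  public static void main(String[] args) {
    // "" maps to QUERY, lookup is case-insensitive via toUpperCase,
    // and unknown names return null.
    System.out.println(RollingUpgradeAction.fromString(""));      // QUERY
    System.out.println(RollingUpgradeAction.fromString("Start")); // START
    System.out.println(RollingUpgradeAction.fromString("bogus")); // null
  }
}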

RollingUpgradeInfo.java

@@ -0,0 +1,65 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.protocol;
import java.util.Date;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
/**
* Rolling upgrade information
*/
@InterfaceAudience.Private
@InterfaceStability.Evolving
public class RollingUpgradeInfo {
private long startTime;
private long finalizeTime;
public RollingUpgradeInfo(long startTime, long finalizeTime) {
this.startTime = startTime;
this.finalizeTime = finalizeTime;
}
public boolean isStarted() {
return startTime != 0;
}
/** @return The rolling upgrade starting time. */
public long getStartTime() {
return startTime;
}
public boolean isFinalized() {
return finalizeTime != 0;
}
public long getFinalizeTime() {
return finalizeTime;
}
@Override
public String toString() {
return " Start Time: " + (startTime == 0? "<NOT STARTED>": timestamp2String(startTime))
+ "\n Finalize Time: " + (finalizeTime == 0? "<NOT FINALIZED>": timestamp2String(finalizeTime));
}
private static String timestamp2String(long timestamp) {
return new Date(timestamp) + " (=" + timestamp + ")";
}
}
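Both timestamps use a zero-means-unset convention, which isStarted and isFinalized key off; a quick illustration (class name illustrative):

import org.apache.hadoop.hdfs.protocol.RollingUpgradeInfo;

public class InfoConvention {
  public static void main(String[] args) {
    // Started but not yet finalized: finalizeTime stays 0.
    final RollingUpgradeInfo info =
        new RollingUpgradeInfo(System.currentTimeMillis(), 0L);
    System.out.println(info.isStarted());   // true
    System.out.println(info.isFinalized()); // false
    System.out.println(info); // the finalize time prints as <NOT FINALIZED>
  }
}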

ClientNamenodeProtocolServerSideTranslatorPB.java

@@ -22,11 +22,12 @@ import java.util.List;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.fs.BatchedRemoteIterator.BatchedEntries;
import org.apache.hadoop.fs.ContentSummary;
import org.apache.hadoop.fs.FsServerDefaults;
import org.apache.hadoop.fs.BatchedRemoteIterator.BatchedEntries;
import org.apache.hadoop.fs.Options.Rename;
import org.apache.hadoop.hdfs.protocol.CacheDirectiveEntry;
import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo;
import org.apache.hadoop.hdfs.protocol.CachePoolEntry;
import org.apache.hadoop.hdfs.protocol.ClientProtocol;
import org.apache.hadoop.hdfs.protocol.CorruptFileBlocks;
@@ -34,17 +35,17 @@ import org.apache.hadoop.hdfs.protocol.DirectoryListing;
import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo;
import org.apache.hadoop.hdfs.protocol.RollingUpgradeInfo;
import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport;
import org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AbandonBlockRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AbandonBlockResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddBlockRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddBlockResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddCachePoolRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddCachePoolResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddCacheDirectiveRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddCacheDirectiveResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddCachePoolRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddCachePoolResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AllowSnapshotRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AllowSnapshotResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AppendRequestProto;
@@ -100,28 +101,28 @@ import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSna
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshottableDirListingResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.IsFileClosedRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.IsFileClosedResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCacheDirectivesRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCacheDirectivesResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCachePoolsRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCachePoolsResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCorruptFileBlocksRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCorruptFileBlocksResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCacheDirectivesRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCacheDirectivesResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MetaSaveRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MetaSaveResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MkdirsRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MkdirsResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ModifyCachePoolRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ModifyCachePoolResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ModifyCacheDirectiveRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ModifyCacheDirectiveResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ModifyCachePoolRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ModifyCachePoolResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RecoverLeaseRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RecoverLeaseResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RefreshNodesRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RefreshNodesResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RemoveCachePoolRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RemoveCachePoolResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RemoveCacheDirectiveRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RemoveCacheDirectiveResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RemoveCachePoolRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RemoveCachePoolResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.Rename2RequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.Rename2ResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenameRequestProto;
@@ -136,6 +137,8 @@ import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.Restor
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RestoreFailedStorageResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RollEditsRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RollEditsResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RollingUpgradeRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RollingUpgradeResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SaveNamespaceRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SaveNamespaceResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetBalancerBandwidthRequestProto;
@@ -681,6 +684,20 @@ public class ClientNamenodeProtocolServerSideTranslatorPB implements
}
}
@Override
public RollingUpgradeResponseProto rollingUpgrade(RpcController controller,
RollingUpgradeRequestProto req) throws ServiceException {
try {
final RollingUpgradeInfo info = server.rollingUpgrade(
PBHelper.convert(req.getAction()));
return RollingUpgradeResponseProto.newBuilder()
.setRollingUpgradeInfo(PBHelper.convert(info))
.build();
} catch (IOException e) {
throw new ServiceException(e);
}
}
@Override
public ListCorruptFileBlocksResponseProto listCorruptFileBlocks(
RpcController controller, ListCorruptFileBlocksRequestProto req)

ClientNamenodeProtocolTranslatorPB.java

@@ -37,8 +37,9 @@ import org.apache.hadoop.fs.UnresolvedLinkException;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.protocol.AlreadyBeingCreatedException;
import org.apache.hadoop.hdfs.protocol.CacheDirectiveEntry;
import org.apache.hadoop.hdfs.protocol.CachePoolInfo;
import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo;
import org.apache.hadoop.hdfs.protocol.CachePoolEntry;
import org.apache.hadoop.hdfs.protocol.CachePoolInfo;
import org.apache.hadoop.hdfs.protocol.ClientProtocol;
import org.apache.hadoop.hdfs.protocol.CorruptFileBlocks;
import org.apache.hadoop.hdfs.protocol.DSQuotaExceededException;
@@ -47,18 +48,19 @@ import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.DirectoryListing;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.RollingUpgradeAction;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
import org.apache.hadoop.hdfs.protocol.NSQuotaExceededException;
import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo;
import org.apache.hadoop.hdfs.protocol.RollingUpgradeInfo;
import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport;
import org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AbandonBlockRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddBlockRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddCachePoolRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddCacheDirectiveRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddCachePoolRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AllowSnapshotRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AppendRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AppendResponseProto;
@@ -97,19 +99,19 @@ import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSna
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshottableDirListingRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshottableDirListingResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.IsFileClosedRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCacheDirectivesRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCacheDirectivesResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCachePoolsRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCachePoolsResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCorruptFileBlocksRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCacheDirectivesRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCacheDirectivesResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MetaSaveRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MkdirsRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ModifyCachePoolRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ModifyCacheDirectiveRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ModifyCachePoolRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RecoverLeaseRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RefreshNodesRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RemoveCachePoolRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RemoveCacheDirectiveRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RemoveCachePoolRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.Rename2RequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenameRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenameSnapshotRequestProto;
@@ -118,6 +120,9 @@ import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.Report
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RestoreFailedStorageRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RollEditsRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RollEditsResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RollingUpgradeInfoProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RollingUpgradeRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RollingUpgradeResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SaveNamespaceRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetBalancerBandwidthRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetOwnerRequestProto;
@@ -635,6 +640,19 @@ public class ClientNamenodeProtocolTranslatorPB implements
}
}
@Override
public RollingUpgradeInfo rollingUpgrade(RollingUpgradeAction action) throws IOException {
final RollingUpgradeRequestProto r = RollingUpgradeRequestProto.newBuilder()
.setAction(PBHelper.convert(action)).build();
try {
final RollingUpgradeResponseProto proto = rpcProxy.rollingUpgrade(null, r);
final RollingUpgradeInfoProto info = proto.getRollingUpgradeInfo();
return new RollingUpgradeInfo(info.getStartTime(), info.getFinalizeTime());
} catch (ServiceException e) {
throw ProtobufHelper.getRemoteException(e);
}
}
@Override
public CorruptFileBlocks listCorruptFileBlocks(String path, String cookie)
throws IOException {

PBHelper.java

@@ -39,6 +39,7 @@ import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.StorageType;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.CacheDirectiveEntry;
import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo;
import org.apache.hadoop.hdfs.protocol.CacheDirectiveStats;
import org.apache.hadoop.hdfs.protocol.CachePoolEntry;
import org.apache.hadoop.hdfs.protocol.CachePoolInfo;
@@ -47,23 +48,25 @@ import org.apache.hadoop.hdfs.protocol.ClientProtocol;
import org.apache.hadoop.hdfs.protocol.CorruptFileBlocks;
import org.apache.hadoop.hdfs.protocol.DatanodeID;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo;
import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo.AdminStates;
import org.apache.hadoop.hdfs.protocol.DirectoryListing;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.RollingUpgradeAction;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
import org.apache.hadoop.hdfs.protocol.HdfsLocatedFileStatus;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
import org.apache.hadoop.hdfs.protocol.RollingUpgradeInfo;
import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport;
import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport.DiffReportEntry;
import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport.DiffType;
import org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveEntryProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveInfoExpirationProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveInfoProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveStatsProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheFlagProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CachePoolEntryProto;
@@ -72,8 +75,9 @@ import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheP
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateFlagProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DatanodeReportTypeProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsStatsResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RollingUpgradeActionProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RollingUpgradeInfoProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SafeModeActionProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveInfoProto;
import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProto;
import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto;
import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockIdCommandProto;
@@ -1421,6 +1425,39 @@ public class PBHelper {
}
}
public static RollingUpgradeActionProto convert(RollingUpgradeAction a) {
switch (a) {
case QUERY:
return RollingUpgradeActionProto.QUERY;
case START:
return RollingUpgradeActionProto.START;
case FINALIZE:
return RollingUpgradeActionProto.FINALIZE;
default:
throw new IllegalArgumentException("Unexpected value: " + a);
}
}
public static RollingUpgradeAction convert(RollingUpgradeActionProto a) {
switch (a) {
case QUERY:
return RollingUpgradeAction.QUERY;
case START:
return RollingUpgradeAction.START;
case FINALIZE:
return RollingUpgradeAction.FINALIZE;
default:
throw new IllegalArgumentException("Unexpected value: " + a);
}
}
public static RollingUpgradeInfoProto convert(RollingUpgradeInfo info) {
return RollingUpgradeInfoProto.newBuilder()
.setStartTime(info.getStartTime())
.setFinalizeTime(info.getFinalizeTime())
.build();
}
public static CorruptFileBlocks convert(CorruptFileBlocksProto c) {
if (c == null)
return null;
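The action converters are symmetric, while RollingUpgradeInfo is only converted Java-to-proto here (the client-side translator rebuilds it field by field). A round-trip sketch under those definitions (class name illustrative):

import org.apache.hadoop.hdfs.protocol.HdfsConstants.RollingUpgradeAction;
import org.apache.hadoop.hdfs.protocol.RollingUpgradeInfo;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RollingUpgradeActionProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RollingUpgradeInfoProto;
import org.apache.hadoop.hdfs.protocolPB.PBHelper;

public class ConvertRoundTrip {
  public static void main(String[] args) {
    // Enum: Java -> proto -> Java.
    final RollingUpgradeActionProto p =
        PBHelper.convert(RollingUpgradeAction.FINALIZE);
    System.out.println(PBHelper.convert(p)); // FINALIZE

    // Info: Java -> proto only, at this commit.
    final RollingUpgradeInfoProto ip =
        PBHelper.convert(new RollingUpgradeInfo(1L, 0L));
    System.out.println(ip.getStartTime() + " " + ip.getFinalizeTime()); // 1 0
  }
}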

FSNamesystem.java

@@ -168,6 +168,7 @@ import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
import org.apache.hadoop.hdfs.protocol.QuotaExceededException;
import org.apache.hadoop.hdfs.protocol.RecoveryInProgressException;
import org.apache.hadoop.hdfs.protocol.RollingUpgradeInfo;
import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport;
import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport.DiffReportEntry;
import org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus;
@@ -7053,13 +7054,15 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
}
}
void addUpgradeMarker() throws IOException {
RollingUpgradeInfo addUpgradeMarker() throws IOException {
final long startTime;
checkSuperuserPrivilege();
checkOperation(OperationCategory.WRITE);
writeLock();
try {
checkOperation(OperationCategory.WRITE);
startTime = now();
getEditLog().logUpgradeMarker();
} finally {
writeUnlock();
@@ -7069,6 +7072,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
if (auditLog.isInfoEnabled() && isExternalInvocation()) {
logAuditEvent(true, "upgrade", null, null, null);
}
return new RollingUpgradeInfo(startTime, 0L);
}
long addCacheDirective(CacheDirectiveInfo directive, EnumSet<CacheFlag> flags)

NameNodeRpcServer.java

@@ -37,6 +37,7 @@ import java.util.Set;
import org.apache.commons.logging.Log;
import org.apache.hadoop.HadoopIllegalArgumentException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.BatchedRemoteIterator.BatchedEntries;
import org.apache.hadoop.fs.CacheFlag;
import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.fs.ContentSummary;
@@ -48,7 +49,6 @@ import org.apache.hadoop.fs.Options;
import org.apache.hadoop.fs.ParentNotDirectoryException;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.UnresolvedLinkException;
import org.apache.hadoop.fs.BatchedRemoteIterator.BatchedEntries;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.fs.permission.PermissionStatus;
import org.apache.hadoop.ha.HAServiceStatus;
@@ -64,8 +64,8 @@ import org.apache.hadoop.hdfs.protocol.AlreadyBeingCreatedException;
import org.apache.hadoop.hdfs.protocol.BlockListAsLongs;
import org.apache.hadoop.hdfs.protocol.CacheDirectiveEntry;
import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo;
import org.apache.hadoop.hdfs.protocol.CachePoolInfo;
import org.apache.hadoop.hdfs.protocol.CachePoolEntry;
import org.apache.hadoop.hdfs.protocol.CachePoolInfo;
import org.apache.hadoop.hdfs.protocol.CorruptFileBlocks;
import org.apache.hadoop.hdfs.protocol.DSQuotaExceededException;
import org.apache.hadoop.hdfs.protocol.DatanodeID;
@@ -74,6 +74,7 @@ import org.apache.hadoop.hdfs.protocol.DirectoryListing;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.RollingUpgradeAction;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
@@ -81,6 +82,7 @@ import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
import org.apache.hadoop.hdfs.protocol.NSQuotaExceededException;
import org.apache.hadoop.hdfs.protocol.QuotaExceededException;
import org.apache.hadoop.hdfs.protocol.RecoveryInProgressException;
import org.apache.hadoop.hdfs.protocol.RollingUpgradeInfo;
import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport;
import org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus;
import org.apache.hadoop.hdfs.protocol.UnregisteredNodeException;
@@ -858,6 +860,18 @@ class NameNodeRpcServer implements NamenodeProtocols {
namesystem.finalizeUpgrade();
}
@Override // ClientProtocol
public RollingUpgradeInfo rollingUpgrade(RollingUpgradeAction action) throws IOException {
LOG.info("rollingUpgrade " + action);
switch(action) {
case START:
return namesystem.addUpgradeMarker();
default:
// TODO: support other actions.
throw new UnsupportedActionException(action + " is not yet supported.");
}
}
@Override // ClientProtocol
public void metaSave(String filename) throws IOException {
namesystem.metaSave(filename);
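Because only START is wired up, a QUERY or FINALIZE against this NameNode comes back to the client as a RemoteException; a hedged sketch of that failure mode (class name illustrative):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.RollingUpgradeAction;
import org.apache.hadoop.ipc.RemoteException;

public class QueryNotYetSupported {
  public static void main(String[] args) throws Exception {
    final DistributedFileSystem dfs =
        (DistributedFileSystem) FileSystem.get(new Configuration());
    try {
      dfs.rollingUpgrade(RollingUpgradeAction.QUERY);
    } catch (RemoteException e) {
      // Expected at this commit: UnsupportedActionException,
      // "QUERY is not yet supported."
      System.out.println(e.getMessage());
    }
  }
}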

DFSAdmin.java

@@ -49,7 +49,9 @@ import org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.RollingUpgradeAction;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
import org.apache.hadoop.hdfs.protocol.RollingUpgradeInfo;
import org.apache.hadoop.hdfs.protocol.SnapshotException;
import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.apache.hadoop.hdfs.server.namenode.TransferFsImage;
@@ -63,6 +65,8 @@ import org.apache.hadoop.security.authorize.RefreshAuthorizationPolicyProtocol;
import org.apache.hadoop.util.StringUtils;
import org.apache.hadoop.util.ToolRunner;
import com.google.common.base.Preconditions;
/**
* This class provides some DFS administrative access shell commands.
*/
@@ -271,6 +275,58 @@ public class DFSAdmin extends FsShell {
}
}
private static class RollingUpgradeCommand {
static final String NAME = "rollingUpgrade";
static final String USAGE = "-"+NAME+" [<query|start|finalize>]";
static final String DESCRIPTION = USAGE + ":\n"
+ " query: query current rolling upgrade status.\n"
+ " start: start rolling upgrade."
+ " finalize: finalize rolling upgrade.";
/** Check if a command is the rollingUpgrade command
*
* @param cmd A string representation of a command starting with "-"
* @return true if this is a rollingUpgrade command; false otherwise
*/
static boolean matches(String cmd) {
return ("-"+NAME).equals(cmd);
}
static int run(DistributedFileSystem dfs, String[] argv, int idx) throws IOException {
final RollingUpgradeAction action = RollingUpgradeAction.fromString(
argv.length >= 2? argv[1]: "");
if (action == null) {
throw new IllegalArgumentException("Failed to convert \"" + argv[1]
+"\" to " + RollingUpgradeAction.class.getSimpleName());
}
System.out.println(action + " rolling upgrade ...");
final RollingUpgradeInfo info = dfs.rollingUpgrade(action);
switch(action){
case QUERY:
if (info.isStarted()) {
System.out.println("Rolling upgrade is in progress:");
System.out.println(info);
} else {
System.out.println("There is no rolling upgrade in progress.");
}
break;
case START:
Preconditions.checkState(info.isStarted());
System.out.println("Rolling upgrade is started:");
System.out.println(info);
break;
case FINALIZE:
Preconditions.checkState(info.isFinalized());
System.out.println("Rolling upgrade is finalized:");
System.out.println(info);
break;
}
return 0;
}
}
/**
* Construct a DFSAdmin object.
*/
@@ -575,9 +631,11 @@ public class DFSAdmin extends FsShell {
"\t[" + ClearQuotaCommand.USAGE +"]\n" +
"\t[" + SetSpaceQuotaCommand.USAGE + "]\n" +
"\t[" + ClearSpaceQuotaCommand.USAGE +"]\n" +
"\t[-finalizeUpgrade]\n" +
"\t[" + RollingUpgradeCommand.USAGE +"]\n" +
"\t[-refreshServiceAcl]\n" +
"\t[-refreshUserToGroupsMappings]\n" +
"\t[refreshSuperUserGroupsConfiguration]\n" +
"\t[-refreshSuperUserGroupsConfiguration]\n" +
"\t[-printTopology]\n" +
"\t[-refreshNamenodes datanodehost:port]\n"+
"\t[-deleteBlockPool datanodehost:port blockpoolId [force]]\n"+
@@ -699,6 +757,8 @@ public class DFSAdmin extends FsShell {
System.out.println(refreshNodes);
} else if ("finalizeUpgrade".equals(cmd)) {
System.out.println(finalizeUpgrade);
} else if (RollingUpgradeCommand.matches("-"+cmd)) {
System.out.println(RollingUpgradeCommand.DESCRIPTION);
} else if ("metasave".equals(cmd)) {
System.out.println(metaSave);
} else if (SetQuotaCommand.matches("-"+cmd)) {
@@ -740,6 +800,7 @@ public class DFSAdmin extends FsShell {
System.out.println(restoreFailedStorage);
System.out.println(refreshNodes);
System.out.println(finalizeUpgrade);
System.out.println(RollingUpgradeCommand.DESCRIPTION);
System.out.println(metaSave);
System.out.println(SetQuotaCommand.DESCRIPTION);
System.out.println(ClearQuotaCommand.DESCRIPTION);
@@ -952,6 +1013,9 @@ public class DFSAdmin extends FsShell {
} else if ("-finalizeUpgrade".equals(cmd)) {
System.err.println("Usage: java DFSAdmin"
+ " [-finalizeUpgrade]");
} else if (RollingUpgradeCommand.matches(cmd)) {
System.err.println("Usage: java DFSAdmin"
+ " [" + RollingUpgradeCommand.USAGE+"]");
} else if ("-metasave".equals(cmd)) {
System.err.println("Usage: java DFSAdmin"
+ " [-metasave filename]");
@@ -1003,6 +1067,7 @@ public class DFSAdmin extends FsShell {
System.err.println(" [-restoreFailedStorage true|false|check]");
System.err.println(" [-refreshNodes]");
System.err.println(" [-finalizeUpgrade]");
System.err.println(" ["+RollingUpgradeCommand.USAGE+"]");
System.err.println(" [-metasave filename]");
System.err.println(" [-refreshServiceAcl]");
System.err.println(" [-refreshUserToGroupsMappings]");
@@ -1087,6 +1152,11 @@ public class DFSAdmin extends FsShell {
printUsage(cmd);
return exitCode;
}
} else if (RollingUpgradeCommand.matches(cmd)) {
if (argv.length < 1 || argv.length > 2) {
printUsage(cmd);
return exitCode;
}
} else if ("-metasave".equals(cmd)) {
if (argv.length != 2) {
printUsage(cmd);
@@ -1162,6 +1232,8 @@ public class DFSAdmin extends FsShell {
exitCode = refreshNodes();
} else if ("-finalizeUpgrade".equals(cmd)) {
exitCode = finalizeUpgrade();
} else if (RollingUpgradeCommand.matches(cmd)) {
exitCode = RollingUpgradeCommand.run(getDFS(), argv, i);
} else if ("-metasave".equals(cmd)) {
exitCode = metaSave(argv, i);
} else if (ClearQuotaCommand.matches(cmd)) {

ClientNamenodeProtocol.proto

@@ -332,6 +332,25 @@ message FinalizeUpgradeRequestProto { // no parameters
message FinalizeUpgradeResponseProto { // void response
}
enum RollingUpgradeActionProto {
QUERY = 1;
START = 2;
FINALIZE = 3;
}
message RollingUpgradeRequestProto {
required RollingUpgradeActionProto action = 1;
}
message RollingUpgradeInfoProto {
required uint64 startTime = 1;
required uint64 finalizeTime = 2;
}
message RollingUpgradeResponseProto {
required RollingUpgradeInfoProto rollingUpgradeInfo = 1;
}
message ListCorruptFileBlocksRequestProto {
required string path = 1;
optional string cookie = 2;
@@ -658,6 +677,8 @@ service ClientNamenodeProtocol {
rpc refreshNodes(RefreshNodesRequestProto) returns(RefreshNodesResponseProto);
rpc finalizeUpgrade(FinalizeUpgradeRequestProto)
returns(FinalizeUpgradeResponseProto);
rpc rollingUpgrade(RollingUpgradeRequestProto)
returns(RollingUpgradeResponseProto);
rpc listCorruptFileBlocks(ListCorruptFileBlocksRequestProto)
returns(ListCorruptFileBlocksResponseProto);
rpc metaSave(MetaSaveRequestProto) returns(MetaSaveResponseProto);

TestRollingUpgrade.java

@@ -0,0 +1,73 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.tools.DFSAdmin;
import org.junit.Assert;
import org.junit.Test;
/**
* This class tests rolling upgrade.
*/
public class TestRollingUpgrade {
/**
* Test DFSAdmin Upgrade Command.
*/
@Test
public void testDFSAdminRollingUpgradeCommand() throws Exception {
// start a cluster
final Configuration conf = new HdfsConfiguration();
MiniDFSCluster cluster = null;
try {
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
cluster.waitActive();
final Path foo = new Path("/foo");
final Path bar = new Path("/bar");
{
final DistributedFileSystem dfs = cluster.getFileSystem();
final DFSAdmin dfsadmin = new DFSAdmin(conf);
dfs.mkdirs(foo);
{
//start rolling upgrade
final String[] args = {"-rollingUpgrade", "start"};
dfsadmin.run(args);
}
dfs.mkdirs(bar);
Assert.assertTrue(dfs.exists(foo));
Assert.assertTrue(dfs.exists(bar));
}
cluster.restartNameNode();
{
final DistributedFileSystem dfs = cluster.getFileSystem();
Assert.assertTrue(dfs.exists(foo));
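// /bar was created after the upgrade marker was logged and is gone after
// the restart, evidently because edits past the marker are not replayed
// (upgrade marker support: HDFS-5645).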
Assert.assertFalse(dfs.exists(bar));
}
} finally {
if(cluster != null) cluster.shutdown();
}
}
}