diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 25ef7eec286..a0bebede95e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -120,6 +120,8 @@ Trunk (unreleased changes)
HDFS-3803. Change BlockPoolSliceScanner chatty INFO log to DEBUG.
(Andrew Purtell via suresh)
+ HDFS-2686. Remove DistributedUpgrade related code. (suresh)
+
OPTIMIZATIONS
BUG FIXES
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
index 6aadaa9ad8a..6b401be0cbb 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
@@ -695,8 +695,9 @@ public interface ClientProtocol {
public void finalizeUpgrade() throws IOException;
/**
- * Report distributed upgrade progress or force current upgrade to proceed.
+ * Method no longer used; retained only for backward compatibility.
*
+ * Report distributed upgrade progress or force current upgrade to proceed.
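+ * The NameNode now responds to this call with UnsupportedActionException
+ * (see NameNodeRpcServer later in this patch).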
* @param action {@link HdfsConstants.UpgradeAction} to perform
* @return upgrade status information or null if no upgrades are in progress
* @throws IOException
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java
index ed02e5dbb93..b620d922a20 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java
@@ -389,8 +389,8 @@ public class PBHelper {
public static NamespaceInfo convert(NamespaceInfoProto info) {
StorageInfoProto storage = info.getStorageInfo();
return new NamespaceInfo(storage.getNamespceID(), storage.getClusterID(),
- info.getBlockPoolID(), storage.getCTime(), info.getDistUpgradeVersion(),
- info.getBuildVersion(), info.getSoftwareVersion());
+ info.getBlockPoolID(), storage.getCTime(), info.getBuildVersion(),
+ info.getSoftwareVersion());
}
public static NamenodeCommand convert(NamenodeCommandProto cmd) {
@@ -898,7 +898,7 @@ public class PBHelper {
return NamespaceInfoProto.newBuilder()
.setBlockPoolID(info.getBlockPoolID())
.setBuildVersion(info.getBuildVersion())
- .setDistUpgradeVersion(info.getDistributedUpgradeVersion())
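+ // The distUpgradeVersion proto field is presumably renamed to "unused" in hdfs.proto
+ // (not shown in this patch); writing a constant 0 keeps the slot populated for wire compatibility.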
+ .setUnused(0)
.setStorageInfo(PBHelper.convert((StorageInfo)info))
.setSoftwareVersion(info.getSoftwareVersion()).build();
}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/UpgradeManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/UpgradeManager.java
deleted file mode 100644
index 405006bfb18..00000000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/UpgradeManager.java
+++ /dev/null
@@ -1,91 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdfs.server.common;
-
-import java.io.IOException;
-import java.util.SortedSet;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.hdfs.protocol.HdfsConstants;
-import org.apache.hadoop.hdfs.server.protocol.UpgradeCommand;
-
-/**
- * Generic upgrade manager.
- *
- * {@link #broadcastCommand} is the command that should be
- *
- */
-@InterfaceAudience.Private
-public abstract class UpgradeManager {
- protected SortedSet<Upgradeable> currentUpgrades = null;
- protected boolean upgradeState = false; // true if upgrade is in progress
- protected int upgradeVersion = 0;
- protected UpgradeCommand broadcastCommand = null;
-
- public synchronized UpgradeCommand getBroadcastCommand() {
- return this.broadcastCommand;
- }
-
- public synchronized boolean getUpgradeState() {
- return this.upgradeState;
- }
-
- public synchronized int getUpgradeVersion(){
- return this.upgradeVersion;
- }
-
- public synchronized void setUpgradeState(boolean uState, int uVersion) {
- this.upgradeState = uState;
- this.upgradeVersion = uVersion;
- }
-
- public SortedSet<Upgradeable> getDistributedUpgrades() throws IOException {
- return UpgradeObjectCollection.getDistributedUpgrades(
- getUpgradeVersion(), getType());
- }
-
- public synchronized short getUpgradeStatus() {
- if(currentUpgrades == null)
- return 100;
- return currentUpgrades.first().getUpgradeStatus();
- }
-
- public synchronized boolean initializeUpgrade() throws IOException {
- currentUpgrades = getDistributedUpgrades();
- if(currentUpgrades == null) {
- // set new upgrade state
- setUpgradeState(false, HdfsConstants.LAYOUT_VERSION);
- return false;
- }
- Upgradeable curUO = currentUpgrades.first();
- // set and write new upgrade state into disk
- setUpgradeState(true, curUO.getVersion());
- return true;
- }
-
- public synchronized boolean isUpgradeCompleted() {
- if (currentUpgrades == null) {
- return true;
- }
- return false;
- }
-
- public abstract HdfsServerConstants.NodeType getType();
- public abstract boolean startUpgrade() throws IOException;
- public abstract void completeUpgrade() throws IOException;
-}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/UpgradeObject.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/UpgradeObject.java
deleted file mode 100644
index f432afd9532..00000000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/UpgradeObject.java
+++ /dev/null
@@ -1,74 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdfs.server.common;
-
-import java.io.IOException;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.hdfs.server.common.UpgradeObjectCollection.UOSignature;
-
-/**
- * Abstract upgrade object.
- *
- * Contains default implementation of common methods of {@link Upgradeable}
- * interface.
- */
-@InterfaceAudience.Private
-public abstract class UpgradeObject implements Upgradeable {
- protected short status;
-
- @Override
- public short getUpgradeStatus() {
- return status;
- }
-
- @Override
- public String getDescription() {
- return "Upgrade object for " + getType() + " layout version " + getVersion();
- }
-
- @Override
- public UpgradeStatusReport getUpgradeStatusReport(boolean details)
- throws IOException {
- return new UpgradeStatusReport(getVersion(), getUpgradeStatus(), false);
- }
-
- @Override
- public int compareTo(Upgradeable o) {
- if(this.getVersion() != o.getVersion())
- return (getVersion() > o.getVersion() ? -1 : 1);
- int res = this.getType().toString().compareTo(o.getType().toString());
- if(res != 0)
- return res;
- return getClass().getCanonicalName().compareTo(
- o.getClass().getCanonicalName());
- }
-
- @Override
- public boolean equals(Object o) {
- if (!(o instanceof UpgradeObject)) {
- return false;
- }
- return this.compareTo((UpgradeObject)o) == 0;
- }
-
- @Override
- public int hashCode() {
- return new UOSignature(this).hashCode();
- }
-}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/UpgradeObjectCollection.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/UpgradeObjectCollection.java
deleted file mode 100644
index b92a0cdb05e..00000000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/UpgradeObjectCollection.java
+++ /dev/null
@@ -1,135 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdfs.server.common;
-
-import java.io.IOException;
-import java.util.SortedSet;
-import java.util.TreeSet;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.hdfs.protocol.HdfsConstants;
-import org.apache.hadoop.util.StringUtils;
-
-/**
- * Collection of upgrade objects.
- *
- * Upgrade objects should be registered here before they can be used.
- */
-@InterfaceAudience.Private
-public class UpgradeObjectCollection {
- static {
- initialize();
- // Registered distributed upgrade objects here
- // registerUpgrade(new UpgradeObject());
- }
-
- static class UOSignature implements Comparable<UOSignature> {
- int version;
- HdfsServerConstants.NodeType type;
- String className;
-
- UOSignature(Upgradeable uo) {
- this.version = uo.getVersion();
- this.type = uo.getType();
- this.className = uo.getClass().getCanonicalName();
- }
-
- int getVersion() {
- return version;
- }
-
- HdfsServerConstants.NodeType getType() {
- return type;
- }
-
- String getClassName() {
- return className;
- }
-
- Upgradeable instantiate() throws IOException {
- try {
- return (Upgradeable)Class.forName(getClassName()).newInstance();
- } catch(ClassNotFoundException e) {
- throw new IOException(StringUtils.stringifyException(e));
- } catch(InstantiationException e) {
- throw new IOException(StringUtils.stringifyException(e));
- } catch(IllegalAccessException e) {
- throw new IOException(StringUtils.stringifyException(e));
- }
- }
-
- @Override
- public int compareTo(UOSignature o) {
- if(this.version != o.version)
- return (version < o.version ? -1 : 1);
- int res = this.getType().toString().compareTo(o.getType().toString());
- if(res != 0)
- return res;
- return className.compareTo(o.className);
- }
-
- @Override
- public boolean equals(Object o) {
- if (!(o instanceof UOSignature)) {
- return false;
- }
- return this.compareTo((UOSignature)o) == 0;
- }
-
- @Override
- public int hashCode() {
- return version ^ ((type==null)?0:type.hashCode())
- ^ ((className==null)?0:className.hashCode());
- }
- }
-
- /**
- * Static collection of upgrade objects sorted by version.
- * Layout versions are negative therefore newer versions will go first.
- */
- static SortedSet<UOSignature> upgradeTable;
-
- static final void initialize() {
- upgradeTable = new TreeSet<UOSignature>();
- }
-
- static void registerUpgrade(Upgradeable uo) {
- // Registered distributed upgrade objects here
- upgradeTable.add(new UOSignature(uo));
- }
-
- public static SortedSet<Upgradeable> getDistributedUpgrades(int versionFrom,
- HdfsServerConstants.NodeType type
- ) throws IOException {
- assert HdfsConstants.LAYOUT_VERSION <= versionFrom : "Incorrect version "
- + versionFrom + ". Expected to be <= " + HdfsConstants.LAYOUT_VERSION;
- SortedSet<Upgradeable> upgradeObjects = new TreeSet<Upgradeable>();
- for(UOSignature sig : upgradeTable) {
- if(sig.getVersion() < HdfsConstants.LAYOUT_VERSION)
- continue;
- if(sig.getVersion() > versionFrom)
- break;
- if(sig.getType() != type )
- continue;
- upgradeObjects.add(sig.instantiate());
- }
- if(upgradeObjects.size() == 0)
- return null;
- return upgradeObjects;
- }
-}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Upgradeable.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Upgradeable.java
deleted file mode 100644
index 016fd948e84..00000000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Upgradeable.java
+++ /dev/null
@@ -1,100 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdfs.server.common;
-
-import java.io.IOException;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.hdfs.server.protocol.UpgradeCommand;
-
-/**
- * Common interface for distributed upgrade objects.
- *
- * Each upgrade object corresponds to a layout version,
- * which is the latest version that should be upgraded using this object.
- * That is all components whose layout version is greater or equal to the
- * one returned by {@link #getVersion()} must be upgraded with this object.
- */
-@InterfaceAudience.Private
-public interface Upgradeable extends Comparable<Upgradeable> {
- /**
- * Get the layout version of the upgrade object.
- * @return layout version
- */
- int getVersion();
-
- /**
- * Get the type of the software component, which this object is upgrading.
- * @return type
- */
- HdfsServerConstants.NodeType getType();
-
- /**
- * Description of the upgrade object for displaying.
- * @return description
- */
- String getDescription();
-
- /**
- * Upgrade status determines a percentage of the work done out of the total
- * amount required by the upgrade.
- *
- * 100% means that the upgrade is completed.
- * Any value < 100 means it is not complete.
- *
- * The return value should provide at least 2 values, e.g. 0 and 100.
- * @return integer value in the range [0, 100].
- */
- short getUpgradeStatus();
-
- /**
- * Prepare for the upgrade.
- * E.g. initialize upgrade data structures and set status to 0.
- *
- * Returns an upgrade command that is used for broadcasting to other cluster
- * components.
- * E.g. name-node informs data-nodes that they must perform a distributed upgrade.
- *
- * @return an UpgradeCommand for broadcasting.
- * @throws IOException
- */
- UpgradeCommand startUpgrade() throws IOException;
-
- /**
- * Complete upgrade.
- * E.g. cleanup upgrade data structures or write metadata to disk.
- *
- * Returns an upgrade command that is used for broadcasting to other cluster
- * components.
- * E.g. data-nodes inform the name-node that they completed the upgrade
- * while other data-nodes are still upgrading.
- *
- * @throws IOException
- */
- UpgradeCommand completeUpgrade() throws IOException;
-
- /**
- * Get status report for the upgrade.
- *
- * @param details true if upgradeStatus details need to be included,
- * false otherwise
- * @return {@link UpgradeStatusReport}
- * @throws IOException
- */
- UpgradeStatusReport getUpgradeStatusReport(boolean details) throws IOException;
-}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPOfferService.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPOfferService.java
index 4a44efc0d05..0b2c5473898 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPOfferService.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPOfferService.java
@@ -74,7 +74,6 @@ class BPOfferService {
*/
DatanodeRegistration bpRegistration;
- UpgradeManagerDatanode upgradeManager = null;
private final DataNode dn;
/**
@@ -260,33 +259,6 @@ class BPOfferService {
}
}
- synchronized UpgradeManagerDatanode getUpgradeManager() {
- if(upgradeManager == null)
- upgradeManager =
- new UpgradeManagerDatanode(dn, getBlockPoolId());
-
- return upgradeManager;
- }
-
- void processDistributedUpgradeCommand(UpgradeCommand comm)
- throws IOException {
- UpgradeManagerDatanode upgradeManager = getUpgradeManager();
- upgradeManager.processUpgradeCommand(comm);
- }
-
- /**
- * Start distributed upgrade if it should be initiated by the data-node.
- */
- synchronized void startDistributedUpgradeIfNeeded() throws IOException {
- UpgradeManagerDatanode um = getUpgradeManager();
-
- if(!um.getUpgradeState())
- return;
- um.setUpgradeState(false, um.getUpgradeVersion());
- um.startUpgrade();
- return;
- }
-
DataNode getDataNode() {
return dn;
}
@@ -374,9 +346,6 @@ class BPOfferService {
if (bpServices.isEmpty()) {
dn.shutdownBlockPool(this);
-
- if(upgradeManager != null)
- upgradeManager.shutdownUpgrade();
}
}
@@ -593,7 +562,7 @@ class BPOfferService {
break;
case UpgradeCommand.UC_ACTION_START_UPGRADE:
// start distributed upgrade here
- processDistributedUpgradeCommand((UpgradeCommand)cmd);
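+ // Distributed upgrades were removed by HDFS-2686; an upgrade command
+ // (presumably from an older NameNode) is now logged and ignored.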
+ LOG.warn("Distibuted upgrade is no longer supported");
break;
case DatanodeProtocol.DNA_RECOVERBLOCK:
String who = "NameNode at " + actor.getNNSocketAddress();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
index 478cde12b0e..4f00daaef5e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
@@ -324,7 +324,7 @@ class BPServiceActor implements Runnable {
* Run an immediate block report on this thread. Used by tests.
*/
@VisibleForTesting
- void triggerBlockReportForTests() throws IOException {
+ void triggerBlockReportForTests() {
synchronized (pendingIncrementalBR) {
lastBlockReport = 0;
lastHeartbeat = 0;
@@ -340,7 +340,7 @@ class BPServiceActor implements Runnable {
}
@VisibleForTesting
- void triggerHeartbeatForTests() throws IOException {
+ void triggerHeartbeatForTests() {
synchronized (pendingIncrementalBR) {
lastHeartbeat = 0;
pendingIncrementalBR.notifyAll();
@@ -355,7 +355,7 @@ class BPServiceActor implements Runnable {
}
@VisibleForTesting
- void triggerDeletionReportForTests() throws IOException {
+ void triggerDeletionReportForTests() {
synchronized (pendingIncrementalBR) {
lastDeletedReport = 0;
pendingIncrementalBR.notifyAll();
@@ -670,7 +670,6 @@ class BPServiceActor implements Runnable {
while (shouldRun()) {
try {
- bpos.startDistributedUpgradeIfNeeded();
offerService();
} catch (Exception ex) {
LOG.error("Exception in BPOfferService for " + this, ex);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceStorage.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceStorage.java
index 763d7ea883d..335b2d6e823 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceStorage.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceStorage.java
@@ -138,7 +138,7 @@ public class BlockPoolSliceStorage extends Storage {
// During startup some of them can upgrade or roll back
// while others could be up-to-date for the regular startup.
for (int idx = 0; idx < getNumStorageDirs(); idx++) {
- doTransition(datanode, getStorageDir(idx), nsInfo, startOpt);
+ doTransition(getStorageDir(idx), nsInfo, startOpt);
assert getLayoutVersion() == nsInfo.getLayoutVersion()
: "Data-node and name-node layout versions must be the same.";
assert getCTime() == nsInfo.getCTime()
@@ -232,7 +232,7 @@ public class BlockPoolSliceStorage extends Storage {
* @param startOpt startup option
* @throws IOException
*/
- private void doTransition(DataNode datanode, StorageDirectory sd,
+ private void doTransition(StorageDirectory sd,
NamespaceInfo nsInfo, StartupOption startOpt) throws IOException {
if (startOpt == StartupOption.ROLLBACK)
doRollback(sd, nsInfo); // rollback if applicable
@@ -254,13 +254,9 @@ public class BlockPoolSliceStorage extends Storage {
+ blockpoolID);
}
if (this.layoutVersion == HdfsConstants.LAYOUT_VERSION
- && this.cTime == nsInfo.getCTime())
+ && this.cTime == nsInfo.getCTime()) {
return; // regular startup
-
- // verify necessity of a distributed upgrade
- UpgradeManagerDatanode um =
- datanode.getUpgradeManagerDatanode(nsInfo.getBlockPoolID());
- verifyDistributedUpgradeProgress(um, nsInfo);
+ }
if (this.layoutVersion > HdfsConstants.LAYOUT_VERSION
|| this.cTime < nsInfo.getCTime()) {
doUpgrade(sd, nsInfo); // upgrade
@@ -476,13 +472,6 @@ public class BlockPoolSliceStorage extends Storage {
LOG.info( hardLink.linkStats.report() );
}
- private void verifyDistributedUpgradeProgress(UpgradeManagerDatanode um,
- NamespaceInfo nsInfo) throws IOException {
- assert um != null : "DataNode.upgradeManager is null.";
- um.setUpgradeState(false, getLayoutVersion());
- um.initializeUpgrade(nsInfo);
- }
-
/**
* gets the data node storage directory based on block pool storage
*
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataBlockScanner.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataBlockScanner.java
index d5d69148e86..e189c81b882 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataBlockScanner.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataBlockScanner.java
@@ -99,13 +99,8 @@ public class DataBlockScanner implements Runnable {
}
// Wait for at least one block pool to be up
- private void waitForInit(String bpid) {
- UpgradeManagerDatanode um = null;
- if(bpid != null && !bpid.equals(""))
- um = datanode.getUpgradeManagerDatanode(bpid);
-
- while ((um != null && ! um.isUpgradeCompleted())
- || (getBlockPoolSetSize() < datanode.getAllBpOs().length)
+ private void waitForInit() {
+ while ((getBlockPoolSetSize() < datanode.getAllBpOs().length)
|| (getBlockPoolSetSize() < 1)) {
try {
Thread.sleep(5000);
@@ -129,7 +124,7 @@ public class DataBlockScanner implements Runnable {
String nextBpId = null;
while ((nextBpId == null) && datanode.shouldRun
&& !blockScannerThread.isInterrupted()) {
- waitForInit(currentBpId);
+ waitForInit();
synchronized (this) {
if (getBlockPoolSetSize() > 0) {
// Find nextBpId by the minimum of the last scan time
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
index ee849a7c543..55d4571ad78 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
@@ -502,7 +502,7 @@ public class DataNode extends Configured
reason = "verifcation is not supported by SimulatedFSDataset";
}
if (reason == null) {
- directoryScanner = new DirectoryScanner(this, data, conf);
+ directoryScanner = new DirectoryScanner(data, conf);
directoryScanner.start();
} else {
LOG.info("Periodic Directory Tree Verification scan is disabled because " +
@@ -1218,17 +1218,8 @@ public class DataNode extends Configured
return xmitsInProgress.get();
}
- UpgradeManagerDatanode getUpgradeManagerDatanode(String bpid) {
- BPOfferService bpos = blockPoolManager.get(bpid);
- if(bpos==null) {
- return null;
- }
- return bpos.getUpgradeManager();
- }
-
- private void transferBlock( ExtendedBlock block,
- DatanodeInfo xferTargets[]
- ) throws IOException {
+ private void transferBlock(ExtendedBlock block, DatanodeInfo xferTargets[])
+ throws IOException {
BPOfferService bpos = getBPOSForBlock(block);
DatanodeRegistration bpReg = getDNRegistrationForBP(block.getBlockPoolId());
@@ -1866,8 +1857,7 @@ public class DataNode extends Configured
private void recoverBlock(RecoveringBlock rBlock) throws IOException {
ExtendedBlock block = rBlock.getBlock();
String blookPoolId = block.getBlockPoolId();
- DatanodeInfo[] targets = rBlock.getLocations();
- DatanodeID[] datanodeids = (DatanodeID[])targets;
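+ // DatanodeInfo extends DatanodeID, so the locations can be used as DatanodeID[] without a cast.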
+ DatanodeID[] datanodeids = rBlock.getLocations();
List<BlockRecord> syncList = new ArrayList<BlockRecord>(datanodeids.length);
int errorCount = 0;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java
index 88ee89f3ee9..221d6b2d739 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java
@@ -396,10 +396,6 @@ public class DataStorage extends Storage {
if (this.layoutVersion == HdfsConstants.LAYOUT_VERSION
&& this.cTime == nsInfo.getCTime())
return; // regular startup
- // verify necessity of a distributed upgrade
- UpgradeManagerDatanode um =
- datanode.getUpgradeManagerDatanode(nsInfo.getBlockPoolID());
- verifyDistributedUpgradeProgress(um, nsInfo);
// do upgrade
if (this.layoutVersion > HdfsConstants.LAYOUT_VERSION
@@ -708,14 +704,6 @@ public class DataStorage extends Storage {
new File(to, otherNames[i]), oldLV, hl);
}
- private void verifyDistributedUpgradeProgress(UpgradeManagerDatanode um,
- NamespaceInfo nsInfo
- ) throws IOException {
- assert um != null : "DataNode.upgradeManager is null.";
- um.setUpgradeState(false, getLayoutVersion());
- um.initializeUpgrade(nsInfo);
- }
-
/**
* Add bpStorage into bpStorageMap
*/
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DirectoryScanner.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DirectoryScanner.java
index 6111cc8ef19..004af654e63 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DirectoryScanner.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DirectoryScanner.java
@@ -56,7 +56,6 @@ import org.apache.hadoop.util.Time;
public class DirectoryScanner implements Runnable {
private static final Log LOG = LogFactory.getLog(DirectoryScanner.class);
- private final DataNode datanode;
private final FsDatasetSpi<?> dataset;
private final ExecutorService reportCompileThreadPool;
private final ScheduledExecutorService masterThread;
@@ -222,8 +221,7 @@ public class DirectoryScanner implements Runnable {
}
}
- DirectoryScanner(DataNode dn, FsDatasetSpi<?> dataset, Configuration conf) {
- this.datanode = dn;
+ DirectoryScanner(FsDatasetSpi<?> dataset, Configuration conf) {
this.dataset = dataset;
int interval = conf.getInt(DFSConfigKeys.DFS_DATANODE_DIRECTORYSCAN_INTERVAL_KEY,
DFSConfigKeys.DFS_DATANODE_DIRECTORYSCAN_INTERVAL_DEFAULT);
@@ -271,17 +269,6 @@ public class DirectoryScanner implements Runnable {
return;
}
- String[] bpids = dataset.getBlockPoolList();
- for(String bpid : bpids) {
- UpgradeManagerDatanode um =
- datanode.getUpgradeManagerDatanode(bpid);
- if (um != null && !um.isUpgradeCompleted()) {
- //If distributed upgrades underway, exit and wait for next cycle.
- LOG.warn("this cycle terminating immediately because Distributed Upgrade is in process");
- return;
- }
- }
-
//We're are okay to run - do it
reconcile();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/UpgradeManagerDatanode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/UpgradeManagerDatanode.java
deleted file mode 100644
index 006449438a5..00000000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/UpgradeManagerDatanode.java
+++ /dev/null
@@ -1,158 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdfs.server.datanode;
-
-import java.io.IOException;
-
-import org.apache.hadoop.hdfs.protocol.HdfsConstants;
-import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
-import org.apache.hadoop.hdfs.server.common.UpgradeManager;
-import org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol;
-import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
-import org.apache.hadoop.hdfs.server.protocol.UpgradeCommand;
-import org.apache.hadoop.util.Daemon;
-
-/**
- * Upgrade manager for data-nodes.
- *
- * Distributed upgrades for a data-node are performed in a separate thread.
- * The upgrade starts when the data-node receives the start upgrade command
- * from the namenode. At that point the manager finds a respective upgrade
- * object and starts a daemon in order to perform the upgrade defined by the
- * object.
- */
-class UpgradeManagerDatanode extends UpgradeManager {
- DataNode dataNode = null;
- Daemon upgradeDaemon = null;
- String bpid = null;
-
- UpgradeManagerDatanode(DataNode dataNode, String bpid) {
- super();
- this.dataNode = dataNode;
- this.bpid = bpid;
- }
-
- @Override
- public HdfsServerConstants.NodeType getType() {
- return HdfsServerConstants.NodeType.DATA_NODE;
- }
-
- synchronized void initializeUpgrade(NamespaceInfo nsInfo) throws IOException {
- if( ! super.initializeUpgrade())
- return; // distr upgrade is not needed
- DataNode.LOG.info("\n Distributed upgrade for DataNode "
- + dataNode.getDisplayName()
- + " version " + getUpgradeVersion() + " to current LV "
- + HdfsConstants.LAYOUT_VERSION + " is initialized.");
- UpgradeObjectDatanode curUO = (UpgradeObjectDatanode)currentUpgrades.first();
- curUO.setDatanode(dataNode, this.bpid);
- upgradeState = curUO.preUpgradeAction(nsInfo);
- // upgradeState is true if the data-node should start the upgrade itself
- }
-
- /**
- * Start distributed upgrade.
- * Instantiates distributed upgrade objects.
- *
- * @return true if distributed upgrade is required or false otherwise
- * @throws IOException
- */
- @Override
- public synchronized boolean startUpgrade() throws IOException {
- if(upgradeState) { // upgrade is already in progress
- assert currentUpgrades != null :
- "UpgradeManagerDatanode.currentUpgrades is null.";
- UpgradeObjectDatanode curUO = (UpgradeObjectDatanode)currentUpgrades.first();
- curUO.startUpgrade();
- return true;
- }
- if(broadcastCommand != null) {
- if(broadcastCommand.getVersion() > this.getUpgradeVersion()) {
- // stop broadcasting, the cluster moved on
- // start upgrade for the next version
- broadcastCommand = null;
- } else {
- // the upgrade has been finished by this data-node,
- // but the cluster is still running it,
- // reply with the broadcast command
- assert currentUpgrades == null :
- "UpgradeManagerDatanode.currentUpgrades is not null.";
- assert upgradeDaemon == null :
- "UpgradeManagerDatanode.upgradeDaemon is not null.";
- DatanodeProtocol nn = dataNode.getActiveNamenodeForBP(bpid);
- nn.processUpgradeCommand(broadcastCommand);
- return true;
- }
- }
- if(currentUpgrades == null)
- currentUpgrades = getDistributedUpgrades();
- if(currentUpgrades == null) {
- DataNode.LOG.info("\n Distributed upgrade for DataNode version "
- + getUpgradeVersion() + " to current LV "
- + HdfsConstants.LAYOUT_VERSION + " cannot be started. "
- + "The upgrade object is not defined.");
- return false;
- }
- upgradeState = true;
- UpgradeObjectDatanode curUO = (UpgradeObjectDatanode)currentUpgrades.first();
- curUO.setDatanode(dataNode, this.bpid);
- curUO.startUpgrade();
- upgradeDaemon = new Daemon(curUO);
- upgradeDaemon.start();
- DataNode.LOG.info("\n Distributed upgrade for DataNode "
- + dataNode.getDisplayName()
- + " version " + getUpgradeVersion() + " to current LV "
- + HdfsConstants.LAYOUT_VERSION + " is started.");
- return true;
- }
-
- synchronized void processUpgradeCommand(UpgradeCommand command
- ) throws IOException {
- assert command.getAction() == UpgradeCommand.UC_ACTION_START_UPGRADE :
- "Only start upgrade action can be processed at this time.";
- this.upgradeVersion = command.getVersion();
- // Start distributed upgrade
- if(startUpgrade()) // upgrade started
- return;
- throw new IOException(
- "Distributed upgrade for DataNode " + dataNode.getDisplayName()
- + " version " + getUpgradeVersion() + " to current LV "
- + HdfsConstants.LAYOUT_VERSION + " cannot be started. "
- + "The upgrade object is not defined.");
- }
-
- @Override
- public synchronized void completeUpgrade() throws IOException {
- assert currentUpgrades != null :
- "UpgradeManagerDatanode.currentUpgrades is null.";
- UpgradeObjectDatanode curUO = (UpgradeObjectDatanode)currentUpgrades.first();
- broadcastCommand = curUO.completeUpgrade();
- upgradeState = false;
- currentUpgrades = null;
- upgradeDaemon = null;
- DataNode.LOG.info("\n Distributed upgrade for DataNode "
- + dataNode.getDisplayName()
- + " version " + getUpgradeVersion() + " to current LV "
- + HdfsConstants.LAYOUT_VERSION + " is complete.");
- }
-
- synchronized void shutdownUpgrade() {
- if(upgradeDaemon != null)
- upgradeDaemon.interrupt();
- }
-}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/UpgradeObjectDatanode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/UpgradeObjectDatanode.java
deleted file mode 100644
index 5ae0a1d95b1..00000000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/UpgradeObjectDatanode.java
+++ /dev/null
@@ -1,142 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdfs.server.datanode;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.hdfs.protocol.HdfsConstants;
-import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
-import org.apache.hadoop.hdfs.server.common.UpgradeObject;
-import org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol;
-import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
-import org.apache.hadoop.hdfs.server.protocol.UpgradeCommand;
-import java.io.IOException;
-import java.net.SocketTimeoutException;
-
-/**
- * Base class for data-node upgrade objects.
- * Data-node upgrades are run in separate threads.
- */
-@InterfaceAudience.Private
-public abstract class UpgradeObjectDatanode extends UpgradeObject implements Runnable {
- private DataNode dataNode = null;
- private String bpid = null;
-
- @Override
- public HdfsServerConstants.NodeType getType() {
- return HdfsServerConstants.NodeType.DATA_NODE;
- }
-
- protected DataNode getDatanode() {
- return dataNode;
- }
-
- protected DatanodeProtocol getNamenode() throws IOException {
- return dataNode.getActiveNamenodeForBP(bpid);
- }
-
- void setDatanode(DataNode dataNode, String bpid) {
- this.dataNode = dataNode;
- this.bpid = bpid;
- }
-
- /**
- * Specifies how the upgrade is performed.
- * @throws IOException
- */
- public abstract void doUpgrade() throws IOException;
-
- /**
- * Specifies what to do before the upgrade is started.
- *
- * The default implementation checks whether the data-node missed the upgrade
- * and throws an exception if it did. This leads to the data-node shutdown.
- *
- * Data-nodes usually start distributed upgrade when the name-node replies
- * to its heartbeat with a start upgrade command.
- * Sometimes though, e.g. when a data-node missed the upgrade and wants to
- * catchup with the rest of the cluster, it is necessary to initiate the
- * upgrade directly on the data-node, since the name-node might not ever
- * start it. An override of this method should then return true.
- * And the upgrade will start after data-node registration but before sending
- * its first heartbeat.
- *
- * @param nsInfo name-node versions, verify that the upgrade
- * object can talk to this name-node version if necessary.
- *
- * @throws IOException
- * @return true if data-node itself should start the upgrade or
- * false if it should wait until the name-node starts the upgrade.
- */
- boolean preUpgradeAction(NamespaceInfo nsInfo) throws IOException {
- int nsUpgradeVersion = nsInfo.getDistributedUpgradeVersion();
- if(nsUpgradeVersion >= getVersion())
- return false; // name-node will perform the upgrade
- // Missed the upgrade. Report problem to the name-node and throw exception
- String errorMsg =
- "\n Data-node missed a distributed upgrade and will shutdown."
- + "\n " + getDescription() + "."
- + " Name-node version = " + nsInfo.getLayoutVersion() + ".";
- DataNode.LOG.fatal( errorMsg );
- String bpid = nsInfo.getBlockPoolID();
- dataNode.trySendErrorReport(bpid, DatanodeProtocol.NOTIFY, errorMsg);
- throw new IOException(errorMsg);
- }
-
- @Override
- public void run() {
- assert dataNode != null : "UpgradeObjectDatanode.dataNode is null";
- while(dataNode.shouldRun) {
- try {
- doUpgrade();
- } catch(Exception e) {
- DataNode.LOG.error("Exception in doUpgrade", e);
- }
- break;
- }
-
- // report results
- if(getUpgradeStatus() < 100) {
- DataNode.LOG.info("\n Distributed upgrade for DataNode version "
- + getVersion() + " to current LV "
- + HdfsConstants.LAYOUT_VERSION + " cannot be completed.");
- }
-
- // Complete the upgrade by calling the manager method
- try {
- UpgradeManagerDatanode upgradeManager =
- dataNode.getUpgradeManagerDatanode(bpid);
- if(upgradeManager != null)
- upgradeManager.completeUpgrade();
- } catch(IOException e) {
- DataNode.LOG.error("Exception in completeUpgrade", e);
- }
- }
-
- /**
- * Complete upgrade and return a status complete command for broadcasting.
- *
- * Data-nodes finish upgrade at different times.
- * The data-node needs to re-confirm with the name-node that the upgrade
- * is complete while other nodes are still upgrading.
- */
- @Override
- public UpgradeCommand completeUpgrade() throws IOException {
- return new UpgradeCommand(UpgradeCommand.UC_ACTION_REPORT_STATUS,
- getVersion(), (short)100);
- }
-}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java
index 271314db365..2b2adb768d1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java
@@ -96,8 +96,6 @@ public class FSImage implements Closeable {
/**
* Construct an FSImage
* @param conf Configuration
- * @see #FSImage(Configuration conf,
- * Collection<URI> imageDirs, Collection<URI> editsDirs)
* @throws IOException if default directories are invalid.
*/
public FSImage(Configuration conf) throws IOException {
@@ -191,8 +189,6 @@ public class FSImage implements Closeable {
throw new IOException(
"All specified directories are not accessible or do not exist.");
- storage.setUpgradeManager(target.upgradeManager);
-
// 1. For each data directory calculate its state and
// check whether all is consistent before transitioning.
Map<StorageDirectory, StorageState> dataDirStates =
@@ -227,9 +223,6 @@ public class FSImage implements Closeable {
storage.processStartupOptionsForUpgrade(startOpt, layoutVersion);
- // check whether distributed upgrade is required and/or should be continued
- storage.verifyDistributedUpgradeProgress(startOpt);
-
// 2. Format unformatted dirs.
for (Iterator<StorageDirectory> it = storage.dirIterator(); it.hasNext();) {
StorageDirectory sd = it.next();
@@ -320,13 +313,6 @@ public class FSImage implements Closeable {
}
private void doUpgrade(FSNamesystem target) throws IOException {
- if(storage.getDistributedUpgradeState()) {
- // only distributed upgrade need to continue
- // don't do version upgrade
- this.loadFSImage(target, null);
- storage.initializeDistributedUpgrade();
- return;
- }
// Upgrade is allowed only if there are
// no previous fs states in any of the directories
for (Iterator<StorageDirectory> it = storage.dirIterator(); it.hasNext();) {
@@ -409,7 +395,6 @@ public class FSImage implements Closeable {
+ storage.getRemovedStorageDirs().size()
+ " storage directory(ies), previously logged.");
}
- storage.initializeDistributedUpgrade();
}
private void doRollback() throws IOException {
@@ -472,8 +457,6 @@ public class FSImage implements Closeable {
LOG.info("Rollback of " + sd.getRoot()+ " is complete.");
}
isUpgradeFinalized = true;
- // check whether name-node can start in regular mode
- storage.verifyDistributedUpgradeProgress(StartupOption.REGULAR);
}
private void doFinalize(StorageDirectory sd) throws IOException {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index ba5ec3db193..3a88e26a156 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -108,7 +108,6 @@ import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.HadoopIllegalArgumentException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
import org.apache.hadoop.fs.ContentSummary;
import org.apache.hadoop.fs.CreateFlag;
import org.apache.hadoop.fs.FileAlreadyExistsException;
@@ -136,7 +135,6 @@ import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
-import org.apache.hadoop.hdfs.protocol.HdfsConstants.UpgradeAction;
import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
@@ -160,7 +158,6 @@ import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
import org.apache.hadoop.hdfs.server.common.Storage;
import org.apache.hadoop.hdfs.server.common.Storage.StorageDirType;
import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
-import org.apache.hadoop.hdfs.server.common.UpgradeStatusReport;
import org.apache.hadoop.hdfs.server.common.Util;
import org.apache.hadoop.hdfs.server.namenode.LeaseManager.Lease;
import org.apache.hadoop.hdfs.server.namenode.NameNode.OperationCategory;
@@ -179,7 +176,6 @@ import org.apache.hadoop.hdfs.server.protocol.NNHAStatusHeartbeat;
import org.apache.hadoop.hdfs.server.protocol.NamenodeCommand;
import org.apache.hadoop.hdfs.server.protocol.NamenodeRegistration;
import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
-import org.apache.hadoop.hdfs.server.protocol.UpgradeCommand;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.ipc.Server;
@@ -942,8 +938,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
NamespaceInfo unprotectedGetNamespaceInfo() {
return new NamespaceInfo(dir.fsImage.getStorage().getNamespaceID(),
getClusterId(), getBlockPoolId(),
- dir.fsImage.getStorage().getCTime(),
- upgradeManager.getUpgradeVersion());
+ dir.fsImage.getStorage().getCTime());
}
/**
@@ -3387,13 +3382,6 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
DatanodeCommand[] cmds = blockManager.getDatanodeManager().handleHeartbeat(
nodeReg, blockPoolId, capacity, dfsUsed, remaining, blockPoolUsed,
xceiverCount, maxTransfer, failedVolumes);
- if (cmds == null || cmds.length == 0) {
- DatanodeCommand cmd = upgradeManager.getBroadcastCommand();
- if (cmd != null) {
- cmds = new DatanodeCommand[] {cmd};
- }
- }
-
return new HeartbeatResponse(cmds, createHaStatusHeartbeat());
} finally {
readUnlock();
@@ -3834,24 +3822,9 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
/**
* Leave safe mode.
*
- * Switch to manual safe mode if distributed upgrade is required.
* Check for invalid, under- & over-replicated blocks in the end of startup.
*/
- private synchronized void leave(boolean checkForUpgrades) {
- if(checkForUpgrades) {
- // verify whether a distributed upgrade needs to be started
- boolean needUpgrade = false;
- try {
- needUpgrade = upgradeManager.startUpgrade();
- } catch(IOException e) {
- FSNamesystem.LOG.error("IOException in startDistributedUpgradeIfNeeded", e);
- }
- if(needUpgrade) {
- // switch to manual safe mode
- safeMode = new SafeModeInfo(false);
- return;
- }
- }
+ private synchronized void leave() {
// if not done yet, initialize replication queues.
// In the standby, do not populate repl queues
if (!isPopulatingReplQueues() && !isInStandbyState()) {
@@ -3945,7 +3918,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
// the threshold is reached
if (!isOn() || // safe mode is off
extension <= 0 || threshold <= 0) { // don't need to wait
- this.leave(true); // leave safe mode
+ this.leave(); // leave safe mode
return;
}
if (reached > 0) { // threshold has already been reached before
@@ -4049,10 +4022,6 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
leaveMsg = "Safe mode will be turned off automatically";
}
if(isManual()) {
- if(upgradeManager.getUpgradeState())
- return leaveMsg + " upon completion of " +
- "the distributed upgrade: upgrade progress = " +
- upgradeManager.getUpgradeStatus() + "%";
leaveMsg = "Use \"hdfs dfsadmin -safemode leave\" to turn safe mode off";
}
@@ -4187,13 +4156,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
LOG.info("NameNode is being shutdown, exit SafeModeMonitor thread. ");
} else {
// leave safe mode and stop the monitor
- try {
- leaveSafeMode(true);
- } catch(SafeModeException es) { // should never happen
- String msg = "SafeModeMonitor may not run during distributed upgrade.";
- assert false : msg;
- throw new RuntimeException(msg, es);
- }
+ leaveSafeMode();
}
smmthread = null;
}
@@ -4204,7 +4167,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
checkSuperuserPrivilege();
switch(action) {
case SAFEMODE_LEAVE: // leave safe mode
- leaveSafeMode(false);
+ leaveSafeMode();
break;
case SAFEMODE_ENTER: // enter safe mode
enterSafeMode(false);
@@ -4389,17 +4352,14 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
* Leave safe mode.
* @throws IOException
*/
- void leaveSafeMode(boolean checkForUpgrades) throws SafeModeException {
+ void leaveSafeMode() {
writeLock();
try {
if (!isInSafeMode()) {
NameNode.stateChangeLog.info("STATE* Safe mode is already OFF.");
return;
}
- if(upgradeManager.getUpgradeState())
- throw new SafeModeException("Distributed upgrade is in progress",
- safeMode);
- safeMode.leave(checkForUpgrades);
+ safeMode.leave();
} finally {
writeUnlock();
}
@@ -4474,18 +4434,6 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
return (blockManager.getBlockCollection(b) != null);
}
- // Distributed upgrade manager
- final UpgradeManagerNamenode upgradeManager = new UpgradeManagerNamenode(this);
-
- UpgradeStatusReport distributedUpgradeProgress(UpgradeAction action
- ) throws IOException {
- return upgradeManager.distributedUpgradeProgress(action);
- }
-
- UpgradeCommand processDistributedUpgradeCommand(UpgradeCommand comm) throws IOException {
- return upgradeManager.processUpgradeCommand(comm);
- }
-
PermissionStatus createFsOwnerPermissions(FsPermission permission) {
return new PermissionStatus(fsOwner.getShortUserName(), supergroup, permission);
}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NNStorage.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NNStorage.java
index 2a4998735d1..abc871fa9f1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NNStorage.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NNStorage.java
@@ -32,8 +32,6 @@ import java.util.Properties;
import java.util.UUID;
import java.util.concurrent.CopyOnWriteArrayList;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSUtil;
@@ -45,7 +43,6 @@ import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
import org.apache.hadoop.hdfs.server.common.InconsistentFSStateException;
import org.apache.hadoop.hdfs.server.common.Storage;
import org.apache.hadoop.hdfs.server.common.StorageErrorReporter;
-import org.apache.hadoop.hdfs.server.common.UpgradeManager;
import org.apache.hadoop.hdfs.server.common.Util;
import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
import org.apache.hadoop.hdfs.util.PersistentLongFile;
@@ -65,8 +62,6 @@ import com.google.common.collect.Lists;
@InterfaceAudience.Private
public class NNStorage extends Storage implements Closeable,
StorageErrorReporter {
- private static final Log LOG = LogFactory.getLog(NNStorage.class.getName());
-
static final String DEPRECATED_MESSAGE_DIGEST_PROPERTY = "imageMD5Digest";
static final String LOCAL_URI_SCHEME = "file";
@@ -112,7 +107,6 @@ public class NNStorage extends Storage implements Closeable,
}
}
- private UpgradeManager upgradeManager = null;
protected String blockpoolID = ""; // id of the block pool
/**
@@ -551,11 +545,8 @@ public class NNStorage extends Storage implements Closeable,
public static NamespaceInfo newNamespaceInfo()
throws UnknownHostException {
- return new NamespaceInfo(
- newNamespaceID(),
- newClusterID(),
- newBlockPoolID(),
- 0L, 0);
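+ // The trailing distributed-upgrade version argument has been dropped from NamespaceInfo.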
+ return new NamespaceInfo(newNamespaceID(), newClusterID(),
+ newBlockPoolID(), 0L);
}
public void format() throws IOException {
@@ -600,13 +591,6 @@ public class NNStorage extends Storage implements Closeable,
String sbpid = props.getProperty("blockpoolID");
setBlockPoolID(sd.getRoot(), sbpid);
}
-
- String sDUS, sDUV;
- sDUS = props.getProperty("distributedUpgradeState");
- sDUV = props.getProperty("distributedUpgradeVersion");
- setDistributedUpgradeState(
- sDUS == null? false : Boolean.parseBoolean(sDUS),
- sDUV == null? getLayoutVersion() : Integer.parseInt(sDUV));
setDeprecatedPropertiesForUpgrade(props);
}
@@ -653,13 +637,6 @@ public class NNStorage extends Storage implements Closeable,
if (LayoutVersion.supports(Feature.FEDERATION, layoutVersion)) {
props.setProperty("blockpoolID", blockpoolID);
}
- boolean uState = getDistributedUpgradeState();
- int uVersion = getDistributedUpgradeVersion();
- if(uState && uVersion != getLayoutVersion()) {
- props.setProperty("distributedUpgradeState", Boolean.toString(uState));
- props.setProperty("distributedUpgradeVersion",
- Integer.toString(uVersion));
- }
}
static File getStorageFile(StorageDirectory sd, NameNodeFile type, long imageTxId) {
@@ -732,7 +709,7 @@ public class NNStorage extends Storage implements Closeable,
* Return the first readable image file for the given txid, or null
* if no such image can be found
*/
- File findImageFile(long txid) throws IOException {
+ File findImageFile(long txid) {
return findFile(NameNodeDirType.IMAGE,
getImageFileName(txid));
}
@@ -753,76 +730,6 @@ public class NNStorage extends Storage implements Closeable,
return null;
}
- /**
- * Set the upgrade manager for use in a distributed upgrade.
- * @param um The upgrade manager
- */
- void setUpgradeManager(UpgradeManager um) {
- upgradeManager = um;
- }
-
- /**
- * @return The current distribued upgrade state.
- */
- boolean getDistributedUpgradeState() {
- return upgradeManager == null ? false : upgradeManager.getUpgradeState();
- }
-
- /**
- * @return The current upgrade version.
- */
- int getDistributedUpgradeVersion() {
- return upgradeManager == null ? 0 : upgradeManager.getUpgradeVersion();
- }
-
- /**
- * Set the upgrade state and version.
- * @param uState the new state.
- * @param uVersion the new version.
- */
- private void setDistributedUpgradeState(boolean uState, int uVersion) {
- if (upgradeManager != null) {
- upgradeManager.setUpgradeState(uState, uVersion);
- }
- }
-
- /**
- * Verify that the distributed upgrade state is valid.
- * @param startOpt the option the namenode was started with.
- */
- void verifyDistributedUpgradeProgress(StartupOption startOpt
- ) throws IOException {
- if(startOpt == StartupOption.ROLLBACK || startOpt == StartupOption.IMPORT)
- return;
-
- assert upgradeManager != null : "FSNameSystem.upgradeManager is null.";
- if(startOpt != StartupOption.UPGRADE) {
- if(upgradeManager.getUpgradeState())
- throw new IOException(
- "\n Previous distributed upgrade was not completed. "
- + "\n Please restart NameNode with -upgrade option.");
- if(upgradeManager.getDistributedUpgrades() != null)
- throw new IOException("\n Distributed upgrade for NameNode version "
- + upgradeManager.getUpgradeVersion()
- + " to current LV " + HdfsConstants.LAYOUT_VERSION
- + " is required.\n Please restart NameNode"
- + " with -upgrade option.");
- }
- }
-
- /**
- * Initialize a distributed upgrade.
- */
- void initializeDistributedUpgrade() throws IOException {
- if(! upgradeManager.initializeUpgrade())
- return;
- // write new upgrade state into disk
- writeAll();
- LOG.info("\n Distributed upgrade for NameNode version "
- + upgradeManager.getUpgradeVersion() + " to current LV "
- + HdfsConstants.LAYOUT_VERSION + " is initialized.");
- }
-
/**
* Disable the check for pre-upgradable layouts. Needed for BackupImage.
* @param val Whether to disable the preupgradeable layout check.
@@ -1099,7 +1006,6 @@ public class NNStorage extends Storage implements Closeable,
getNamespaceID(),
getClusterID(),
getBlockPoolID(),
- getCTime(),
- getDistributedUpgradeVersion());
+ getCTime());
}
}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
index 8a9ca07e441..7f9dcd29a47 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
@@ -742,8 +742,8 @@ class NameNodeRpcServer implements NamenodeProtocols {
@Override // ClientProtocol
public UpgradeStatusReport distributedUpgradeProgress(UpgradeAction action)
throws IOException {
- namesystem.checkOperation(OperationCategory.READ);
- return namesystem.distributedUpgradeProgress(action);
+ throw new UnsupportedActionException(
+ "Deprecated method. No longer supported");
}
@Override // ClientProtocol
@@ -917,8 +917,10 @@ class NameNodeRpcServer implements NamenodeProtocols {
}
@Override // DatanodeProtocol
- public UpgradeCommand processUpgradeCommand(UpgradeCommand comm) throws IOException {
- return namesystem.processDistributedUpgradeCommand(comm);
+ public UpgradeCommand processUpgradeCommand(UpgradeCommand comm)
+ throws IOException {
+ throw new UnsupportedActionException(
+ "Deprecated method, no longer supported");
}
/**
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeJspHelper.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeJspHelper.java
index a21bf29fabd..2c1981cb624 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeJspHelper.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeJspHelper.java
@@ -120,19 +120,6 @@ class NamenodeJspHelper {
return str;
}
- static String getUpgradeStatusText(FSNamesystem fsn) {
- String statusText = "";
- try {
- UpgradeStatusReport status = fsn
- .distributedUpgradeProgress(UpgradeAction.GET_STATUS);
- statusText = (status == null ? "There are no upgrades in progress."
- : status.getStatusText(false));
- } catch (IOException e) {
- statusText = "Upgrade status unknown.";
- }
- return statusText;
- }
-
/** Return a table containing version information. */
static String getVersionTable(FSNamesystem fsn) {
return "