svn merge -c 1233584 from trunk for HDFS-2768.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/branch-0.23-PB@1233789 13f79535-47bb-0310-9956-ffa450edef68
Tsz-wo Sze, 2012-01-20 06:24:47 +00:00
commit f4fee0eb50 (parent d11a1c56dd)
3 changed files with 10 additions and 7 deletions
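The bug this merge back-ports (HDFS-2768): BackupNode.stop() called RPC.stopProxy() on its namenode field, but after the protobuf conversion that field holds a translator object, not a java.lang.reflect.Proxy instance, so stopProxy() could not find an invocation handler and the proxy connections stayed open. The fix types the fields as the translators they actually are and closes them through IOUtils.cleanup(), which relies only on java.io.Closeable. Below is a minimal sketch of the pattern, illustration only and not Hadoop source; JournalService, JournalTranslator, and the local stopProxy() are hypothetical stand-ins:

    import java.io.Closeable;
    import java.io.IOException;
    import java.lang.reflect.Proxy;

    // A translator wraps the real dynamic proxy, so reflection-based
    // shutdown must not be applied to the wrapper itself.
    interface JournalService {
      void journal(byte[] records) throws IOException;
    }

    class JournalTranslator implements JournalService, Closeable {
      private final JournalService rpcProxy; // made via java.lang.reflect.Proxy

      JournalTranslator(JournalService rpcProxy) {
        this.rpcProxy = rpcProxy;
      }

      @Override
      public void journal(byte[] records) throws IOException {
        rpcProxy.journal(records); // delegate the call over the wire
      }

      @Override
      public void close() {
        // The translator knows about the underlying proxy, so this is
        // the right place to stop it; callers just see a Closeable.
        stopProxy(rpcProxy);
      }

      private static void stopProxy(Object proxy) {
        // Mirrors the precondition that broke the old code:
        // getInvocationHandler() throws IllegalArgumentException for any
        // object that is not a dynamic proxy instance, e.g. a translator.
        Proxy.getInvocationHandler(proxy);
        // ...tell the handler to close its connections...
      }
    }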

hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt

@@ -104,6 +104,9 @@ Release 0.23-PB - Unreleased
 
     HDFS-2739. SecondaryNameNode doesn't start up. (jitendra)
 
+    HDFS-2768. BackupNode stop can not close proxy connections because
+    it is not a proxy instance. (Uma Maheswara Rao G via eli)
+
 Release 0.23.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupNode.java

@@ -40,6 +40,7 @@ import org.apache.hadoop.hdfs.server.protocol.NamenodeCommand;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocol;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeRegistration;
 import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
+import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.ipc.RPC;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.security.UserGroupInformation;
@@ -68,7 +69,7 @@ public class BackupNode extends NameNode {
   private static final String BN_SERVICE_RPC_ADDRESS_KEY = DFSConfigKeys.DFS_NAMENODE_BACKUP_SERVICE_RPC_ADDRESS_KEY;
 
   /** Name-node proxy */
-  NamenodeProtocol namenode;
+  NamenodeProtocolTranslatorPB namenode;
   /** Name-node RPC address */
   String nnRpcAddress;
   /** Name-node HTTP address */
@@ -184,7 +185,7 @@ public class BackupNode extends NameNode {
     }
     // Stop the RPC client
     if (namenode != null) {
-      RPC.stopProxy(namenode);
+      IOUtils.cleanup(LOG, namenode);
     }
     namenode = null;
     // Stop the checkpoint manager
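IOUtils.cleanup(Log, Closeable...) is the Hadoop utility the patch switches to: it closes each argument in turn and logs failures instead of rethrowing them, so a shutdown path can never abort half-way. A simplified stand-in is sketched below; QuietCloser is a hypothetical name, and the real method additionally takes a commons-logging Log as its first argument:

    import java.io.Closeable;
    import java.io.IOException;

    // Simplified stand-in for org.apache.hadoop.io.IOUtils.cleanup().
    final class QuietCloser {
      private QuietCloser() {}

      static void cleanup(Closeable... closeables) {
        for (Closeable c : closeables) {
          if (c == null) {
            continue; // cleanup() tolerates nulls, unlike RPC.stopProxy()
          }
          try {
            c.close();
          } catch (IOException e) {
            // the real IOUtils.cleanup logs the exception instead of
            // propagating it out of the shutdown path
          }
        }
      }
    }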

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogBackupOutputStream.java

@@ -24,10 +24,9 @@ import java.util.Arrays;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.protocolPB.JournalProtocolTranslatorPB;
 import org.apache.hadoop.hdfs.server.common.Storage;
-import org.apache.hadoop.hdfs.server.protocol.JournalProtocol;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeRegistration;
 import org.apache.hadoop.io.DataOutputBuffer;
-import org.apache.hadoop.ipc.RPC;
+import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.net.NetUtils;
 
 /**
@@ -41,7 +40,7 @@ import org.apache.hadoop.net.NetUtils;
 class EditLogBackupOutputStream extends EditLogOutputStream {
   static int DEFAULT_BUFFER_SIZE = 256;
 
-  private JournalProtocol backupNode;             // RPC proxy to backup node
+  private JournalProtocolTranslatorPB backupNode; // RPC proxy to backup node
   private NamenodeRegistration bnRegistration;    // backup node registration
   private NamenodeRegistration nnRegistration;    // active node registration
   private EditsDoubleBuffer doubleBuf;
@@ -105,14 +104,14 @@ class EditLogBackupOutputStream extends EditLogOutputStream {
       throw new IOException("BackupEditStream has " + size +
                             " records still to be flushed and cannot be closed.");
     }
-    RPC.stopProxy(backupNode);                // stop the RPC threads
+    IOUtils.cleanup(Storage.LOG, backupNode); // stop the RPC threads
     doubleBuf.close();
     doubleBuf = null;
   }
 
   @Override
   public void abort() throws IOException {
-    RPC.stopProxy(backupNode);
+    IOUtils.cleanup(Storage.LOG, backupNode);
     doubleBuf = null;
   }
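The design point is the same in both files: the PB translators are declared as the field type and shut down through IOUtils.cleanup(), on the expectation that a translator's close() stops its own underlying proxy, so callers never need to reach through the wrapper with RPC.stopProxy(). Because cleanup() also tolerates null arguments and swallows close-time exceptions, both close() and abort() stay safe to call on a partially initialized stream.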