HDFS-3244. Remove dead writable code from hdfs/protocol. Contributed by Eli Collins

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1312061 13f79535-47bb-0310-9956-ffa450edef68

commit 80447bd35a (parent 58228e4e7b)

@@ -360,6 +360,8 @@ Release 2.0.0 - UNRELEASED
    HDFS-3094. add -nonInteractive and -force option to namenode -format
    command (Arpit Gupta via todd)

    HDFS-3244. Remove dead writable code from hdfs/protocol. (eli)

  OPTIMIZATIONS

    HDFS-3024. Improve performance of stringification in addStoredBlock (todd)
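For context: the "dead writable code" removed below follows the same shape in every file this patch touches. Each protocol type registered a WritableFactory and hand-coded write/readFields, even though the RPC layer now serializes these types through the protobuf translator classes instead. A minimal illustrative sketch of that pattern (the class and fields here are made up, not taken from the patch):

import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;

import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.io.WritableFactories;
import org.apache.hadoop.io.WritableFactory;

// Illustrative only: the shape of the boilerplate this change deletes.
public class ExampleInfo implements Writable {
  static { // register a ctor so the Writable machinery can instantiate the type
    WritableFactories.setFactory(ExampleInfo.class, new WritableFactory() {
      public Writable newInstance() { return new ExampleInfo(); }
    });
  }

  private String path = "";
  private long length;

  public ExampleInfo() {} // no-arg constructor required by the factory

  @Override
  public void write(DataOutput out) throws IOException {
    Text.writeString(out, path); // field-by-field, hand-rolled encoding
    out.writeLong(length);
  }

  @Override
  public void readFields(DataInput in) throws IOException {
    path = Text.readString(in); // must mirror write() exactly
    length = in.readLong();
  }
}

Once nothing on the wire is produced or consumed by these methods, the pattern above is unreachable, which is why the hunks below can drop it wholesale.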
@@ -17,16 +17,8 @@
 */
package org.apache.hadoop.hdfs.protocol;

import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;

import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.io.WritableFactories;
import org.apache.hadoop.io.WritableFactory;

/**
* A block and the full path information to the block data file and
@@ -34,20 +26,11 @@ import org.apache.hadoop.io.WritableFactory;
 */
@InterfaceAudience.Private
@InterfaceStability.Evolving
public class BlockLocalPathInfo implements Writable {
static final WritableFactory FACTORY = new WritableFactory() {
public Writable newInstance() { return new BlockLocalPathInfo(); }
};
static { // register a ctor
WritableFactories.setFactory(BlockLocalPathInfo.class, FACTORY);
}

public class BlockLocalPathInfo {
private ExtendedBlock block;
private String localBlockPath = ""; // local file storing the data
private String localMetaPath = ""; // local file storing the checksum

public BlockLocalPathInfo() {}

/**
* Constructs BlockLocalPathInfo.
* @param b The block corresponding to this lock path info.
@@ -77,21 +60,6 @@ public class BlockLocalPathInfo implements Writable {
 */
public String getMetaPath() {return localMetaPath;}

@Override
public void write(DataOutput out) throws IOException {
block.write(out);
Text.writeString(out, localBlockPath);
Text.writeString(out, localMetaPath);
}

@Override
public void readFields(DataInput in) throws IOException {
block = new ExtendedBlock();
block.readFields(in);
localBlockPath = Text.readString(in);
localMetaPath = Text.readString(in);
}

/**
* Get number of bytes in the block.
* @return Number of bytes in the block.
@@ -24,7 +24,6 @@ import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
import org.apache.hadoop.hdfs.security.token.block.BlockTokenSelector;
import org.apache.hadoop.ipc.VersionedProtocol;
import org.apache.hadoop.security.KerberosInfo;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.security.token.TokenInfo;
@@ -42,9 +41,6 @@ public interface ClientDatanodeProtocol {
* the client interface to the DN AND the RPC protocol used to
* communicate with the NN.
*
* Post version 10 (release 23 of Hadoop), the protocol is implemented in
* {@literal ../protocolR23Compatible/ClientDatanodeWireProtocol}
*
* This class is used by both the DFSClient and the
* DN server side to insulate from the protocol serialization.
*
@@ -60,7 +56,6 @@ public interface ClientDatanodeProtocol {
*
* 9 is the last version id when this class was used for protocols
* serialization. DO not update this version any further.
* Changes are recorded in R23 classes.
*/
public static final long versionID = 9L;
@@ -66,9 +66,6 @@ public interface ClientProtocol {
* the client interface to the NN AND the RPC protocol used to
* communicate with the NN.
*
* Post version 70 (release 23 of Hadoop), the protocol is implemented in
* {@literal ../protocolR23Compatible/ClientNamenodeWireProtocol}
*
* This class is used by both the DFSClient and the
* NN server side to insulate from the protocol serialization.
*
@@ -84,7 +81,6 @@ public interface ClientProtocol {
*
* 69L is the last version id when this class was used for protocols
* serialization. DO not update this version any further.
* Changes are recorded in R23 classes.
*/
public static final long versionID = 69L;
@@ -17,11 +17,6 @@
 */
package org.apache.hadoop.hdfs.protocol;

import org.apache.hadoop.io.Writable;
import org.apache.hadoop.io.Text;
import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import java.util.Arrays;

/**
@@ -29,7 +24,7 @@ import java.util.Arrays;
* used for iterative calls to NameNode.listCorruptFileBlocks.
*
*/
public class CorruptFileBlocks implements Writable {
public class CorruptFileBlocks {
// used for hashCode
private static final int PRIME = 16777619;

@@ -53,28 +48,6 @@ public class CorruptFileBlocks implements Writable {
return cookie;
}

@Override
public void readFields(DataInput in) throws IOException {
int fileCount = in.readInt();
files = new String[fileCount];
for (int i = 0; i < fileCount; i++) {
files[i] = Text.readString(in);
}
cookie = Text.readString(in);
}

@Override
public void write(DataOutput out) throws IOException {
out.writeInt(files.length);
for (int i = 0; i < files.length; i++) {
Text.writeString(out, files[i]);
}
Text.writeString(out, cookie);
}

@Override
public boolean equals(Object obj) {
if (this == obj) {
@@ -18,15 +18,9 @@

package org.apache.hadoop.hdfs.protocol;

import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;

import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.WritableComparable;

/**
* This class represents the primary identifier for a Datanode.
@@ -41,8 +35,8 @@ import org.apache.hadoop.io.WritableComparable;
 */
@InterfaceAudience.Private
@InterfaceStability.Evolving
public class DatanodeID implements WritableComparable<DatanodeID> {
public static final DatanodeID[] EMPTY_ARRAY = {};
public class DatanodeID implements Comparable<DatanodeID> {
public static final DatanodeID[] EMPTY_ARRAY = {};

protected String ipAddr; // IP address
protected String hostName; // hostname
@@ -51,10 +45,6 @@ public class DatanodeID implements WritableComparable<DatanodeID> {
protected int infoPort; // info server port
protected int ipcPort; // IPC server port

public DatanodeID() {
this("", DFSConfigKeys.DFS_DATANODE_DEFAULT_PORT);
}

public DatanodeID(String ipAddr, int xferPort) {
this(ipAddr, "", "", xferPort,
DFSConfigKeys.DFS_DATANODE_HTTP_DEFAULT_PORT,
@@ -234,28 +224,4 @@ public class DatanodeID implements WritableComparable<DatanodeID> {
public int compareTo(DatanodeID that) {
return getXferAddr().compareTo(that.getXferAddr());
}

@Override
public void write(DataOutput out) throws IOException {
Text.writeString(out, ipAddr);
Text.writeString(out, hostName);
Text.writeString(out, storageID);
out.writeShort(xferPort);
out.writeShort(infoPort);
out.writeShort(ipcPort);
}

@Override
public void readFields(DataInput in) throws IOException {
ipAddr = Text.readString(in);
hostName = Text.readString(in);
storageID = Text.readString(in);
// The port read could be negative, if the port is a large number (more
// than 15 bits in storage size (but less than 16 bits).
// So chop off the first two bytes (and hence the signed bits) before
// setting the field.
xferPort = in.readShort() & 0x0000ffff;
infoPort = in.readShort() & 0x0000ffff;
ipcPort = in.readShort() & 0x0000ffff;
}
}
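Aside on the masking in the removed readFields above: writeShort keeps only the low 16 bits, so any port above 32767 comes back negative when read as a Java short, and the & 0x0000ffff recovers the unsigned value. A tiny standalone illustration (not part of the patch):

public class PortMaskDemo {
  public static void main(String[] args) {
    short onWire = (short) 50070;       // what writeShort() effectively stored
    int signExtended = onWire;          // -15466: sign bits leak into the int
    int unsigned = onWire & 0x0000ffff; // 50070: the intended port number
    System.out.println(signExtended + " -> " + unsigned);
  }
}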
@@ -17,19 +17,11 @@
 */
package org.apache.hadoop.hdfs.protocol;

import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import java.util.Date;

import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.io.WritableFactories;
import org.apache.hadoop.io.WritableFactory;
import org.apache.hadoop.io.WritableUtils;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.net.NetworkTopology;
import org.apache.hadoop.net.Node;
@@ -78,11 +70,6 @@ public class DatanodeInfo extends DatanodeID implements Node {

protected AdminStates adminState;

public DatanodeInfo() {
super();
adminState = null;
}

public DatanodeInfo(DatanodeInfo from) {
super(from);
this.capacity = from.getCapacity();
@@ -356,50 +343,6 @@ public class DatanodeInfo extends DatanodeID implements Node {
public int getLevel() { return level; }
public void setLevel(int level) {this.level = level;}

/////////////////////////////////////////////////
// Writable
/////////////////////////////////////////////////
static { // register a ctor
WritableFactories.setFactory
(DatanodeInfo.class,
new WritableFactory() {
public Writable newInstance() { return new DatanodeInfo(); }
});
}

@Override
public void write(DataOutput out) throws IOException {
super.write(out);
out.writeLong(capacity);
out.writeLong(dfsUsed);
out.writeLong(remaining);
out.writeLong(blockPoolUsed);
out.writeLong(lastUpdate);
out.writeInt(xceiverCount);
Text.writeString(out, location);
WritableUtils.writeEnum(out, getAdminState());
}

@Override
public void readFields(DataInput in) throws IOException {
super.readFields(in);
this.capacity = in.readLong();
this.dfsUsed = in.readLong();
this.remaining = in.readLong();
this.blockPoolUsed = in.readLong();
this.lastUpdate = in.readLong();
this.xceiverCount = in.readInt();
this.location = Text.readString(in);
setAdminState(WritableUtils.readEnum(in, AdminStates.class));
}

/** Read a DatanodeInfo */
public static DatanodeInfo read(DataInput in) throws IOException {
final DatanodeInfo d = new DatanodeInfo();
d.readFields(in);
return d;
}

@Override
public int hashCode() {
// Super implementation is sufficient
@@ -16,15 +16,8 @@
 */
package org.apache.hadoop.hdfs.protocol;

import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;

import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.io.WritableFactories;
import org.apache.hadoop.io.WritableFactory;

/**
* This class defines a partial listing of a directory to support
@@ -32,24 +25,10 @@ import org.apache.hadoop.io.WritableFactory;
 */
@InterfaceAudience.Private
@InterfaceStability.Evolving
public class DirectoryListing implements Writable {
static { // register a ctor
WritableFactories.setFactory
(DirectoryListing.class,
new WritableFactory() {
public Writable newInstance() { return new DirectoryListing(); }
});
}

public class DirectoryListing {
private HdfsFileStatus[] partialListing;
private int remainingEntries;

/**
* default constructor
*/
public DirectoryListing() {
}

/**
* constructor
* @param partialListing a partial listing of a directory
@@ -103,39 +82,4 @@ public class DirectoryListing implements Writable {
}
return partialListing[partialListing.length-1].getLocalNameInBytes();
}

// Writable interface
@Override
public void readFields(DataInput in) throws IOException {
int numEntries = in.readInt();
partialListing = new HdfsFileStatus[numEntries];
if (numEntries !=0 ) {
boolean hasLocation = in.readBoolean();
for (int i=0; i<numEntries; i++) {
if (hasLocation) {
partialListing[i] = new HdfsLocatedFileStatus();
} else {
partialListing[i] = new HdfsFileStatus();
}
partialListing[i].readFields(in);
}
}
remainingEntries = in.readInt();
}

@Override
public void write(DataOutput out) throws IOException {
out.writeInt(partialListing.length);
if (partialListing.length != 0) {
if (partialListing[0] instanceof HdfsLocatedFileStatus) {
out.writeBoolean(true);
} else {
out.writeBoolean(false);
}
for (HdfsFileStatus fileStatus : partialListing) {
fileStatus.write(out);
}
}
out.writeInt(remainingEntries);
}
}
@@ -17,34 +17,18 @@
 */
package org.apache.hadoop.hdfs.protocol;

import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;

import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.hdfs.DeprecatedUTF8;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.io.WritableFactories;
import org.apache.hadoop.io.WritableFactory;

/**
* Identifies a Block uniquely across the block pools
*/
@InterfaceAudience.Private
@InterfaceStability.Evolving
public class ExtendedBlock implements Writable {
public class ExtendedBlock {
private String poolId;
private Block block;

static { // register a ctor
WritableFactories.setFactory(ExtendedBlock.class, new WritableFactory() {
public Writable newInstance() {
return new ExtendedBlock();
}
});
}

public ExtendedBlock() {
this(null, 0, 0, 0);
}
@@ -68,28 +52,6 @@ public class ExtendedBlock implements Writable {
block = new Block(blkid, len, genstamp);
}

public void write(DataOutput out) throws IOException {
DeprecatedUTF8.writeString(out, poolId);
block.writeHelper(out);
}

public void readFields(DataInput in) throws IOException {
this.poolId = DeprecatedUTF8.readString(in);
block.readHelper(in);
}

// Write only the identifier part of the block
public void writeId(DataOutput out) throws IOException {
DeprecatedUTF8.writeString(out, poolId);
block.writeId(out);
}

// Read only the identifier part of the block
public void readId(DataInput in) throws IOException {
this.poolId = DeprecatedUTF8.readString(in);
block.readId(in);
}

public String getBlockPoolId() {
return poolId;
}
@@ -17,32 +17,17 @@
 */
package org.apache.hadoop.hdfs.protocol;

import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;

import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.io.WritableFactories;
import org.apache.hadoop.io.WritableFactory;

/** Interface that represents the over the wire information for a file.
 */
@InterfaceAudience.Private
@InterfaceStability.Evolving
public class HdfsFileStatus implements Writable {
static { // register a ctor
WritableFactories.setFactory
(HdfsFileStatus.class,
new WritableFactory() {
public Writable newInstance() { return new HdfsFileStatus(); }
});
}
public class HdfsFileStatus {

private byte[] path; // local name of the inode that's encoded in java UTF8
private byte[] symlink; // symlink target encoded in java UTF8 or null
@@ -58,13 +43,6 @@ public class HdfsFileStatus implements Writable {

public static final byte[] EMPTY_NAME = new byte[0];

/**
* default constructor
*/
public HdfsFileStatus() {
this(0, false, 0, 0, 0, 0, null, null, null, null, null);
}

/**
* Constructor
* @param length the number of bytes the file has
@@ -242,50 +220,4 @@ public class HdfsFileStatus implements Writable {
final public byte[] getSymlinkInBytes() {
return symlink;
}

//////////////////////////////////////////////////
// Writable
//////////////////////////////////////////////////
public void write(DataOutput out) throws IOException {
out.writeInt(path.length);
out.write(path);
out.writeLong(length);
out.writeBoolean(isdir);
out.writeShort(block_replication);
out.writeLong(blocksize);
out.writeLong(modification_time);
out.writeLong(access_time);
permission.write(out);
Text.writeString(out, owner);
Text.writeString(out, group);
out.writeBoolean(isSymlink());
if (isSymlink()) {
out.writeInt(symlink.length);
out.write(symlink);
}
}

public void readFields(DataInput in) throws IOException {
int numOfBytes = in.readInt();
if (numOfBytes == 0) {
this.path = EMPTY_NAME;
} else {
this.path = new byte[numOfBytes];
in.readFully(path);
}
this.length = in.readLong();
this.isdir = in.readBoolean();
this.block_replication = in.readShort();
blocksize = in.readLong();
modification_time = in.readLong();
access_time = in.readLong();
permission.readFields(in);
owner = Text.readString(in);
group = Text.readString(in);
if (in.readBoolean()) {
numOfBytes = in.readInt();
this.symlink = new byte[numOfBytes];
in.readFully(symlink);
}
}
}
@@ -17,10 +17,6 @@
 */
package org.apache.hadoop.hdfs.protocol;

import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;

import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.fs.permission.FsPermission;
@@ -34,12 +30,6 @@ import org.apache.hadoop.fs.permission.FsPermission;
public class HdfsLocatedFileStatus extends HdfsFileStatus {
private LocatedBlocks locations;

/**
* Default constructor
*/
public HdfsLocatedFileStatus() {
}

/**
* Constructor
*
@@ -69,22 +59,4 @@ public class HdfsLocatedFileStatus extends HdfsFileStatus {
public LocatedBlocks getBlockLocations() {
return locations;
}

//////////////////////////////////////////////////
// Writable
//////////////////////////////////////////////////
public void write(DataOutput out) throws IOException {
super.write(out);
if (!isDir() && !isSymlink()) {
locations.write(out);
}
}

public void readFields(DataInput in) throws IOException {
super.readFields(in);
if (!isDir() && !isSymlink()) {
locations = new LocatedBlocks();
locations.readFields(in);
}
}
}
@@ -20,11 +20,8 @@ package org.apache.hadoop.hdfs.protocol;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
import org.apache.hadoop.io.*;
import org.apache.hadoop.security.token.Token;

import java.io.*;

/****************************************************
* A LocatedBlock is a pair of Block, DatanodeInfo[]
* objects. It tells where to find a Block.
@@ -32,15 +29,7 @@ import java.io.*;
****************************************************/
@InterfaceAudience.Private
@InterfaceStability.Evolving
public class LocatedBlock implements Writable {

static { // register a ctor
WritableFactories.setFactory
(LocatedBlock.class,
new WritableFactory() {
public Writable newInstance() { return new LocatedBlock(); }
});
}
public class LocatedBlock {

private ExtendedBlock b;
private long offset; // offset of the first byte of the block in the file
@@ -124,41 +113,6 @@ public class LocatedBlock implements Writable {
return this.corrupt;
}

///////////////////////////////////////////
// Writable
///////////////////////////////////////////
public void write(DataOutput out) throws IOException {
blockToken.write(out);
out.writeBoolean(corrupt);
out.writeLong(offset);
b.write(out);
out.writeInt(locs.length);
for (int i = 0; i < locs.length; i++) {
locs[i].write(out);
}
}

public void readFields(DataInput in) throws IOException {
blockToken.readFields(in);
this.corrupt = in.readBoolean();
offset = in.readLong();
this.b = new ExtendedBlock();
b.readFields(in);
int count = in.readInt();
this.locs = new DatanodeInfo[count];
for (int i = 0; i < locs.length; i++) {
locs[i] = new DatanodeInfo();
locs[i].readFields(in);
}
}

/** Read LocatedBlock from in. */
public static LocatedBlock read(DataInput in) throws IOException {
final LocatedBlock lb = new LocatedBlock();
lb.readFields(in);
return lb;
}

@Override
public String toString() {
return getClass().getSimpleName() + "{" + b
@@ -17,26 +17,19 @@
 */
package org.apache.hadoop.hdfs.protocol;

import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import java.util.List;
import java.util.ArrayList;
import java.util.Collections;
import java.util.Comparator;

import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.io.WritableFactories;
import org.apache.hadoop.io.WritableFactory;

/**
* Collection of blocks with their locations and the file length.
*/
@InterfaceAudience.Private
@InterfaceStability.Evolving
public class LocatedBlocks implements Writable {
public class LocatedBlocks {
private long fileLength;
private List<LocatedBlock> blocks; // array of blocks with prioritized locations
private boolean underConstruction;
@@ -167,61 +160,6 @@ public class LocatedBlocks implements Writable {
return binSearchResult >= 0 ? binSearchResult : -(binSearchResult+1);
}

//////////////////////////////////////////////////
// Writable
//////////////////////////////////////////////////
static { // register a ctor
WritableFactories.setFactory
(LocatedBlocks.class,
new WritableFactory() {
public Writable newInstance() { return new LocatedBlocks(); }
});
}

public void write(DataOutput out) throws IOException {
out.writeLong(this.fileLength);
out.writeBoolean(underConstruction);

//write the last located block
final boolean isNull = lastLocatedBlock == null;
out.writeBoolean(isNull);
if (!isNull) {
lastLocatedBlock.write(out);
}
out.writeBoolean(isLastBlockComplete);

// write located blocks
int nrBlocks = locatedBlockCount();
out.writeInt(nrBlocks);
if (nrBlocks == 0) {
return;
}
for (LocatedBlock blk : this.blocks) {
blk.write(out);
}
}

public void readFields(DataInput in) throws IOException {
this.fileLength = in.readLong();
underConstruction = in.readBoolean();

//read the last located block
final boolean isNull = in.readBoolean();
if (!isNull) {
lastLocatedBlock = LocatedBlock.read(in);
}
isLastBlockComplete = in.readBoolean();

// read located blocks
int nrBlocks = in.readInt();
this.blocks = new ArrayList<LocatedBlock>(nrBlocks);
for (int idx = 0; idx < nrBlocks; idx++) {
LocatedBlock blk = new LocatedBlock();
blk.readFields(in);
this.blocks.add(blk);
}
}

@Override
public String toString() {
final StringBuilder b = new StringBuilder(getClass().getSimpleName());
@@ -39,12 +39,10 @@ import org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlo
import org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesRequestProto;
import org.apache.hadoop.hdfs.protocolR23Compatible.ProtocolSignatureWritable;
import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
import org.apache.hadoop.ipc.ProtobufHelper;
import org.apache.hadoop.ipc.ProtobufRpcEngine;
import org.apache.hadoop.ipc.ProtocolMetaInterface;
import org.apache.hadoop.ipc.ProtocolSignature;
import org.apache.hadoop.ipc.ProtocolTranslator;
import org.apache.hadoop.ipc.RPC;
import org.apache.hadoop.ipc.RpcClientUtil;
@@ -40,7 +40,6 @@ import org.apache.hadoop.hdfs.protocol.DatanodeID;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.DirectoryListing;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.UpgradeAction;
@@ -102,24 +101,16 @@ import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.Update
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdatePipelineRequestProto;
import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
import org.apache.hadoop.hdfs.server.common.UpgradeStatusReport;
import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.apache.hadoop.hdfs.server.namenode.NotReplicatedYetException;
import org.apache.hadoop.hdfs.server.namenode.SafeModeException;
import org.apache.hadoop.io.EnumSetWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.retry.RetryPolicies;
import org.apache.hadoop.io.retry.RetryPolicy;
import org.apache.hadoop.io.retry.RetryProxy;
import org.apache.hadoop.ipc.ProtobufHelper;
import org.apache.hadoop.ipc.ProtobufRpcEngine;
import org.apache.hadoop.ipc.ProtocolMetaInterface;
import org.apache.hadoop.ipc.RPC;
import org.apache.hadoop.ipc.RemoteException;
import org.apache.hadoop.ipc.RpcClientUtil;
import org.apache.hadoop.ipc.RpcPayloadHeader.RpcKind;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.security.AccessControlException;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.token.Token;

import com.google.protobuf.ByteString;
@@ -127,8 +118,8 @@ import com.google.protobuf.ServiceException;

/**
* This class forwards NN's ClientProtocol calls as RPC calls to the NN server
* while translating from the parameter types used in ClientProtocol to those
* used in protocolR23Compatile.*.
* while translating from the parameter types used in ClientProtocol to the
* new PB types.
*/
@InterfaceAudience.Private
@InterfaceStability.Stable
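The class comment above describes what replaces all of the deleted Writable plumbing: each client-side call is wrapped in a generated protobuf request, sent through the PB stub, and the response unwrapped again. A rough sketch of that translator pattern (the method, request type, and field names here are illustrative, not copied from the class):

// Illustrative sketch of a PB-translator method; the proto names are made up.
public long getFileLength(String src) throws IOException {
  GetFileLengthRequestProto req = GetFileLengthRequestProto.newBuilder()
      .setSrc(src)
      .build();                                 // 1. wrap arguments in a proto
  try {
    return rpcProxy.getFileLength(null, req)    // 2. call the generated stub
        .getLength();                           // 3. unwrap the proto response
  } catch (ServiceException e) {
    throw ProtobufHelper.getRemoteException(e); // 4. translate remote failures
  }
}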
@@ -22,10 +22,8 @@ import java.io.Closeable;
import java.io.IOException;
import org.apache.hadoop.hdfs.protocol.proto.GetUserMappingsProtocolProtos.GetGroupsForUserRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.GetUserMappingsProtocolProtos.GetGroupsForUserResponseProto;
import org.apache.hadoop.hdfs.protocolR23Compatible.ProtocolSignatureWritable;
import org.apache.hadoop.ipc.ProtobufHelper;
import org.apache.hadoop.ipc.ProtocolMetaInterface;
import org.apache.hadoop.ipc.ProtocolSignature;
import org.apache.hadoop.ipc.RPC;
import org.apache.hadoop.ipc.RpcClientUtil;
import org.apache.hadoop.ipc.RpcPayloadHeader.RpcKind;
@@ -35,7 +35,6 @@ import org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetTransacti
import org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RegisterRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RollEditLogRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.StartCheckpointRequestProto;
import org.apache.hadoop.hdfs.protocolR23Compatible.ProtocolSignatureWritable;
import org.apache.hadoop.hdfs.security.token.block.ExportedBlockKeys;
import org.apache.hadoop.hdfs.server.namenode.CheckpointSignature;
import org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations;
@@ -46,7 +45,6 @@ import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
import org.apache.hadoop.hdfs.server.protocol.RemoteEditLogManifest;
import org.apache.hadoop.ipc.ProtobufHelper;
import org.apache.hadoop.ipc.ProtocolMetaInterface;
import org.apache.hadoop.ipc.ProtocolSignature;
import org.apache.hadoop.ipc.RPC;
import org.apache.hadoop.ipc.RpcClientUtil;
import org.apache.hadoop.ipc.RpcPayloadHeader.RpcKind;
@@ -22,10 +22,8 @@ import java.io.Closeable;
import java.io.IOException;

import org.apache.hadoop.hdfs.protocol.proto.RefreshAuthorizationPolicyProtocolProtos.RefreshServiceAclRequestProto;
import org.apache.hadoop.hdfs.protocolR23Compatible.ProtocolSignatureWritable;
import org.apache.hadoop.ipc.ProtobufHelper;
import org.apache.hadoop.ipc.ProtocolMetaInterface;
import org.apache.hadoop.ipc.ProtocolSignature;
import org.apache.hadoop.ipc.RPC;
import org.apache.hadoop.ipc.RpcClientUtil;
import org.apache.hadoop.ipc.RpcPayloadHeader.RpcKind;
@@ -23,10 +23,8 @@ import java.io.IOException;

import org.apache.hadoop.hdfs.protocol.proto.RefreshUserMappingsProtocolProtos.RefreshSuperUserGroupsConfigurationRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.RefreshUserMappingsProtocolProtos.RefreshUserToGroupsMappingsRequestProto;
import org.apache.hadoop.hdfs.protocolR23Compatible.ProtocolSignatureWritable;
import org.apache.hadoop.ipc.ProtobufHelper;
import org.apache.hadoop.ipc.ProtocolMetaInterface;
import org.apache.hadoop.ipc.ProtocolSignature;
import org.apache.hadoop.ipc.RPC;
import org.apache.hadoop.ipc.RpcClientUtil;
import org.apache.hadoop.ipc.RpcPayloadHeader.RpcKind;
@@ -1,110 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

package org.apache.hadoop.hdfs.protocolR23Compatible;

import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;

import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.io.WritableFactories;
import org.apache.hadoop.io.WritableFactory;

@InterfaceAudience.Private
@InterfaceStability.Evolving
public class ProtocolSignatureWritable implements Writable {
static { // register a ctor
WritableFactories.setFactory
(ProtocolSignatureWritable.class,
new WritableFactory() {
public Writable newInstance() { return new ProtocolSignatureWritable(); }
});
}

private long version;
private int[] methods = null; // an array of method hash codes

public static org.apache.hadoop.ipc.ProtocolSignature convert(
final ProtocolSignatureWritable ps) {
if (ps == null) return null;
return new org.apache.hadoop.ipc.ProtocolSignature(
ps.getVersion(), ps.getMethods());
}

public static ProtocolSignatureWritable convert(
final org.apache.hadoop.ipc.ProtocolSignature ps) {
if (ps == null) return null;
return new ProtocolSignatureWritable(ps.getVersion(), ps.getMethods());
}

/**
* default constructor
*/
public ProtocolSignatureWritable() {
}

/**
* Constructor
*
* @param version server version
* @param methodHashcodes hash codes of the methods supported by server
*/
public ProtocolSignatureWritable(long version, int[] methodHashcodes) {
this.version = version;
this.methods = methodHashcodes;
}

public long getVersion() {
return version;
}

public int[] getMethods() {
return methods;
}

@Override
public void readFields(DataInput in) throws IOException {
version = in.readLong();
boolean hasMethods = in.readBoolean();
if (hasMethods) {
int numMethods = in.readInt();
methods = new int[numMethods];
for (int i=0; i<numMethods; i++) {
methods[i] = in.readInt();
}
}
}

@Override
public void write(DataOutput out) throws IOException {
out.writeLong(version);
if (methods == null) {
out.writeBoolean(false);
} else {
out.writeBoolean(true);
out.writeInt(methods.length);
for (int method : methods) {
out.writeInt(method);
}
}
}
}
@@ -1,44 +0,0 @@
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
<html>
<!--
Licensed to the Apache Software Foundation (ASF) under one or more
contributor license agreements. See the NOTICE file distributed with
this work for additional information regarding copyright ownership.
The ASF licenses this file to You under the Apache License, Version 2.0
(the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
-->
<head>
<title>Namenode Client Protocols Compatible with the version
of Hadoop Release 23</title>
</head>
<body>
<p>
This package is for ALL versions of HDFS protocols that use writable data types
and are compatible with the version of the protocol that was
shipped with Release 23 of Hadoop.
</p>

Compatibility should be maintained:
<ul>
<li> Do NOT delete any methods </li>
<li> Do NOT change the signatures of any method:
do not change parameters, parameter types
or exceptions thrown by the method.</li>
</ul>
<p>
You can add new methods and new types. If you need to change a method's
signature, please add a new method instead.
When you add new methods and new types do NOT change the version number.
<p>
Version number is changed ONLY when compatibility is broken (which
should be very rare and a big deal).
</p>
@@ -153,8 +153,6 @@ public class DatanodeDescriptor extends DatanodeInfo {
 */
private boolean disallowed = false;

public DatanodeDescriptor() {}

/**
* DatanodeDescriptor constructor
* @param nodeID id of the data node
@@ -17,13 +17,7 @@
 */
package org.apache.hadoop.hdfs.server.common;

import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;

import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.io.WritableUtils;

import com.google.common.base.Joiner;

@@ -33,16 +27,16 @@ import com.google.common.base.Joiner;
* TODO namespaceID should be long and computed as hash(address + port)
*/
@InterfaceAudience.Private
public class StorageInfo implements Writable {
public class StorageInfo {
public int layoutVersion; // layout version of the storage data
public int namespaceID; // id of the file system
public String clusterID; // id of the cluster
public long cTime; // creation time of the file system state

public StorageInfo () {
this(0, 0, "", 0L);
}

public StorageInfo(int layoutV, int nsID, String cid, long cT) {
layoutVersion = layoutV;
clusterID = cid;
@@ -83,23 +77,6 @@ public class StorageInfo implements Writable {
namespaceID = from.namespaceID;
cTime = from.cTime;
}

/////////////////////////////////////////////////
// Writable
/////////////////////////////////////////////////
public void write(DataOutput out) throws IOException {
out.writeInt(getLayoutVersion());
out.writeInt(getNamespaceID());
WritableUtils.writeString(out, clusterID);
out.writeLong(getCTime());
}

public void readFields(DataInput in) throws IOException {
layoutVersion = in.readInt();
namespaceID = in.readInt();
clusterID = WritableUtils.readString(in);
cTime = in.readLong();
}

public String toString() {
StringBuilder sb = new StringBuilder();
@@ -17,14 +17,7 @@
 */
package org.apache.hadoop.hdfs.server.common;

import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;

import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.io.WritableFactories;
import org.apache.hadoop.io.WritableFactory;

/**
* Base upgrade upgradeStatus class.
@@ -33,17 +26,11 @@ import org.apache.hadoop.io.WritableFactory;
* Describes status of current upgrade.
*/
@InterfaceAudience.Private
public class UpgradeStatusReport implements Writable {
public class UpgradeStatusReport {
protected int version;
protected short upgradeStatus;
protected boolean finalized;

public UpgradeStatusReport() {
this.version = 0;
this.upgradeStatus = 0;
this.finalized = false;
}

public UpgradeStatusReport(int version, short status, boolean isFinalized) {
this.version = version;
this.upgradeStatus = status;
@@ -98,29 +85,4 @@ public class UpgradeStatusReport implements Writable {
public String toString() {
return getStatusText(false);
}

/////////////////////////////////////////////////
// Writable
/////////////////////////////////////////////////
static { // register a ctor
WritableFactories.setFactory
(UpgradeStatusReport.class,
new WritableFactory() {
public Writable newInstance() { return new UpgradeStatusReport(); }
});
}

/**
*/
public void write(DataOutput out) throws IOException {
out.writeInt(this.version);
out.writeShort(this.upgradeStatus);
}

/**
*/
public void readFields(DataInput in) throws IOException {
this.version = in.readInt();
this.upgradeStatus = in.readShort();
}
}
@@ -17,15 +17,11 @@
 */
package org.apache.hadoop.hdfs.server.namenode;

import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;

import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.hdfs.server.common.StorageInfo;
import org.apache.hadoop.hdfs.server.namenode.FSImage;
import org.apache.hadoop.io.WritableComparable;
import org.apache.hadoop.io.WritableUtils;

import com.google.common.collect.ComparisonChain;

@@ -33,16 +29,15 @@ import com.google.common.collect.ComparisonChain;
* A unique signature intended to identify checkpoint transactions.
*/
@InterfaceAudience.Private
public class CheckpointSignature extends StorageInfo
implements WritableComparable<CheckpointSignature> {
public class CheckpointSignature extends StorageInfo
implements Comparable<CheckpointSignature> {

private static final String FIELD_SEPARATOR = ":";
private static final int NUM_FIELDS = 7;
String blockpoolID = "";
long mostRecentCheckpointTxId;
long curSegmentTxId;

public CheckpointSignature() {}

CheckpointSignature(FSImage fsImage) {
super(fsImage.getStorage());
blockpoolID = fsImage.getBlockPoolID();
@@ -162,21 +157,4 @@ public class CheckpointSignature extends StorageInfo
(int)(cTime ^ mostRecentCheckpointTxId ^ curSegmentTxId)
^ clusterID.hashCode() ^ blockpoolID.hashCode();
}

/////////////////////////////////////////////////
// Writable
/////////////////////////////////////////////////
public void write(DataOutput out) throws IOException {
super.write(out);
WritableUtils.writeString(out, blockpoolID);
out.writeLong(mostRecentCheckpointTxId);
out.writeLong(curSegmentTxId);
}

public void readFields(DataInput in) throws IOException {
super.readFields(in);
blockpoolID = WritableUtils.readString(in);
mostRecentCheckpointTxId = in.readLong();
curSegmentTxId = in.readLong();
}
}
@@ -17,9 +17,6 @@
 */
package org.apache.hadoop.hdfs.server.protocol;

import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import java.util.Collection;
import java.util.ArrayList;

@@ -58,14 +55,6 @@ public class BlockRecoveryCommand extends DatanodeCommand {
public static class RecoveringBlock extends LocatedBlock {
private long newGenerationStamp;

/**
* Create empty RecoveringBlock.
*/
public RecoveringBlock() {
super();
newGenerationStamp = -1L;
}

/**
* Create RecoveringBlock.
*/
@@ -17,16 +17,9 @@
 */
package org.apache.hadoop.hdfs.server.protocol;

import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;

import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.io.WritableUtils;

/** A class to implement an array of BlockLocations
* It provide efficient customized serialization/deserialization methods
@@ -34,23 +27,17 @@ import org.apache.hadoop.io.WritableUtils;
 */
@InterfaceAudience.Private
@InterfaceStability.Evolving
public class BlocksWithLocations implements Writable {
public class BlocksWithLocations {

/**
* A class to keep track of a block and its locations
*/
@InterfaceAudience.Private
@InterfaceStability.Evolving
public static class BlockWithLocations implements Writable {
public static class BlockWithLocations {
Block block;
String datanodeIDs[];

/** default constructor */
public BlockWithLocations() {
block = new Block();
datanodeIDs = null;
}

/** constructor */
public BlockWithLocations(Block b, String[] datanodes) {
block = b;
@@ -66,33 +53,10 @@ public class BlocksWithLocations implements Writable {
public String[] getDatanodes() {
return datanodeIDs;
}

/** deserialization method */
public void readFields(DataInput in) throws IOException {
block.readFields(in);
int len = WritableUtils.readVInt(in); // variable length integer
datanodeIDs = new String[len];
for(int i=0; i<len; i++) {
datanodeIDs[i] = Text.readString(in);
}
}

/** serialization method */
public void write(DataOutput out) throws IOException {
block.write(out);
WritableUtils.writeVInt(out, datanodeIDs.length); // variable length int
for(String id:datanodeIDs) {
Text.writeString(out, id);
}
}
}

private BlockWithLocations[] blocks;

/** default constructor */
BlocksWithLocations() {
}

/** Constructor with one parameter */
public BlocksWithLocations( BlockWithLocations[] blocks ) {
this.blocks = blocks;
@@ -102,22 +66,4 @@ public class BlocksWithLocations implements Writable {
public BlockWithLocations[] getBlocks() {
return blocks;
}

/** serialization method */
public void write( DataOutput out ) throws IOException {
WritableUtils.writeVInt(out, blocks.length);
for(int i=0; i<blocks.length; i++) {
blocks[i].write(out);
}
}

/** deserialization method */
public void readFields(DataInput in) throws IOException {
int len = WritableUtils.readVInt(in);
blocks = new BlockWithLocations[len];
for(int i=0; i<len; i++) {
blocks[i] = new BlockWithLocations();
blocks[i].readFields(in);
}
}
}
@@ -18,20 +18,12 @@

package org.apache.hadoop.hdfs.server.protocol;

import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;

import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.protocol.DatanodeID;
import org.apache.hadoop.hdfs.security.token.block.ExportedBlockKeys;
import org.apache.hadoop.hdfs.server.common.Storage;
import org.apache.hadoop.hdfs.server.common.StorageInfo;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.io.WritableFactories;
import org.apache.hadoop.io.WritableFactory;

/**
* DatanodeRegistration class contains all information the name-node needs
@@ -41,23 +33,11 @@ import org.apache.hadoop.io.WritableFactory;
@InterfaceAudience.Private
@InterfaceStability.Evolving
public class DatanodeRegistration extends DatanodeID
implements Writable, NodeRegistration {
static { // register a ctor
WritableFactories.setFactory
(DatanodeRegistration.class,
new WritableFactory() {
public Writable newInstance() { return new DatanodeRegistration(); }
});
}
implements NodeRegistration {

private StorageInfo storageInfo;
private ExportedBlockKeys exportedKeys;

public DatanodeRegistration() {
this("", DFSConfigKeys.DFS_DATANODE_HTTP_DEFAULT_PORT,
new StorageInfo(), new ExportedBlockKeys());
}

public DatanodeRegistration(DatanodeID dn, StorageInfo info,
ExportedBlockKeys keys) {
super(dn);
@@ -118,30 +98,6 @@ implements Writable, NodeRegistration {
+ ")";
}

/////////////////////////////////////////////////
// Writable
/////////////////////////////////////////////////
@Override
public void write(DataOutput out) throws IOException {
super.write(out);

//TODO: move it to DatanodeID once HADOOP-2797 has been committed
out.writeShort(ipcPort);

storageInfo.write(out);
exportedKeys.write(out);
}

@Override
public void readFields(DataInput in) throws IOException {
super.readFields(in);

//TODO: move it to DatanodeID once HADOOP-2797 has been committed
this.ipcPort = in.readShort() & 0x0000ffff;

storageInfo.readFields(in);
exportedKeys.readFields(in);
}
@Override
public boolean equals(Object to) {
return super.equals(to);
@@ -17,31 +17,21 @@
 */
package org.apache.hadoop.hdfs.server.protocol;

import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;

import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.io.ObjectWritable;
import org.apache.hadoop.io.Writable;

@InterfaceAudience.Private
@InterfaceStability.Evolving
/**
* Response to {@link DatanodeProtocol#sendHeartbeat}
*/
public class HeartbeatResponse implements Writable {
public class HeartbeatResponse {
/** Commands returned from the namenode to the datanode */
private DatanodeCommand[] commands;

/** Information about the current HA-related state of the NN */
private NNHAStatusHeartbeat haStatus;

public HeartbeatResponse() {
// Empty constructor required for Writable
}

public HeartbeatResponse(DatanodeCommand[] cmds,
NNHAStatusHeartbeat haStatus) {
commands = cmds;
@@ -55,31 +45,4 @@ public class HeartbeatResponse implements Writable {
public NNHAStatusHeartbeat getNameNodeHaState() {
return haStatus;
}

///////////////////////////////////////////
// Writable
///////////////////////////////////////////
@Override
public void write(DataOutput out) throws IOException {
int length = commands == null ? 0 : commands.length;
out.writeInt(length);
for (int i = 0; i < length; i++) {
ObjectWritable.writeObject(out, commands[i], commands[i].getClass(),
null, true);
}
haStatus.write(out);
}

@Override
public void readFields(DataInput in) throws IOException {
int length = in.readInt();
commands = new DatanodeCommand[length];
ObjectWritable objectWritable = new ObjectWritable();
for (int i = 0; i < length; i++) {
commands[i] = (DatanodeCommand) ObjectWritable.readObject(in,
objectWritable, null);
}
haStatus = new NNHAStatusHeartbeat();
haStatus.readFields(in);
}
}
@@ -42,9 +42,6 @@ public interface InterDatanodeProtocol {
* the interface to the DN AND the RPC protocol used to communicate with the
* DN.
*
* Post version 6L (release 23 of Hadoop), the protocol is implemented in
* {@literal ../protocolR23Compatible/InterDatanodeWireProtocol}
*
* This class is used by both the DN to insulate from the protocol
* serialization.
*
@@ -17,26 +17,17 @@
 */
package org.apache.hadoop.hdfs.server.protocol;

import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;

import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.io.WritableUtils;

@InterfaceAudience.Private
@InterfaceStability.Evolving
public class NNHAStatusHeartbeat implements Writable {
public class NNHAStatusHeartbeat {

private State state;
private long txid = HdfsConstants.INVALID_TXID;

public NNHAStatusHeartbeat() {
}

public NNHAStatusHeartbeat(State state, long txid) {
this.state = state;
this.txid = txid;
@@ -50,21 +41,6 @@ public class NNHAStatusHeartbeat implements Writable {
return txid;
}

///////////////////////////////////////////
// Writable
///////////////////////////////////////////
@Override
public void write(DataOutput out) throws IOException {
WritableUtils.writeEnum(out, state);
out.writeLong(txid);
}

@Override
public void readFields(DataInput in) throws IOException {
state = WritableUtils.readEnum(in, State.class);
txid = in.readLong();
}

@InterfaceAudience.Private
public enum State {
ACTIVE,
@ -25,7 +25,6 @@ import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.security.token.block.ExportedBlockKeys;
import org.apache.hadoop.hdfs.server.namenode.CheckpointSignature;
import org.apache.hadoop.ipc.VersionedProtocol;
import org.apache.hadoop.security.KerberosInfo;

/*****************************************************************************
@ -42,9 +41,6 @@ public interface NamenodeProtocol {
* the client interface to the NN AND the RPC protocol used to
* communicate with the NN.
*
* Post version 70 (release 23 of Hadoop), the protocol is implemented in
* {@literal ../protocolR23Compatible/ClientNamenodeWireProtocol}
*
* This class is used by both the DFSClient and the
* NN server side to insulate from the protocol serialization.
*

@ -18,14 +18,6 @@

package org.apache.hadoop.hdfs.server.protocol;

import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;

import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.io.WritableFactories;
import org.apache.hadoop.io.WritableFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.hdfs.server.common.Storage;
@ -44,10 +36,6 @@ implements NodeRegistration {
String httpAddress; // HTTP address of the node
NamenodeRole role; // node role

public NamenodeRegistration() {
super();
}

public NamenodeRegistration(String address,
String httpAddress,
StorageInfo storageInfo,
@ -95,31 +83,4 @@ implements NodeRegistration {
public boolean isRole(NamenodeRole that) {
return role.equals(that);
}

/////////////////////////////////////////////////
// Writable
/////////////////////////////////////////////////
static {
WritableFactories.setFactory
(NamenodeRegistration.class,
new WritableFactory() {
public Writable newInstance() { return new NamenodeRegistration(); }
});
}

@Override // Writable
public void write(DataOutput out) throws IOException {
Text.writeString(out, rpcAddress);
Text.writeString(out, httpAddress);
Text.writeString(out, role.name());
super.write(out);
}

@Override // Writable
public void readFields(DataInput in) throws IOException {
rpcAddress = Text.readString(in);
httpAddress = Text.readString(in);
role = NamenodeRole.valueOf(Text.readString(in));
super.readFields(in);
}
}

@ -18,8 +18,6 @@

package org.apache.hadoop.hdfs.server.protocol;

import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;

import org.apache.hadoop.classification.InterfaceAudience;
@ -28,11 +26,6 @@ import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.server.common.Storage;
import org.apache.hadoop.hdfs.server.common.StorageInfo;
import org.apache.hadoop.hdfs.server.namenode.NNStorage;
import org.apache.hadoop.hdfs.DeprecatedUTF8;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.io.WritableFactories;
import org.apache.hadoop.io.WritableFactory;
import org.apache.hadoop.io.WritableUtils;

/**
* NamespaceInfo is returned by the name-node in reply
@ -76,31 +69,6 @@ public class NamespaceInfo extends StorageInfo {
return blockPoolID;
}

/////////////////////////////////////////////////
// Writable
/////////////////////////////////////////////////
static { // register a ctor
WritableFactories.setFactory
(NamespaceInfo.class,
new WritableFactory() {
public Writable newInstance() { return new NamespaceInfo(); }
});
}

public void write(DataOutput out) throws IOException {
DeprecatedUTF8.writeString(out, getBuildVersion());
super.write(out);
out.writeInt(getDistributedUpgradeVersion());
WritableUtils.writeString(out, blockPoolID);
}

public void readFields(DataInput in) throws IOException {
buildVersion = DeprecatedUTF8.readString(in);
super.readFields(in);
distributedUpgradeVersion = in.readInt();
blockPoolID = WritableUtils.readString(in);
}

public String toString(){
return super.toString() + ";bpid=" + blockPoolID;
}

@ -18,19 +18,12 @@

package org.apache.hadoop.hdfs.server.protocol;

import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;

import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.io.WritableUtils;

/**
* A data structure to store the blocks in an incremental block report.
*/
public class ReceivedDeletedBlockInfo implements Writable {
public class ReceivedDeletedBlockInfo {
Block block;
BlockStatus status;
String delHints;
@ -113,25 +106,6 @@ public class ReceivedDeletedBlockInfo implements Writable {
return status == BlockStatus.DELETED_BLOCK;
}

@Override
public void write(DataOutput out) throws IOException {
this.block.write(out);
WritableUtils.writeVInt(out, this.status.code);
if (this.status == BlockStatus.DELETED_BLOCK) {
Text.writeString(out, this.delHints);
}
}

@Override
public void readFields(DataInput in) throws IOException {
this.block = new Block();
this.block.readFields(in);
this.status = BlockStatus.fromCode(WritableUtils.readVInt(in));
if (this.status == BlockStatus.DELETED_BLOCK) {
this.delHints = Text.readString(in);
}
}

public String toString() {
return block.toString() + ", status: " + status +
", delHint: " + delHints;

@ -18,17 +18,10 @@

package org.apache.hadoop.hdfs.server.protocol;

import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;

import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.io.WritableFactories;
import org.apache.hadoop.io.WritableFactory;

/**
* Replica recovery information.
@ -38,9 +31,6 @@ import org.apache.hadoop.io.WritableFactory;
public class ReplicaRecoveryInfo extends Block {
private ReplicaState originalState;

public ReplicaRecoveryInfo() {
}

public ReplicaRecoveryInfo(long blockId, long diskLen, long gs, ReplicaState rState) {
set(blockId, diskLen, gs);
originalState = rState;
@ -59,27 +49,4 @@ public class ReplicaRecoveryInfo extends Block {
public int hashCode() {
return super.hashCode();
}

///////////////////////////////////////////
// Writable
///////////////////////////////////////////
static { // register a ctor
WritableFactories.setFactory
(ReplicaRecoveryInfo.class,
new WritableFactory() {
public Writable newInstance() { return new ReplicaRecoveryInfo(); }
});
}

@Override
public void readFields(DataInput in) throws IOException {
super.readFields(in);
originalState = ReplicaState.read(in);
}

@Override
public void write(DataOutput out) throws IOException {
super.write(out);
originalState.write(out);
}
}

@ -57,6 +57,7 @@ import org.apache.hadoop.fs.FileSystem.Statistics;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSClient.DFSDataInputStream;
import org.apache.hadoop.hdfs.MiniDFSCluster.NameNodeInfo;
import org.apache.hadoop.hdfs.protocol.DatanodeID;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
@ -705,4 +706,14 @@ public class DFSTestUtil {
conf.set(DFSConfigKeys.DFS_FEDERATION_NAMESERVICES, Joiner.on(",")
.join(nameservices));
}

public static DatanodeDescriptor getLocalDatanodeDescriptor() {
return new DatanodeDescriptor(
new DatanodeID("127.0.0.1", DFSConfigKeys.DFS_DATANODE_DEFAULT_PORT));
}

public static DatanodeInfo getLocalDatanodeInfo() {
return new DatanodeInfo(
new DatanodeID("127.0.0.1", DFSConfigKeys.DFS_DATANODE_DEFAULT_PORT));
}
}

@ -62,7 +62,7 @@ public class TestDFSUtil {
*/
@Test
public void testLocatedBlocks2Locations() {
DatanodeInfo d = new DatanodeInfo();
DatanodeInfo d = DFSTestUtil.getLocalDatanodeInfo();
DatanodeInfo[] ds = new DatanodeInfo[1];
ds[0] = d;

@ -121,7 +121,9 @@ public class TestGetBlocks extends TestCase {
getBlocksWithException(namenode, dataNodes[0], -1);

// get blocks of size BlockSize from a non-existent datanode
getBlocksWithException(namenode, new DatanodeInfo(), 2);
DatanodeInfo info = DFSTestUtil.getLocalDatanodeInfo();
info.setIpAddr("1.2.3.4");
getBlocksWithException(namenode, info, 2);
} finally {
cluster.shutdown();
}
@ -132,7 +134,7 @@ public class TestGetBlocks extends TestCase {
long size) throws IOException {
boolean getException = false;
try {
namenode.getBlocks(new DatanodeInfo(), 2);
namenode.getBlocks(DFSTestUtil.getLocalDatanodeInfo(), 2);
} catch(RemoteException e) {
getException = true;
assertTrue(e.getClassName().contains("HadoopIllegalArgumentException"));

@ -1,79 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

package org.apache.hadoop.hdfs.protocol;

import java.io.ByteArrayInputStream;
import java.io.DataInputStream;
import java.io.IOException;

import static org.junit.Assert.assertTrue;
import org.junit.Test;

import org.apache.hadoop.io.DataOutputBuffer;

public class TestCorruptFileBlocks {

/**
* Serialize the cfb given, deserialize and return the result.
*/
static CorruptFileBlocks serializeAndDeserialize(CorruptFileBlocks cfb)
throws IOException {
DataOutputBuffer buf = new DataOutputBuffer();
cfb.write(buf);

byte[] data = buf.getData();
DataInputStream input = new DataInputStream(new ByteArrayInputStream(data));

CorruptFileBlocks result = new CorruptFileBlocks();
result.readFields(input);

return result;
}

/**
* Check whether cfb is unchanged after serialization and deserialization.
*/
static boolean checkSerialize(CorruptFileBlocks cfb)
throws IOException {
return cfb.equals(serializeAndDeserialize(cfb));
}

/**
* Test serialization and deserialization of CorruptFileBlocks.
*/
@Test
public void testSerialization() throws IOException {
{
CorruptFileBlocks cfb = new CorruptFileBlocks();
assertTrue("cannot serialize empty CFB", checkSerialize(cfb));
}

{
String[] files = new String[0];
CorruptFileBlocks cfb = new CorruptFileBlocks(files, "");
assertTrue("cannot serialize CFB with empty cookie", checkSerialize(cfb));
}

{
String[] files = { "a", "bb", "ccc" };
CorruptFileBlocks cfb = new CorruptFileBlocks(files, "test");
assertTrue("cannot serialize CFB", checkSerialize(cfb));
}
}
}

@ -23,6 +23,7 @@ import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.DatanodeID;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
@ -441,9 +442,9 @@ public class TestPBHelper {
Block[] blocks = new Block[] { new Block(21), new Block(22) };
DatanodeInfo[][] dnInfos = new DatanodeInfo[][] { new DatanodeInfo[1],
new DatanodeInfo[2] };
dnInfos[0][0] = new DatanodeInfo();
dnInfos[1][0] = new DatanodeInfo();
dnInfos[1][1] = new DatanodeInfo();
dnInfos[0][0] = DFSTestUtil.getLocalDatanodeInfo();
dnInfos[1][0] = DFSTestUtil.getLocalDatanodeInfo();
dnInfos[1][1] = DFSTestUtil.getLocalDatanodeInfo();
BlockCommand bc = new BlockCommand(DatanodeProtocol.DNA_TRANSFER, "bp1",
blocks, dnInfos);
BlockCommandProto bcProto = PBHelper.convert(bc);

@ -26,6 +26,7 @@ import java.util.Random;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.server.common.GenerationStamp;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
@ -47,7 +48,7 @@ public class TestBlockInfo {

final int MAX_BLOCKS = 10;

DatanodeDescriptor dd = new DatanodeDescriptor();
DatanodeDescriptor dd = DFSTestUtil.getLocalDatanodeDescriptor();
ArrayList<Block> blockList = new ArrayList<Block>(MAX_BLOCKS);
ArrayList<BlockInfo> blockInfoList = new ArrayList<BlockInfo>();
int headIndex;

@ -28,6 +28,7 @@ import junit.framework.TestCase;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.protocol.Block;

@ -80,8 +81,8 @@ public class TestCorruptReplicaInfo extends TestCase {
block_ids.add((long)i);
}

DatanodeDescriptor dn1 = new DatanodeDescriptor();
DatanodeDescriptor dn2 = new DatanodeDescriptor();
DatanodeDescriptor dn1 = DFSTestUtil.getLocalDatanodeDescriptor();
DatanodeDescriptor dn2 = DFSTestUtil.getLocalDatanodeDescriptor();

crm.addToCorruptReplicasMap(getBlock(0), dn1, "TEST");
assertEquals("Number of corrupt blocks not returning correctly",

@ -19,6 +19,7 @@ package org.apache.hadoop.hdfs.server.blockmanagement;

import java.util.ArrayList;

import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.server.common.GenerationStamp;

@ -36,7 +37,7 @@ public class TestDatanodeDescriptor extends TestCase {
final int REMAINING_BLOCKS = 2;
final int MAX_LIMIT = MAX_BLOCKS - REMAINING_BLOCKS;

DatanodeDescriptor dd = new DatanodeDescriptor();
DatanodeDescriptor dd = DFSTestUtil.getLocalDatanodeDescriptor();
ArrayList<Block> blockList = new ArrayList<Block>(MAX_BLOCKS);
for (int i=0; i<MAX_BLOCKS; i++) {
blockList.add(new Block(i, 0, GenerationStamp.FIRST_VALID_STAMP));
@ -49,7 +50,7 @@ public class TestDatanodeDescriptor extends TestCase {
}

public void testBlocksCounter() throws Exception {
DatanodeDescriptor dd = new DatanodeDescriptor();
DatanodeDescriptor dd = DFSTestUtil.getLocalDatanodeDescriptor();
assertEquals(0, dd.numBlocks());
BlockInfo blk = new BlockInfo(new Block(1L), 1);
BlockInfo blk1 = new BlockInfo(new Block(2L), 2);

@ -1,78 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.common;

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutput;
import java.io.DataOutputStream;
import java.io.IOException;
import junit.framework.Assert;
import junit.framework.TestCase;

/**
* This is a unit test, which tests {@link Util#stringAsURI(String)}
* for IDs being used in HDFS, e.g. ClusterID and BlockPoolID.
*/
public class TestStorageInfo extends TestCase {

/**
* Test write() / readFields() of StorageInfo. Write StorageInfo into a buffer
* then read it back and the result should be the same with the original one.
* @throws IOException
*/
public void testStorageInfo() throws IOException {

int nsID = 123;
String cid = "cid-test";
int layoutV = 234;
long cT = 0L;

StorageInfo sinfo = new StorageInfo(layoutV, nsID, cid, cT);

Assert.assertNotNull(sinfo);

ByteArrayOutputStream bos = new ByteArrayOutputStream();
DataOutput output = new DataOutputStream(bos);

try {
// we need to first create a DataOutputStream for sinfo to write into
sinfo.write(output);
//remember to close the DataOutputStream
//to make sure the data has been written
bos.close();

// convert the ByteArrayOutputStream contents to a ByteArrayInputStream
ByteArrayInputStream bis = new ByteArrayInputStream(bos.toByteArray());
DataInputStream dataInputStream = new DataInputStream(bis);

StorageInfo secondsinfo = new StorageInfo();
secondsinfo.readFields(dataInputStream);

// compare
Assert.assertEquals(sinfo.getClusterID(), secondsinfo.getClusterID());
Assert.assertEquals(sinfo.getNamespaceID(), secondsinfo.getNamespaceID());
Assert.assertEquals(sinfo.getLayoutVersion(), secondsinfo.getLayoutVersion());
Assert.assertEquals(sinfo.getCTime(), secondsinfo.getCTime());
}catch (IOException e) {
e.getMessage();
}
}
}