HADOOP-8388. svn merge -c 1336966 from trunk

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/branch-2@1336968 13f79535-47bb-0310-9956-ffa450edef68
Eli Collins 2012-05-10 23:50:11 +00:00
parent 1135c04b40
commit 45bf0a3cac
3 changed files with 5 additions and 145 deletions

CHANGES.txt

@@ -6,6 +6,9 @@ Release 2.0.0 - UNRELEASED
    HADOOP-7920. Remove Avro Rpc. (suresh)

    HADOOP-8388. Remove unused BlockLocation serialization.
    (Colin Patrick McCabe via eli)

  NEW FEATURES

    HADOOP-7773. Add support for protocol buffer based RPC engine.

org/apache/hadoop/fs/BlockLocation.java

@@ -35,16 +35,7 @@ import org.apache.hadoop.io.WritableFactory;
 */
@InterfaceAudience.Public
@InterfaceStability.Stable
public class BlockLocation implements Writable {
public class BlockLocation {
  static { // register a ctor
    WritableFactories.setFactory
      (BlockLocation.class,
       new WritableFactory() {
         public Writable newInstance() { return new BlockLocation(); }
       });
  }
  private String[] hosts; //hostnames of datanodes
  private String[] names; //hostname:portNumber of datanodes
  private String[] topologyPaths; // full path name in network topology
@@ -219,62 +210,6 @@ public class BlockLocation implements Writable {
    }
  }
  /**
   * Implement write of Writable
   */
  public void write(DataOutput out) throws IOException {
    out.writeLong(offset);
    out.writeLong(length);
    out.writeBoolean(corrupt);
    out.writeInt(names.length);
    for (int i=0; i < names.length; i++) {
      Text name = new Text(names[i]);
      name.write(out);
    }
    out.writeInt(hosts.length);
    for (int i=0; i < hosts.length; i++) {
      Text host = new Text(hosts[i]);
      host.write(out);
    }
    out.writeInt(topologyPaths.length);
    for (int i=0; i < topologyPaths.length; i++) {
      Text host = new Text(topologyPaths[i]);
      host.write(out);
    }
  }

  /**
   * Implement readFields of Writable
   */
  public void readFields(DataInput in) throws IOException {
    this.offset = in.readLong();
    this.length = in.readLong();
    this.corrupt = in.readBoolean();
    int numNames = in.readInt();
    this.names = new String[numNames];
    for (int i = 0; i < numNames; i++) {
      Text name = new Text();
      name.readFields(in);
      names[i] = name.toString();
    }
    int numHosts = in.readInt();
    this.hosts = new String[numHosts];
    for (int i = 0; i < numHosts; i++) {
      Text host = new Text();
      host.readFields(in);
      hosts[i] = host.toString();
    }
    int numTops = in.readInt();
    topologyPaths = new String[numTops];
    for (int i = 0; i < numTops; i++) {
      Text path = new Text();
      path.readFields(in);
      topologyPaths[i] = path.toString();
    }
  }
  public String toString() {
    StringBuilder result = new StringBuilder();
    result.append(offset);

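With the Writable implementation gone, a BlockLocation is constructed directly and read back through its accessors. The sketch below is a minimal illustration of that post-change usage; it relies only on the constructor and getters exercised by the test removed below, and the class name BlockLocationUsage, host names, ports, and rack paths are made-up example values, not part of this commit.

import org.apache.hadoop.fs.BlockLocation;

public class BlockLocationUsage {
  public static void main(String[] args) throws Exception {
    // Hypothetical datanode identifiers; any values work for illustration.
    String[] names = { "dn1:50010", "dn2:50010" };
    String[] hosts = { "dn1", "dn2" };
    String[] topologyPaths = { "/rack1/dn1", "/rack2/dn2" };

    // Construct the location directly; no Writable factory registration
    // or readFields() round trip is involved any more.
    BlockLocation loc = new BlockLocation(names, hosts, topologyPaths, 0L, 1024L);

    // Read it back through the unchanged public accessors.
    System.out.println("offset=" + loc.getOffset() + " length=" + loc.getLength());
    for (String host : loc.getHosts()) {
      System.out.println("host: " + host);
    }
  }
}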
org/apache/hadoop/fs/TestBlockLocation.java

@@ -1,78 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs;

import java.io.ByteArrayInputStream;
import java.io.DataInput;
import java.io.DataInputStream;
import java.io.IOException;

import junit.framework.TestCase;

import org.apache.hadoop.io.DataOutputBuffer;

public class TestBlockLocation extends TestCase {

  // Verify fix of bug identified in HADOOP-6004
  public void testDeserialization() throws IOException {
    // Create a test BlockLocation
    String[] names = {"one", "two" };
    String[] hosts = {"three", "four" };
    String[] topologyPaths = {"five", "six"};
    long offset = 25l;
    long length = 55l;

    BlockLocation bl = new BlockLocation(names, hosts, topologyPaths,
        offset, length);

    DataOutputBuffer dob = new DataOutputBuffer();

    // Serialize it
    try {
      bl.write(dob);
    } catch (IOException e) {
      fail("Unable to serialize data: " + e.getMessage());
    }

    byte[] bytes = dob.getData();
    DataInput da = new DataInputStream(new ByteArrayInputStream(bytes));

    // Try to re-create the BlockLocation the same way as is done during
    // deserialization
    BlockLocation bl2 = new BlockLocation();

    try {
      bl2.readFields(da);
    } catch (IOException e) {
      fail("Unable to deserialize BlockLocation: " + e.getMessage());
    }

    // Check that we got back what we started with
    verifyDeserialization(bl2.getHosts(), hosts);
    verifyDeserialization(bl2.getNames(), names);
    verifyDeserialization(bl2.getTopologyPaths(), topologyPaths);
    assertEquals(bl2.getOffset(), offset);
    assertEquals(bl2.getLength(), length);
  }

  private void verifyDeserialization(String[] ar1, String[] ar2) {
    assertEquals(ar1.length, ar2.length);
    for(int i = 0; i < ar1.length; i++)
      assertEquals(ar1[i], ar2[i]);
  }
}