HADOOP-4687. More merges from trunk.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/core/branches/HADOOP-4687/core@786712 13f79535-47bb-0310-9956-ffa450edef68
This commit is contained in:
Owen O'Malley 2009-06-19 23:12:41 +00:00
parent b12d765467
commit fb6308f280
3 changed files with 99 additions and 6 deletions

View File

@ -835,6 +835,12 @@ Trunk (unreleased changes)
HADOOP-4041. IsolationRunner does not work as documented.
(Philip Zeyliger via tomwhite)
HADOOP-6004. Fixes BlockLocation deserialization. (Jakob Homan via
szetszwo)
HADOOP-6079. Serialize proxySource as DatanodeInfo in DataTransferProtocol.
(szetszwo)
Release 0.20.1 - Unreleased

  INCOMPATIBLE CHANGES
@ -932,9 +938,6 @@ Release 0.20.1 - Unreleased
available memory on a tasktracker.
(Vinod Kumar Vavilapalli via yhemanth)
HADOOP-5937. Correct a safemode message in FSNamesystem. (Ravi Phulari
via szetszwo)
HADOOP-5908. Fixes a problem to do with ArithmeticException in the
JobTracker when there are jobs with 0 maps. (Amar Kamat via ddas)
@ -953,6 +956,9 @@ Release 0.20.1 - Unreleased
HADOOP-5884. Fixes accounting in capacity scheduler so that high RAM jobs
take more slots. (Vinod Kumar Vavilapalli via yhemanth)
HADOOP-5937. Correct a safemode message in FSNamesystem. (Ravi Phulari
via szetszwo)
HADOOP-5869. Fix bug in assignment of setup / cleanup task that was
causing TestQueueCapacities to fail.
(Sreekanth Ramakrishnan via yhemanth)

View File

@ -17,9 +17,14 @@
 */
package org.apache.hadoop.fs;

-import org.apache.hadoop.io.*;
-import java.io.*;
+import java.io.DataInput;
+import java.io.DataOutput;
+import java.io.IOException;
+
+import org.apache.hadoop.io.Text;
+import org.apache.hadoop.io.Writable;
+import org.apache.hadoop.io.WritableFactories;
+import org.apache.hadoop.io.WritableFactory;
/*
 * A BlockLocation lists hosts, offset and length
@ -213,15 +218,19 @@ public void readFields(DataInput in) throws IOException {
       name.readFields(in);
       names[i] = name.toString();
     }
     int numHosts = in.readInt();
+    this.hosts = new String[numHosts];
     for (int i = 0; i < numHosts; i++) {
       Text host = new Text();
       host.readFields(in);
       hosts[i] = host.toString();
     }
     int numTops = in.readInt();
-    Text path = new Text();
+    topologyPaths = new String[numTops];
     for (int i = 0; i < numTops; i++) {
+      Text path = new Text();
       path.readFields(in);
       topologyPaths[i] = path.toString();
     }

View File

@ -0,0 +1,78 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs;
import java.io.ByteArrayInputStream;
import java.io.DataInput;
import java.io.DataInputStream;
import java.io.IOException;
import junit.framework.TestCase;
import org.apache.hadoop.io.DataOutputBuffer;
/**
 * Regression test for HADOOP-6004: {@code BlockLocation.readFields} failed
 * to allocate the {@code hosts} and {@code topologyPaths} arrays before
 * populating them, so a serialize/deserialize round trip broke. This test
 * round-trips a fully-populated {@link BlockLocation} and verifies every
 * field survives intact.
 */
public class TestBlockLocation extends TestCase {

  /**
   * Serializes a BlockLocation via write(), deserializes it via readFields()
   * the same way the RPC layer does, and checks all fields are preserved.
   *
   * Any IOException is allowed to propagate: the method already declares it,
   * and a raw exception carries the full stack trace, unlike the previous
   * fail(e.getMessage()) pattern which discarded it.
   */
  public void testDeserialization() throws IOException {
    // Distinct values in every field so a mixed-up field order during
    // deserialization would also be detected.
    String[] names = { "one", "two" };
    String[] hosts = { "three", "four" };
    String[] topologyPaths = { "five", "six" };
    long offset = 25L; // uppercase 'L' suffix: lowercase 'l' reads as '1'
    long length = 55L;

    BlockLocation bl = new BlockLocation(names, hosts, topologyPaths,
                                         offset, length);

    // Serialize it.
    DataOutputBuffer dob = new DataOutputBuffer();
    bl.write(dob);

    // Re-create the BlockLocation the same way as is done during
    // deserialization.
    DataInput in = new DataInputStream(
        new ByteArrayInputStream(dob.getData()));
    BlockLocation bl2 = new BlockLocation();
    bl2.readFields(in);

    // Check that we got back what we started with.
    // JUnit convention: assertEquals(expected, actual).
    verifyDeserialization(hosts, bl2.getHosts());
    verifyDeserialization(names, bl2.getNames());
    verifyDeserialization(topologyPaths, bl2.getTopologyPaths());
    assertEquals(offset, bl2.getOffset());
    assertEquals(length, bl2.getLength());
  }

  /** Asserts that two string arrays are element-wise equal. */
  private void verifyDeserialization(String[] expected, String[] actual) {
    assertEquals(expected.length, actual.length);
    for (int i = 0; i < expected.length; i++) {
      assertEquals(expected[i], actual[i]);
    }
  }
}