HDFS-7756. Restore method signature for LocatedBlock#getLocations(). (Ted Yu via yliu)

yliu 2015-02-10 01:57:51 +08:00
parent 1b56d1ce32
commit 260b5e32c4
4 changed files with 14 additions and 7 deletions
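
The rationale, as the title suggests, is compatibility: the method had been changed (apparently as part of HDFS-7647, listed just above the new entry in CHANGES.txt) to return DatanodeInfoWithStorage[], and because a JVM method descriptor includes the return type, callers compiled against the original DatanodeInfo[] signature would fail to link. Restoring the declared signature keeps those callers working while the returned elements can still be DatanodeInfoWithStorage instances. A minimal caller sketch under that reading; the class and helper method below are hypothetical, only LocatedBlock and DatanodeInfo are real HDFS types:

import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;

// Hypothetical downstream caller, compiled and linked against
// DatanodeInfo[] LocatedBlock#getLocations(); restoring that descriptor
// keeps code like this working without a recompile.
public class BlockLocationDump {
  static String[] hostsOf(LocatedBlock block) {
    DatanodeInfo[] locs = block.getLocations();
    String[] hosts = new String[locs.length];
    for (int i = 0; i < locs.length; i++) {
      hosts[i] = locs[i].getHostName();   // inherited from DatanodeID
    }
    return hosts;
  }
}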

CHANGES.txt

@@ -887,6 +887,9 @@ Release 2.7.0 - UNRELEASED
HDFS-7647. DatanodeManager.sortLocatedBlocks sorts DatanodeInfos
but not StorageIDs. (Milan Desai via Arpit Agarwal)
+HDFS-7756. Restore method signature for LocatedBlock#getLocations(). (Ted
+Yu via yliu)
Release 2.6.1 - UNRELEASED
INCOMPATIBLE CHANGES

DatanodeInfoWithStorage.java

@@ -15,11 +15,15 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
-package org.apache.hadoop.hdfs.server.protocol;
+package org.apache.hadoop.hdfs.protocol;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.hdfs.StorageType;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
@InterfaceAudience.Private
@InterfaceStability.Evolving
public class DatanodeInfoWithStorage extends DatanodeInfo {
private final String storageID;
private final StorageType storageType;
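
For orientation, a rough sketch of the relocated class as this diff implies it: the package, superclass and fields come from the hunk above, the getters from the test further down; the constructor shape is an assumption and is not shown in this commit.

package org.apache.hadoop.hdfs.protocol;

import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.hdfs.StorageType;

@InterfaceAudience.Private
@InterfaceStability.Evolving
public class DatanodeInfoWithStorage extends DatanodeInfo {
  private final String storageID;
  private final StorageType storageType;

  // Constructor shape is assumed; it relies on DatanodeInfo's copy constructor.
  public DatanodeInfoWithStorage(DatanodeInfo from, String storageID,
      StorageType storageType) {
    super(from);
    this.storageID = storageID;
    this.storageType = storageType;
  }

  /** Storage ID of the replica on this datanode. */
  public String getStorageID() {
    return storageID;
  }

  /** Storage type (e.g. DISK, SSD) of that replica. */
  public StorageType getStorageType() {
    return storageType;
  }
}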

LocatedBlock.java

@@ -25,7 +25,7 @@
import org.apache.hadoop.hdfs.StorageType;
import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo;
-import org.apache.hadoop.hdfs.server.protocol.DatanodeInfoWithStorage;
+import org.apache.hadoop.hdfs.protocol.DatanodeInfoWithStorage;
import org.apache.hadoop.security.token.Token;
import com.google.common.collect.Lists;
@@ -140,7 +140,7 @@ public ExtendedBlock getBlock() {
* {@link org.apache.hadoop.hdfs.protocol.LocatedBlock#invalidateCachedStorageInfo}
* to invalidate the cached Storage ID/Type arrays.
*/
-public DatanodeInfoWithStorage[] getLocations() {
+public DatanodeInfo[] getLocations() {
return locs;
}
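
With the DatanodeInfo[] signature restored, callers that still want per-replica storage details can downcast individual elements, which is exactly what the updated test below does. A hedged usage sketch; the surrounding class and the printing are illustrative only:

import org.apache.hadoop.hdfs.StorageType;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.DatanodeInfoWithStorage;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;

// Illustrative caller: the declared type is DatanodeInfo[], but the
// elements may carry storage info that a cast exposes.
public class StorageAwareCaller {
  static void printStorageInfo(LocatedBlock block) {
    for (DatanodeInfo loc : block.getLocations()) {
      if (loc instanceof DatanodeInfoWithStorage) {
        DatanodeInfoWithStorage withStorage = (DatanodeInfoWithStorage) loc;
        String id = withStorage.getStorageID();
        StorageType type = withStorage.getStorageType();
        System.out.println(loc.getHostName() + " -> " + id + " (" + type + ")");
      }
    }
  }
}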

TestDatanodeManager.java

@@ -37,7 +37,7 @@
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
-import org.apache.hadoop.hdfs.server.protocol.DatanodeInfoWithStorage;
+import org.apache.hadoop.hdfs.protocol.DatanodeInfoWithStorage;
import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
import org.apache.hadoop.net.DNSToSwitchMapping;
import org.junit.Assert;
@@ -274,15 +274,15 @@ public void testSortLocatedBlocks() throws IOException {
dm.sortLocatedBlocks(targetIp, blocks);
// check that storage IDs/types are aligned with datanode locs
-DatanodeInfoWithStorage[] sortedLocs = block.getLocations();
+DatanodeInfo[] sortedLocs = block.getLocations();
storageIDs = block.getStorageIDs();
storageTypes = block.getStorageTypes();
assertThat(sortedLocs.length, is(5));
assertThat(storageIDs.length, is(5));
assertThat(storageTypes.length, is(5));
for(int i = 0; i < sortedLocs.length; i++) {
-assertThat(sortedLocs[i].getStorageID(), is(storageIDs[i]));
-assertThat(sortedLocs[i].getStorageType(), is(storageTypes[i]));
+assertThat(((DatanodeInfoWithStorage)sortedLocs[i]).getStorageID(), is(storageIDs[i]));
+assertThat(((DatanodeInfoWithStorage)sortedLocs[i]).getStorageType(), is(storageTypes[i]));
}
// Ensure the local node is first.