HDFS-11185. Ozone: remove disabled tests. Contributed by Anu Engineer.
parent e3591aeec0
commit fa26dea89d

@@ -19,7 +19,6 @@
package org.apache.hadoop.scm.protocol;

import java.io.IOException;
import java.util.Set;

import org.apache.hadoop.scm.client.ScmClient;
import org.apache.hadoop.scm.container.common.helpers.Pipeline;

@@ -30,18 +29,6 @@ import org.apache.hadoop.scm.container.common.helpers.Pipeline;
 */
public interface StorageContainerLocationProtocol {

  /**
   * Find the set of nodes that currently host the container of an object, as
   * identified by the object key hash. This method supports batch lookup by
   * passing multiple key hashes.
   *
   * @param keys batch of object keys to find
   * @return located containers for each object key
   * @throws IOException if there is any failure
   */
  Set<LocatedContainer> getStorageContainerLocations(Set<String> keys)
      throws IOException;

  /**
   * Asks SCM where a container should be allocated. SCM responds with the
   * set of datanodes that should be used creating this container.

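The interface method above is the batch location lookup that the rest of this patch touches. A minimal caller-side sketch, assuming an already-constructed StorageContainerLocationProtocol proxy; the helper class, variable names, and sample keys are illustrative, not part of this change:

import java.io.IOException;
import java.util.Arrays;
import java.util.LinkedHashSet;
import java.util.Set;

import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.scm.protocol.LocatedContainer;
import org.apache.hadoop.scm.protocol.StorageContainerLocationProtocol;

public final class LocateContainersExample {
  // 'client' is assumed to be an already-built proxy, e.g. the client-side
  // translator declared in the next file of this diff.
  static void printLocations(StorageContainerLocationProtocol client)
      throws IOException {
    Set<String> keys = new LinkedHashSet<>(Arrays.asList("/key1", "/key2"));
    Set<LocatedContainer> containers =
        client.getStorageContainerLocations(keys);
    for (LocatedContainer container : containers) {
      System.out.println("key=" + container.getKey()
          + " container=" + container.getContainerName()
          + " leader=" + container.getLeader());
      for (DatanodeInfo location : container.getLocations()) {
        System.out.println("  replica on " + location);
      }
    }
  }
}

The same call is exercised against a MiniOzoneCluster by the disabled tests removed at the end of this patch.
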
@@ -18,24 +18,17 @@ package org.apache.hadoop.scm.protocolPB;

import com.google.common.base.Preconditions;
import com.google.common.base.Strings;
import com.google.common.collect.Sets;
import com.google.protobuf.RpcController;
import com.google.protobuf.ServiceException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto;
import org.apache.hadoop.hdfs.protocolPB.PBHelperClient;
import org.apache.hadoop.ipc.ProtobufHelper;
import org.apache.hadoop.ipc.ProtocolTranslator;
import org.apache.hadoop.ipc.RPC;
import org.apache.hadoop.scm.client.ScmClient;
import org.apache.hadoop.scm.protocol.LocatedContainer;
import org.apache.hadoop.scm.protocol.StorageContainerLocationProtocol;
import org.apache.hadoop.ozone.protocol.proto.StorageContainerLocationProtocolProtos.ContainerRequestProto;
import org.apache.hadoop.ozone.protocol.proto.StorageContainerLocationProtocolProtos.ContainerResponseProto;
import org.apache.hadoop.ozone.protocol.proto.StorageContainerLocationProtocolProtos.GetStorageContainerLocationsRequestProto;
import org.apache.hadoop.ozone.protocol.proto.StorageContainerLocationProtocolProtos.GetStorageContainerLocationsResponseProto;
import org.apache.hadoop.ozone.protocol.proto.StorageContainerLocationProtocolProtos.LocatedContainerProto;
import org.apache.hadoop.ozone.protocol.proto.StorageContainerLocationProtocolProtos.GetContainerRequestProto;
import org.apache.hadoop.ozone.protocol.proto.StorageContainerLocationProtocolProtos.GetContainerResponseProto;
import org.apache.hadoop.ozone.protocol.proto.StorageContainerLocationProtocolProtos.DeleteContainerRequestProto;

@@ -43,7 +36,6 @@ import org.apache.hadoop.scm.container.common.helpers.Pipeline;

import java.io.Closeable;
import java.io.IOException;
import java.util.Set;

/**
 * This class is the client-side translator to translate the requests made on

@@ -71,38 +63,6 @@ public final class StorageContainerLocationProtocolClientSideTranslatorPB
    this.rpcProxy = rpcProxy;
  }

  @Override
  public Set<LocatedContainer> getStorageContainerLocations(Set<String> keys)
      throws IOException {
    GetStorageContainerLocationsRequestProto.Builder req =
        GetStorageContainerLocationsRequestProto.newBuilder();
    for (String key : keys) {
      req.addKeys(key);
    }
    final GetStorageContainerLocationsResponseProto resp;
    try {
      resp = rpcProxy.getStorageContainerLocations(NULL_RPC_CONTROLLER,
          req.build());
    } catch (ServiceException e) {
      throw ProtobufHelper.getRemoteException(e);
    }
    Set<LocatedContainer> locatedContainers =
        Sets.newLinkedHashSetWithExpectedSize(resp.getLocatedContainersCount());
    for (LocatedContainerProto locatedContainer :
        resp.getLocatedContainersList()) {
      Set<DatanodeInfo> locations = Sets.newLinkedHashSetWithExpectedSize(
          locatedContainer.getLocationsCount());
      for (DatanodeInfoProto location : locatedContainer.getLocationsList()) {
        locations.add(PBHelperClient.convert(location));
      }
      locatedContainers.add(new LocatedContainer(locatedContainer.getKey(),
          locatedContainer.getMatchedKeyPrefix(),
          locatedContainer.getContainerName(), locations,
          PBHelperClient.convert(locatedContainer.getLeader())));
    }
    return locatedContainers;
  }

  /**
   * Asks SCM where a container should be allocated. SCM responds with the set
   * of datanodes that should be used creating this container.

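The removed translator method above follows the usual Hadoop protobuf-RPC client idiom: build the request proto, invoke the proxy with the null RpcController, and unwrap any ServiceException into an IOException via ProtobufHelper.getRemoteException. A small sketch of just that idiom; the ProtoCall shape is a hypothetical stand-in for a generated proxy method, not an API from this patch:

import java.io.IOException;

import com.google.protobuf.ServiceException;
import org.apache.hadoop.ipc.ProtobufHelper;

public final class RpcExceptionIdiom {
  // Hypothetical functional shape of one protobuf-backed call; the real
  // translator calls rpcProxy.<method>(NULL_RPC_CONTROLLER, request).
  interface ProtoCall<T> {
    T call() throws ServiceException;
  }

  // Client-side idiom: let the proxy throw ServiceException and rethrow the
  // wrapped remote cause as an IOException.
  static <T> T invoke(ProtoCall<T> call) throws IOException {
    try {
      return call.call();
    } catch (ServiceException e) {
      throw ProtobufHelper.getRemoteException(e);
    }
  }
}

The server-side translator later in this diff mirrors the idiom in the other direction, wrapping IOException into ServiceException.
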
@@ -31,32 +31,6 @@ package hadoop.hdfs;
import "hdfs.proto";
import "Ozone.proto";

/**
 * keys - batch of object keys to find
 */
message GetStorageContainerLocationsRequestProto {
  repeated string keys = 1;
}

/**
 * locatedContainers - for each requested hash, nodes that currently host the
 * container for that object key hash
 */
message GetStorageContainerLocationsResponseProto {
  repeated LocatedContainerProto locatedContainers = 1;
}

/**
 * Holds the nodes that currently host the container for an object key.
 */
message LocatedContainerProto {
  required string key = 1;
  required string matchedKeyPrefix = 2;
  required string containerName = 3;
  repeated DatanodeInfoProto locations = 4;
  required DatanodeInfoProto leader = 5;
}

/**
 * Request send to SCM asking where the container should be created.
 */

@@ -106,13 +80,6 @@ message DeleteContainerResponseProto {
 * and response messages for details of the RPC calls.
 */
service StorageContainerLocationProtocolService {
  /**
   * Find the set of nodes that currently host the container of an object, as
   * identified by the object key hash. This method supports batch lookup by
   * passing multiple key hashes.
   */
  rpc getStorageContainerLocations(GetStorageContainerLocationsRequestProto)
      returns(GetStorageContainerLocationsResponseProto);

  /**
   * Creates a container entry in SCM.

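The Java classes generated from the messages above expose the standard protobuf builder and accessor methods that both translators rely on. A short sketch, assuming the classes generated from this .proto file; the sample keys are illustrative:

import org.apache.hadoop.ozone.protocol.proto.StorageContainerLocationProtocolProtos.GetStorageContainerLocationsRequestProto;
import org.apache.hadoop.ozone.protocol.proto.StorageContainerLocationProtocolProtos.GetStorageContainerLocationsResponseProto;
import org.apache.hadoop.ozone.protocol.proto.StorageContainerLocationProtocolProtos.LocatedContainerProto;

public final class LocationProtoExample {
  // repeated string keys = 1  ->  addKeys(...) on the generated builder.
  static GetStorageContainerLocationsRequestProto buildRequest() {
    return GetStorageContainerLocationsRequestProto.newBuilder()
        .addKeys("/key1")
        .addKeys("/key2")
        .build();
  }

  // repeated LocatedContainerProto locatedContainers = 1, each carrying the
  // container name, the replica list, and the leader declared above.
  static void readResponse(GetStorageContainerLocationsResponseProto resp) {
    for (LocatedContainerProto located : resp.getLocatedContainersList()) {
      System.out.println(located.getContainerName()
          + " replicas=" + located.getLocationsCount()
          + " leader=" + located.getLeader());
    }
  }
}
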
@@ -18,25 +18,12 @@
package org.apache.hadoop.ozone.protocolPB;

import java.io.IOException;
import java.util.Set;

import com.google.common.collect.Sets;
import com.google.protobuf.RpcController;
import com.google.protobuf.ServiceException;

import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocolPB.PBHelperClient;
import org.apache.hadoop.scm.protocol.LocatedContainer;
import org.apache.hadoop.scm.protocol.StorageContainerLocationProtocol;
import org.apache.hadoop.ozone.protocol.proto
    .StorageContainerLocationProtocolProtos
    .GetStorageContainerLocationsRequestProto;
import org.apache.hadoop.ozone.protocol.proto
    .StorageContainerLocationProtocolProtos
    .GetStorageContainerLocationsResponseProto;
import org.apache.hadoop.ozone.protocol.proto
    .StorageContainerLocationProtocolProtos.LocatedContainerProto;

import static org.apache.hadoop.ozone.protocol.proto
    .StorageContainerLocationProtocolProtos.ContainerRequestProto;
import org.apache.hadoop.ozone.protocol.proto

@@ -73,39 +60,6 @@ public final class StorageContainerLocationProtocolServerSideTranslatorPB
    this.impl = impl;
  }

  @Override
  public GetStorageContainerLocationsResponseProto getStorageContainerLocations(
      RpcController unused, GetStorageContainerLocationsRequestProto req)
      throws ServiceException {
    Set<String> keys = Sets.newLinkedHashSetWithExpectedSize(
        req.getKeysCount());
    for (String key : req.getKeysList()) {
      keys.add(key);
    }
    final Set<LocatedContainer> locatedContainers;
    try {
      locatedContainers = impl.getStorageContainerLocations(keys);
    } catch (IOException e) {
      throw new ServiceException(e);
    }
    GetStorageContainerLocationsResponseProto.Builder resp =
        GetStorageContainerLocationsResponseProto.newBuilder();
    for (LocatedContainer locatedContainer : locatedContainers) {
      LocatedContainerProto.Builder locatedContainerProto =
          LocatedContainerProto.newBuilder()
          .setKey(locatedContainer.getKey())
          .setMatchedKeyPrefix(locatedContainer.getMatchedKeyPrefix())
          .setContainerName(locatedContainer.getContainerName());
      for (DatanodeInfo location : locatedContainer.getLocations()) {
        locatedContainerProto.addLocations(PBHelperClient.convert(location));
      }
      locatedContainerProto.setLeader(
          PBHelperClient.convert(locatedContainer.getLeader()));
      resp.addLocatedContainers(locatedContainerProto.build());
    }
    return resp.build();
  }

  @Override
  public ContainerResponseProto allocateContainer(RpcController unused,
      ContainerRequestProto request) throws ServiceException {

@@ -41,7 +41,6 @@ import org.apache.hadoop.scm.client.ScmClient;
import org.apache.hadoop.scm.container.common.helpers.AllocatedBlock;
import org.apache.hadoop.scm.container.common.helpers.DeleteBlockResult;
import org.apache.hadoop.scm.protocol.ScmBlockLocationProtocol;
import org.apache.hadoop.scm.protocol.LocatedContainer;
import org.apache.hadoop.ozone.protocol.StorageContainerDatanodeProtocol;
import org.apache.hadoop.scm.protocol.StorageContainerLocationProtocol;
import org.apache.hadoop.ozone.protocol.commands.RegisteredCommand;

@@ -376,14 +375,6 @@ public class StorageContainerManager
        .setDatanodeUUID(rCmd.getDatanodeUUID()).build();
  }

  // TODO : This code will move into KSM later. Write now this code is stubbed
  // implementation that lets the ozone tests pass.
  @Override
  public Set<LocatedContainer> getStorageContainerLocations(Set<String> keys)
      throws IOException {
    throw new IOException("Not Implemented.");
  }

  /**
   * Asks SCM where a container should be allocated. SCM responds with the set
   * of datanodes that should be used creating this container.

@@ -76,8 +76,6 @@ import java.util.Date;
import java.util.Set;
import java.util.TimeZone;
import java.util.Locale;
import java.util.HashSet;
import java.util.Arrays;
import java.util.List;

/**

@@ -448,25 +446,6 @@ public final class DistributedStorageHandler implements StorageHandler {
    return xceiverClientManager.acquireClient(pipeline);
  }

  /**
   * Acquires an {@link XceiverClientSpi} connected to a {@link Pipeline}
   * of nodes capable of serving container protocol operations.
   * The container is selected based on the specified container key.
   *
   * @param containerKey container key
   * @return XceiverClient connected to a container
   * @throws IOException if an XceiverClient cannot be acquired
   */
  private XceiverClientSpi acquireXceiverClient(String containerKey)
      throws IOException {
    Set<LocatedContainer> locatedContainers =
        storageContainerLocationClient.getStorageContainerLocations(
            new HashSet<>(Arrays.asList(containerKey)));
    Pipeline pipeline = newPipelineFromLocatedContainer(
        locatedContainers.iterator().next());
    return xceiverClientManager.acquireClient(pipeline);
  }

  /**
   * Creates a container key from any number of components by combining all
   * components with a delimiter.

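The truncated javadoc at the end of this hunk describes building a container key by joining components with a delimiter. A minimal sketch of that idea; the method name and the delimiter are assumptions chosen for illustration, not taken from this patch:

public final class ContainerKeyExample {
  // Hypothetical helper: join any number of components into one container
  // key with a fixed delimiter (the "/" delimiter is an assumption).
  static String buildContainerKey(String... components) {
    return String.join("/", components);
  }

  public static void main(String[] args) {
    // e.g. volume + bucket + object key
    System.out.println(buildContainerKey("volume", "bucket", "key1"));
  }
}
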
@@ -159,89 +159,4 @@ public class TestStorageContainerManager {
    Assert.assertTrue(e instanceof IOException);
    Assert.assertEquals(expectedErrorMessage, e.getMessage());
  }

  // TODO : Disabling this test after verifying that failure is due
  // Not Implemented exception. Will turn on this test in next patch
  //@Test
  public void testLocationsForSingleKey() throws Exception {
    cluster = new MiniOzoneCluster.Builder(conf).numDataNodes(1)
        .setHandlerType(OzoneConsts.OZONE_HANDLER_DISTRIBUTED).build();
    storageContainerLocationClient =
        cluster.createStorageContainerLocationClient();
    Set<LocatedContainer> containers =
        storageContainerLocationClient.getStorageContainerLocations(
            new LinkedHashSet<>(Arrays.asList("/key1")));
    assertNotNull(containers);
    assertEquals(1, containers.size());
    assertLocatedContainer(containers, "/key1", 1);
  }

  // TODO : Disabling this test after verifying that failure is due
  // Not Implemented exception. Will turn on this test in next patch
  //@Test
  public void testLocationsForMultipleKeys() throws Exception {
    cluster = new MiniOzoneCluster.Builder(conf).numDataNodes(1)
        .setHandlerType(OzoneConsts.OZONE_HANDLER_DISTRIBUTED).build();
    storageContainerLocationClient =
        cluster.createStorageContainerLocationClient();
    Set<LocatedContainer> containers =
        storageContainerLocationClient.getStorageContainerLocations(
            new LinkedHashSet<>(Arrays.asList("/key1", "/key2", "/key3")));
    assertNotNull(containers);
    assertEquals(3, containers.size());
    assertLocatedContainer(containers, "/key1", 1);
    assertLocatedContainer(containers, "/key2", 1);
    assertLocatedContainer(containers, "/key3", 1);
  }
  // TODO : Disabling this test after verifying that failure is due
  // Not Implemented exception. Will turn on this test in next patch
  //@Test
  public void testNoDataNodes() throws Exception {
    cluster = new MiniOzoneCluster.Builder(conf).numDataNodes(0)
        .setHandlerType(OzoneConsts.OZONE_HANDLER_DISTRIBUTED)
        .doNotwaitTobeOutofChillMode()
        .build();
    storageContainerLocationClient =
        cluster.createStorageContainerLocationClient();
    exception.expect(IOException.class);
    exception.expectMessage("locations not found");
    storageContainerLocationClient.getStorageContainerLocations(
        new LinkedHashSet<>(Arrays.asList("/key1")));
  }

  // TODO : Disabling this test after verifying that failure is due
  // Not Implemented exception. Will turn on this test in next patch
  //@Test
  public void testMultipleDataNodes() throws Exception {
    cluster = new MiniOzoneCluster.Builder(conf).numDataNodes(3)
        .setHandlerType(OzoneConsts.OZONE_HANDLER_DISTRIBUTED).build();
    storageContainerLocationClient =
        cluster.createStorageContainerLocationClient();
    Set<LocatedContainer> containers =
        storageContainerLocationClient.getStorageContainerLocations(
            new LinkedHashSet<>(Arrays.asList("/key1")));
    assertNotNull(containers);
    assertEquals(1, containers.size());
    assertLocatedContainer(containers, "/key1", 3);
  }

  private static void assertLocatedContainer(Set<LocatedContainer> containers,
      String key, int expectedNumLocations) {
    LocatedContainer container = null;
    for (LocatedContainer curContainer: containers) {
      if (key.equals(curContainer.getKey())) {
        container = curContainer;
        break;
      }
    }
    assertNotNull("Container for key " + key + " not found.", container);
    assertEquals(key, container.getKey());
    assertNotNull(container.getMatchedKeyPrefix());
    assertFalse(container.getMatchedKeyPrefix().isEmpty());
    assertNotNull(container.getContainerName());
    assertFalse(container.getContainerName().isEmpty());
    assertNotNull(container.getLocations());
    assertEquals(expectedNumLocations, container.getLocations().size());
    assertNotNull(container.getLeader());
  }
}
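
The four tests deleted above had been disabled by commenting out @Test because getStorageContainerLocations in SCM only threw "Not Implemented."; this patch removes them outright. For reference, a sketch of how such a test can instead be kept but skipped with JUnit 4's @Ignore so the reason stays visible in reports; this is illustrative only and not part of this change:

import org.junit.Ignore;
import org.junit.Test;

public class IgnoredTestExample {
  // Alternative to commenting out @Test: the method still compiles against
  // the current APIs and shows up as skipped, with the reason recorded.
  @Ignore("getStorageContainerLocations is not implemented in SCM yet")
  @Test
  public void testLocationsForSingleKey() throws Exception {
    // test body omitted in this sketch
  }
}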