HDFS-13423. Ozone: Clean-up of ozone related change from hadoop-hdfs-project. Contributed by Nanda Kumar.

Mukul Kumar Singh 2018-04-13 14:13:06 +05:30 committed by Owen O'Malley
parent d5a8e60256
commit 979bbb4019
29 changed files with 108 additions and 111 deletions

View File

@@ -22,18 +22,26 @@ import com.google.common.base.Optional;
 import com.google.common.base.Strings;
 import com.google.common.net.HostAndPort;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
 import org.apache.hadoop.hdds.scm.ScmConfigKeys;
+import org.apache.hadoop.net.DNS;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.ozone.OzoneConfigKeys;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 import java.net.InetSocketAddress;
+import java.net.UnknownHostException;
 import java.nio.file.Paths;
 import java.util.Collection;
 import java.util.HashSet;
 
+import static org.apache.hadoop.hdfs.DFSConfigKeys
+    .DFS_DATANODE_DNS_INTERFACE_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys
+    .DFS_DATANODE_DNS_NAMESERVER_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_HOST_NAME_KEY;
 import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ENABLED;
 import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ENABLED_DEFAULT;

@@ -269,4 +277,42 @@ public class HddsUtils {
     }
     return dataNodeIDPath;
   }
+
+  /**
+   * Returns the hostname for this datanode. If the hostname is not
+   * explicitly configured in the given config, then it is determined
+   * via the DNS class.
+   *
+   * @param conf Configuration
+   *
+   * @return the hostname (NB: may not be a FQDN)
+   * @throws UnknownHostException if the dfs.datanode.dns.interface
+   *    option is used and the hostname can not be determined
+   */
+  public static String getHostName(Configuration conf)
+      throws UnknownHostException {
+    String name = conf.get(DFS_DATANODE_HOST_NAME_KEY);
+    if (name == null) {
+      String dnsInterface = conf.get(
+          CommonConfigurationKeys.HADOOP_SECURITY_DNS_INTERFACE_KEY);
+      String nameServer = conf.get(
+          CommonConfigurationKeys.HADOOP_SECURITY_DNS_NAMESERVER_KEY);
+      boolean fallbackToHosts = false;
+
+      if (dnsInterface == null) {
+        // Try the legacy configuration keys.
+        dnsInterface = conf.get(DFS_DATANODE_DNS_INTERFACE_KEY);
+        nameServer = conf.get(DFS_DATANODE_DNS_NAMESERVER_KEY);
+      } else {
+        // If HADOOP_SECURITY_DNS_* is set then also attempt hosts file
+        // resolution if DNS fails. We will not use hosts file resolution
+        // by default to avoid breaking existing clusters.
+        fallbackToHosts = true;
+      }
+
+      name = DNS.getDefaultHost(dnsInterface, nameServer, fallbackToHosts);
+    }
+    return name;
+  }
 }
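
The hunk above moves hostname resolution into HddsUtils so the HDDS datanode plugin no longer depends on the HDFS DataNode class. A minimal, hypothetical usage sketch follows; the standalone class and main method exist only for illustration, while HddsUtils.getHostName, OzoneConfiguration, and the InetAddress lookup mirror the HddsDatanodeService change in the next file.

import java.net.InetAddress;
import java.net.UnknownHostException;

import org.apache.hadoop.hdds.HddsUtils;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;

public class HostNameLookupSketch {
  public static void main(String[] args) throws UnknownHostException {
    // Assumed setup: dfs.datanode.hostname or the DNS interface/nameserver
    // keys may be set in the configuration; otherwise DNS.getDefaultHost
    // picks the name, as shown in the hunk above.
    OzoneConfiguration conf = new OzoneConfiguration();
    String hostname = HddsUtils.getHostName(conf);
    // Resolve the address the same way HddsDatanodeService does.
    String ip = InetAddress.getByName(hostname).getHostAddress();
    System.out.println(hostname + " -> " + ip);
  }
}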

View File

@@ -22,7 +22,6 @@ import com.google.common.base.Preconditions;
 import org.apache.hadoop.conf.Configurable;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdds.scm.ScmConfigKeys;
-import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdds.HddsUtils;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;

@@ -67,7 +66,7 @@ public class HddsDatanodeService implements ServicePlugin {
     }
     if (HddsUtils.isHddsEnabled(conf)) {
       try {
-        String hostname = DataNode.getHostName(conf);
+        String hostname = HddsUtils.getHostName(conf);
         String ip = InetAddress.getByName(hostname).getHostAddress();
         datanodeDetails = initializeDatanodeDetails();
         datanodeDetails.setHostName(hostname);

View File

@@ -84,8 +84,7 @@ public interface HdfsServerConstants {
   enum NodeType {
     NAME_NODE,
     DATA_NODE,
-    JOURNAL_NODE,
-    STORAGE_CONTAINER_SERVICE
+    JOURNAL_NODE
   }
 
   /** Startup options for rolling upgrade. */

View File

@@ -262,8 +262,4 @@ public class StorageInfo {
     }
     return props;
   }
-
-  public NodeType getNodeType() {
-    return storageType;
-  }
 }

View File

@@ -914,7 +914,7 @@ public class DataNode extends ReconfigurableBase
    * @throws UnknownHostException if the dfs.datanode.dns.interface
    *    option is used and the hostname can not be determined
    */
-  public static String getHostName(Configuration config)
+  private static String getHostName(Configuration config)
      throws UnknownHostException {
    String name = config.get(DFS_DATANODE_HOST_NAME_KEY);
    if (name == null) {

View File

@@ -43,7 +43,7 @@ import org.apache.hadoop.security.http.RestCsrfPreventionFilter.HttpInteraction;
  * handler drops the request and immediately sends an HTTP 400 response.
  */
 @InterfaceAudience.Private
-public final class RestCsrfPreventionFilterHandler
+final class RestCsrfPreventionFilterHandler
     extends SimpleChannelInboundHandler<HttpRequest> {
 
   private static final Log LOG = DatanodeHttpServer.LOG;

View File

@@ -187,14 +187,6 @@ message StorageInfoProto {
   required uint32 namespceID = 2;  // File system namespace ID
   required string clusterID = 3;   // ID of the cluster
   required uint64 cTime = 4;       // File system creation time
-
-  enum NodeTypeProto {
-    NAME_NODE = 1;
-    DATA_NODE = 2;
-    JOURNAL_NODE = 3;
-    STORAGE_CONTAINER_SERVICE = 4;
-  }
-  optional NodeTypeProto nodeType = 5;
 }
 
 /**

View File

@@ -87,7 +87,7 @@ public class TestFavoredNodesEndToEnd {
     for (int i = 0; i < NUM_FILES; i++) {
       Random rand = new Random(System.currentTimeMillis() + i);
       //pass a new created rand so as to get a uniform distribution each time
-      //without too much collisions (look at the do-while loop in getMembers)
+      //without too much collisions (look at the do-while loop in getDatanodes)
       InetSocketAddress datanode[] = getDatanodes(rand);
       Path p = new Path("/filename"+i);
       FSDataOutputStream out = dfs.create(p, FsPermission.getDefault(), true,

@@ -168,7 +168,7 @@ public class TestFavoredNodesEndToEnd {
     for (int i = 0; i < NUM_FILES; i++) {
       Random rand = new Random(System.currentTimeMillis() + i);
       // pass a new created rand so as to get a uniform distribution each time
-      // without too much collisions (look at the do-while loop in getMembers)
+      // without too much collisions (look at the do-while loop in getDatanodes)
       InetSocketAddress datanode[] = getDatanodes(rand);
       Path p = new Path("/filename" + i);
       // create and close the file.

@@ -195,7 +195,7 @@ public class TestFavoredNodesEndToEnd {
     for (int i = 0; i < NUM_FILES; i++) {
       Random rand = new Random(System.currentTimeMillis() + i);
       //pass a new created rand so as to get a uniform distribution each time
-      //without too much collisions (look at the do-while loop in getMembers)
+      //without too much collisions (look at the do-while loop in getDatanodes)
       InetSocketAddress[] dns = getDatanodes(rand);
       Path p = new Path("/filename"+i);
       FSDataOutputStream out =

View File

@@ -23,10 +23,8 @@ import com.google.common.base.Preconditions;
 import org.apache.commons.io.FileUtils;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
-import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.ipc.Client;
 import org.apache.hadoop.ipc.RPC;
 import org.apache.hadoop.net.NetUtils;

@@ -49,7 +47,6 @@ import org.apache.hadoop.test.GenericTestUtils;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_PLUGINS_KEY;
 
-import org.apache.hadoop.util.ServicePlugin;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import org.slf4j.event.Level;

@@ -242,7 +239,7 @@ public final class MiniOzoneClassicCluster extends MiniDFSCluster
     // An Ozone request may originate at any DataNode, so pick one at random.
     int dnIndex = new Random().nextInt(getDataNodes().size());
     String uri = String.format("http://127.0.0.1:%d",
-        getOzoneRestPort(getDataNodes().get(dnIndex)));
+        MiniOzoneTestHelper.getOzoneRestPort(getDataNodes().get(dnIndex)));
     LOG.info("Creating Ozone client to DataNode {} with URI {} and user {}",
         dnIndex, uri, USER_AUTH);
     try {

@@ -339,20 +336,6 @@ public final class MiniOzoneClassicCluster extends MiniDFSCluster
         4 * 1000);
   }
 
-  public static DatanodeDetails getDatanodeDetails(DataNode dataNode) {
-    DatanodeDetails datanodeDetails = null;
-    for (ServicePlugin plugin : dataNode.getPlugins()) {
-      if (plugin instanceof HddsDatanodeService) {
-        datanodeDetails = ((HddsDatanodeService) plugin).getDatanodeDetails();
-      }
-    }
-    return datanodeDetails;
-  }
-
-  public static int getOzoneRestPort(DataNode dataNode) {
-    return getDatanodeDetails(dataNode).getOzoneRestPort();
-  }
-
   /**
    * Builder for configuring the MiniOzoneCluster to run.
    */

View File

@@ -25,6 +25,9 @@ import org.apache.hadoop.ozone.container.common.statemachine
 import org.apache.hadoop.ozone.container.ozoneimpl.OzoneContainer;
 import org.apache.hadoop.util.ServicePlugin;
 
+import java.lang.reflect.Field;
+import java.util.List;
+
 /**
  * Stateless helper functions for MiniOzone based tests.
  */

@@ -37,6 +40,10 @@ public class MiniOzoneTestHelper {
     return findHddsPlugin(dataNode).getDatanodeDetails();
   }
 
+  public static int getOzoneRestPort(DataNode dataNode) {
+    return MiniOzoneTestHelper.getDatanodeDetails(dataNode).getOzoneRestPort();
+  }
+
   public static OzoneContainer getOzoneContainer(DataNode dataNode) {
     return findHddsPlugin(dataNode).getDatanodeStateMachine()
         .getContainer();

@@ -52,10 +59,19 @@ public class MiniOzoneTestHelper {
   }
 
   private static HddsDatanodeService findHddsPlugin(DataNode dataNode) {
-    for (ServicePlugin plugin : dataNode.getPlugins()) {
-      if (plugin instanceof HddsDatanodeService) {
-        return (HddsDatanodeService) plugin;
+    try {
+      Field pluginsField = DataNode.class.getDeclaredField("plugins");
+      pluginsField.setAccessible(true);
+      List<ServicePlugin> plugins =
+          (List<ServicePlugin>) pluginsField.get(dataNode);
+      for (ServicePlugin plugin : plugins) {
+        if (plugin instanceof HddsDatanodeService) {
+          return (HddsDatanodeService) plugin;
+        }
       }
+    } catch (NoSuchFieldException | IllegalAccessException e) {
+      e.printStackTrace();
     }
     throw new IllegalStateException("Can't find the Hdds server plugin in the"
         + " plugin collection of datanode");

View File

@@ -80,7 +80,7 @@ public interface RatisTestHelper {
     }
 
     public int getDatanodeOzoneRestPort() {
-      return MiniOzoneClassicCluster.getOzoneRestPort(
+      return MiniOzoneTestHelper.getOzoneRestPort(
           cluster.getDataNodes().get(0));
     }
   }
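
Most of the remaining test changes are the same mechanical substitution: the Ozone REST port of a DataNode is looked up through MiniOzoneTestHelper rather than MiniOzoneClassicCluster. A condensed sketch of the recurring setup, assembled from the test diffs below (a fragment, not a complete test class):

  @BeforeClass
  public static void init() throws Exception {
    OzoneConfiguration conf = new OzoneConfiguration();
    cluster = new MiniOzoneClassicCluster.Builder(conf)
        .setHandlerType(OzoneConsts.OZONE_HANDLER_DISTRIBUTED).build();
    DataNode dataNode = cluster.getDataNodes().get(0);
    // Port lookup now goes through the test helper, not the cluster class.
    final int port = MiniOzoneTestHelper.getOzoneRestPort(dataNode);
    client = new OzoneRestClient(String.format("http://localhost:%d", port));
  }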

View File

@@ -95,12 +95,8 @@ public class TestMiniOzoneCluster {
     for(DataNode dn : datanodes) {
       // Create a single member pipe line
       String containerName = OzoneUtils.getRequestID();
-      DatanodeDetails datanodeDetails = null;
-      for (ServicePlugin plugin : dn.getPlugins()) {
-        if (plugin instanceof HddsDatanodeService) {
-          datanodeDetails = ((HddsDatanodeService) plugin).getDatanodeDetails();
-        }
-      }
+      DatanodeDetails datanodeDetails =
+          MiniOzoneTestHelper.getDatanodeDetails(dn);
       final PipelineChannel pipelineChannel =
           new PipelineChannel(datanodeDetails.getUuidString(),
               HddsProtos.LifeCycleState.OPEN,

View File

@@ -15,11 +15,10 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-package org.apache.hadoop.ozone.web;
+package org.apache.hadoop.ozone;
 
 import org.apache.hadoop.ozone.web.exceptions.ErrorTable;
 import org.apache.hadoop.ozone.client.rest.headers.Header;
-import org.apache.hadoop.ozone.OzoneConsts;
 import org.apache.hadoop.ozone.web.utils.OzoneUtils;
 import org.apache.hadoop.util.Time;
 import org.apache.http.HttpResponse;

View File

@@ -170,7 +170,7 @@ public class TestStorageContainerManagerHelper {
   private OzoneContainer getContainerServerByDatanodeUuid(String dnUUID)
       throws IOException {
     for (DataNode dn : cluster.getDataNodes()) {
-      if (MiniOzoneClassicCluster.getDatanodeDetails(dn).getUuidString()
+      if (MiniOzoneTestHelper.getDatanodeDetails(dn).getUuidString()
           .equals(dnUUID)) {
         return MiniOzoneTestHelper.getOzoneContainer(dn);
       }

View File

@@ -86,7 +86,7 @@ public class TestCloseContainerHandler {
     Assert.assertFalse(isContainerClosed(cluster, containerName));
 
-    DatanodeDetails datanodeDetails = MiniOzoneClassicCluster
+    DatanodeDetails datanodeDetails = MiniOzoneTestHelper
         .getDatanodeDetails(cluster.getDataNodes().get(0));
     //send the order to close the container
     cluster.getStorageContainerManager().getScmNodeManager()

View File

@@ -21,6 +21,7 @@ package org.apache.hadoop.ozone.container.ozoneimpl;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.ozone.MiniOzoneClassicCluster;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.ozone.MiniOzoneTestHelper;
 import org.apache.hadoop.ozone.OzoneConsts;
 import org.apache.hadoop.ozone.RatisTestHelper;
 import org.apache.hadoop.ozone.container.ContainerTestHelper;

@@ -88,7 +89,7 @@ public class TestOzoneContainerRatis {
     final Pipeline pipeline = ContainerTestHelper.createPipeline(
         containerName,
         CollectionUtils.as(datanodes,
-            MiniOzoneClassicCluster::getDatanodeDetails));
+            MiniOzoneTestHelper::getDatanodeDetails));
     LOG.info("pipeline=" + pipeline);
 
     // Create Ratis cluster

View File

@@ -22,6 +22,7 @@ import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.ozone.MiniOzoneClassicCluster;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.ozone.MiniOzoneTestHelper;
 import org.apache.hadoop.ozone.OzoneConsts;
 import org.apache.hadoop.ozone.RatisTestHelper;
 import org.apache.hadoop.ozone.container.ContainerTestHelper;

@@ -84,7 +85,7 @@ public class TestRatisManager {
     final List<DataNode> datanodes = cluster.getDataNodes();
     final List<DatanodeDetails> datanodeDetailsSet = datanodes.stream()
-        .map(MiniOzoneClassicCluster::getDatanodeDetails).collect(
+        .map(MiniOzoneTestHelper::getDatanodeDetails).collect(
             Collectors.toList());
 
     //final RatisManager manager = RatisManager.newRatisManager(conf);

View File

@@ -25,6 +25,7 @@ import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.ozone.MiniOzoneClassicCluster;
 import org.apache.hadoop.ozone.MiniOzoneCluster;
+import org.apache.hadoop.ozone.MiniOzoneTestHelper;
 import org.apache.hadoop.ozone.OzoneConsts;
 import org.apache.hadoop.ozone.ksm.helpers.ServiceInfo;
 import org.apache.hadoop.ozone.protocol.proto

@@ -124,7 +125,7 @@ public class TestKeySpaceManagerRestInterface {
       switch (type) {
       case HTTP:
       case HTTPS:
-        Assert.assertEquals(MiniOzoneClassicCluster.getOzoneRestPort(datanode),
+        Assert.assertEquals(MiniOzoneTestHelper.getOzoneRestPort(datanode),
             (int) ports.get(type));
         break;
       default:

View File

@@ -40,6 +40,7 @@ import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.ozone.MiniOzoneClassicCluster;
+import org.apache.hadoop.ozone.MiniOzoneTestHelper;
 import org.apache.hadoop.ozone.OzoneAcl;
 import org.apache.hadoop.ozone.OzoneAcl.OzoneACLRights;
 import org.apache.hadoop.ozone.OzoneAcl.OzoneACLType;

@@ -117,7 +118,7 @@ public class TestOzoneShell {
     cluster = new MiniOzoneClassicCluster.Builder(conf)
         .setHandlerType(OzoneConsts.OZONE_HANDLER_DISTRIBUTED).build();
     DataNode dataNode = cluster.getDataNodes().get(0);
-    final int port = MiniOzoneClassicCluster.getOzoneRestPort(dataNode);
+    final int port = MiniOzoneTestHelper.getOzoneRestPort(dataNode);
     url = String.format("http://localhost:%d", port);
     client = new OzoneRestClient(String.format("http://localhost:%d", port));
     client.setUserAuth(OzoneConsts.OZONE_SIMPLE_HDFS_USER);

View File

@@ -22,6 +22,7 @@ import org.apache.hadoop.hdds.scm.StorageContainerManager;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.ozone.MiniOzoneClassicCluster;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.ozone.MiniOzoneTestHelper;
 import org.apache.hadoop.ozone.OzoneConsts;
 import org.apache.hadoop.ozone.container.common.helpers.ContainerData;
 import org.apache.hadoop.ozone.container.common.helpers.KeyUtils;

@@ -234,7 +235,7 @@ public class TestSCMCli {
   @Test
   public void testInfoContainer() throws Exception {
     // The cluster has one Datanode server.
-    DatanodeDetails datanodeDetails = MiniOzoneClassicCluster
+    DatanodeDetails datanodeDetails = MiniOzoneTestHelper
         .getDatanodeDetails(cluster.getDataNodes().get(0));
     String formatStr =
         "Container Name: %s\n" +

View File

@@ -32,6 +32,7 @@ import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.metrics2.MetricsRecordBuilder;
 import org.apache.hadoop.ozone.MiniOzoneClassicCluster;
+import org.apache.hadoop.ozone.MiniOzoneTestHelper;
 import org.apache.hadoop.ozone.OzoneConsts;
 import org.apache.hadoop.ozone.container.common.helpers.ContainerReport;
 import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos;

@@ -173,7 +174,7 @@ public class TestSCMMetrics {
     StorageContainerManager scmManager = cluster.getStorageContainerManager();
 
     DataNode dataNode = cluster.getDataNodes().get(0);
-    String datanodeUuid = MiniOzoneClassicCluster.getDatanodeDetails(dataNode)
+    String datanodeUuid = MiniOzoneTestHelper.getDatanodeDetails(dataNode)
         .getUuidString();
     ContainerReportsRequestProto request = createContainerReport(numReport,
         stat, datanodeUuid);

View File

@@ -19,9 +19,11 @@ package org.apache.hadoop.ozone.web;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.ozone.MiniOzoneClassicCluster;
+import org.apache.hadoop.ozone.MiniOzoneTestHelper;
 import org.apache.hadoop.ozone.OzoneConfigKeys;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.ozone.OzoneConsts;
+import org.apache.hadoop.ozone.TestOzoneHelper;
 import org.apache.log4j.Level;
 import org.apache.log4j.Logger;
 import org.junit.Rule;

@@ -67,7 +69,7 @@ public class TestDistributedOzoneVolumes extends TestOzoneHelper {
     cluster = new MiniOzoneClassicCluster.Builder(conf)
         .setHandlerType(OzoneConsts.OZONE_HANDLER_DISTRIBUTED).build();
     DataNode dataNode = cluster.getDataNodes().get(0);
-    port = MiniOzoneClassicCluster.getOzoneRestPort(dataNode);
+    port = MiniOzoneTestHelper.getOzoneRestPort(dataNode);
   }
 
   /**

View File

@@ -19,9 +19,11 @@ package org.apache.hadoop.ozone.web;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.ozone.MiniOzoneClassicCluster;
+import org.apache.hadoop.ozone.MiniOzoneTestHelper;
 import org.apache.hadoop.ozone.OzoneConfigKeys;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.ozone.OzoneConsts;
+import org.apache.hadoop.ozone.TestOzoneHelper;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.log4j.Level;
 import org.apache.log4j.Logger;

@@ -70,7 +72,7 @@ public class TestLocalOzoneVolumes extends TestOzoneHelper {
     cluster = new MiniOzoneClassicCluster.Builder(conf)
         .setHandlerType(OzoneConsts.OZONE_HANDLER_LOCAL).build();
     DataNode dataNode = cluster.getDataNodes().get(0);
-    port = MiniOzoneClassicCluster.getOzoneRestPort(dataNode);
+    port = MiniOzoneTestHelper.getOzoneRestPort(dataNode);
   }
 
   /**

View File

@@ -20,6 +20,7 @@ package org.apache.hadoop.ozone.web;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.ozone.MiniOzoneClassicCluster;
+import org.apache.hadoop.ozone.MiniOzoneTestHelper;
 import org.apache.hadoop.ozone.OzoneConfigKeys;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.ozone.OzoneConsts;

@@ -79,7 +80,7 @@ public class TestOzoneWebAccess {
     cluster = new MiniOzoneClassicCluster.Builder(conf)
         .setHandlerType(OzoneConsts.OZONE_HANDLER_LOCAL).build();
     DataNode dataNode = cluster.getDataNodes().get(0);
-    port = MiniOzoneClassicCluster.getOzoneRestPort(dataNode);
+    port = MiniOzoneTestHelper.getOzoneRestPort(dataNode);
   }
 
   /**

View File

@@ -20,6 +20,7 @@ package org.apache.hadoop.ozone.web.client;
 import org.apache.hadoop.fs.StorageType;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.ozone.MiniOzoneClassicCluster;
+import org.apache.hadoop.ozone.MiniOzoneTestHelper;
 import org.apache.hadoop.ozone.OzoneConfigKeys;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.ozone.OzoneConsts;

@@ -80,7 +81,7 @@ public class TestBuckets {
     cluster = new MiniOzoneClassicCluster.Builder(conf)
         .setHandlerType(OzoneConsts.OZONE_HANDLER_DISTRIBUTED).build();
     DataNode dataNode = cluster.getDataNodes().get(0);
-    final int port = MiniOzoneClassicCluster.getOzoneRestPort(dataNode);
+    final int port = MiniOzoneTestHelper.getOzoneRestPort(dataNode);
     ozoneRestClient = new OzoneRestClient(
         String.format("http://localhost:%d", port));
   }

View File

@@ -111,7 +111,7 @@ public class TestKeys {
     ozoneCluster = new MiniOzoneClassicCluster.Builder(conf)
         .setHandlerType(OzoneConsts.OZONE_HANDLER_DISTRIBUTED).build();
     DataNode dataNode = ozoneCluster.getDataNodes().get(0);
-    final int port = MiniOzoneClassicCluster.getOzoneRestPort(dataNode);
+    final int port = MiniOzoneTestHelper.getOzoneRestPort(dataNode);
     ozoneRestClient = new OzoneRestClient(
         String.format("http://localhost:%d", port));
     currentTime = Time.now();

@@ -282,7 +282,7 @@ public class TestKeys {
     cluster.restartDataNode(datanodeIdx);
     // refresh the datanode endpoint uri after datanode restart
     DataNode dataNode = cluster.getDataNodes().get(datanodeIdx);
-    final int port = MiniOzoneClassicCluster.getOzoneRestPort(dataNode);
+    final int port = MiniOzoneTestHelper.getOzoneRestPort(dataNode);
     client.setEndPoint(String.format("http://localhost:%d", port));
   }

View File

@@ -45,6 +45,7 @@ import io.netty.handler.logging.LoggingHandler;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.ozone.MiniOzoneClassicCluster;
+import org.apache.hadoop.ozone.MiniOzoneTestHelper;
 import org.apache.hadoop.ozone.OzoneConfigKeys;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.ozone.OzoneConsts;

@@ -98,7 +99,7 @@ public class TestOzoneClient {
         .setHandlerType(OzoneConsts.OZONE_HANDLER_DISTRIBUTED).build();
     DataNode dataNode = cluster.getDataNodes().get(0);
     endpoint = String.format("http://localhost:%d",
-        MiniOzoneClassicCluster.getOzoneRestPort(dataNode));
+        MiniOzoneTestHelper.getOzoneRestPort(dataNode));
   }
 
   @AfterClass
@AfterClass @AfterClass

View File

@@ -24,6 +24,7 @@ import org.apache.commons.lang.StringUtils;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.ozone.MiniOzoneClassicCluster;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.ozone.MiniOzoneTestHelper;
 import org.apache.hadoop.ozone.OzoneConfigKeys;
 import org.apache.hadoop.ozone.OzoneConsts;
 import org.apache.hadoop.ozone.protocol.proto.KeySpaceManagerProtocolProtos.Status;

@@ -90,7 +91,7 @@ public class TestVolume {
     cluster = new MiniOzoneClassicCluster.Builder(conf)
         .setHandlerType(OzoneConsts.OZONE_HANDLER_DISTRIBUTED).build();
     DataNode dataNode = cluster.getDataNodes().get(0);
-    final int port = MiniOzoneClassicCluster.getOzoneRestPort(dataNode);
+    final int port = MiniOzoneTestHelper.getOzoneRestPort(dataNode);
     ozoneRestClient = new OzoneRestClient(
         String.format("http://localhost:%d", port));

View File

@@ -19,7 +19,6 @@ package org.apache.hadoop.ozone.web.netty;
 import javax.servlet.FilterConfig;
 import javax.servlet.ServletContext;
-import javax.servlet.ServletException;
 import java.io.Closeable;
 import java.io.IOException;
 import java.net.BindException;

@@ -33,8 +32,6 @@ import java.util.Map;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.server.datanode.ObjectStoreHandler;
-import org.apache.hadoop.hdfs.server.datanode.web
-    .RestCsrfPreventionFilterHandler;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.hdds.scm.ScmConfigKeys;
 import org.apache.hadoop.security.http.RestCsrfPreventionFilter;

@@ -54,10 +51,6 @@ import io.netty.handler.codec.http.HttpResponseEncoder;
 import io.netty.handler.stream.ChunkedWriteHandler;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
-import static org.apache.hadoop.hdds.scm.ScmConfigKeys
-    .HDDS_REST_CSRF_ENABLED_DEFAULT;
-import static org.apache.hadoop.hdds.scm.ScmConfigKeys
-    .HDDS_REST_CSRF_ENABLED_KEY;
 import static org.apache.hadoop.hdds.scm.ScmConfigKeys
     .HDDS_REST_HTTP_ADDRESS_DEFAULT;
 import static org.apache.hadoop.hdds.scm.ScmConfigKeys
@@ -75,7 +68,6 @@ public class ObjectStoreRestHttpServer implements Closeable {
   private final ServerBootstrap httpServer;
   private final Configuration conf;
   private final Configuration confForCreate;
-  private final RestCsrfPreventionFilter restCsrfPreventionFilter;
   private InetSocketAddress httpAddress;
   static final Log LOG = LogFactory.getLog(ObjectStoreRestHttpServer.class);
   private final ObjectStoreHandler objectStoreHandler;

@@ -83,7 +75,6 @@ public class ObjectStoreRestHttpServer implements Closeable {
   public ObjectStoreRestHttpServer(final Configuration conf,
       final ServerSocketChannel externalHttpChannel,
       ObjectStoreHandler objectStoreHandler) throws IOException {
-    this.restCsrfPreventionFilter = createRestCsrfPreventionFilter(conf);
     this.conf = conf;
 
     this.confForCreate = new Configuration(conf);

@@ -101,11 +92,7 @@ public class ObjectStoreRestHttpServer implements Closeable {
       protected void initChannel(SocketChannel ch) throws Exception {
         ChannelPipeline p = ch.pipeline();
         p.addLast(new HttpRequestDecoder(), new HttpResponseEncoder());
-        if (restCsrfPreventionFilter != null) {
-          p.addLast(
-              new RestCsrfPreventionFilterHandler(restCsrfPreventionFilter));
-        }
+        // Later we have to support cross-site request forgery (CSRF) Filter
         p.addLast(new ChunkedWriteHandler(), new ObjectStoreURLDispatcher(
             objectStoreHandler.getObjectStoreJerseyContainer()));
       }
@@ -172,36 +159,6 @@ public class ObjectStoreRestHttpServer implements Closeable {
     }
   }
 
-  /**
-   * Creates the {@link RestCsrfPreventionFilter} for the DataNode. Since the
-   * DataNode HTTP server is not implemented in terms of the servlet API, it
-   * takes some extra effort to obtain an instance of the filter. This method
-   * takes care of configuration and implementing just enough of the servlet API
-   * and related interfaces so that the DataNode can get a fully initialized
-   * instance of the filter.
-   *
-   * @param conf configuration to read
-   * @return initialized filter, or null if CSRF protection not enabled
-   */
-  private static RestCsrfPreventionFilter createRestCsrfPreventionFilter(
-      Configuration conf) {
-    if (!conf.getBoolean(HDDS_REST_CSRF_ENABLED_KEY,
-        HDDS_REST_CSRF_ENABLED_DEFAULT)) {
-      return null;
-    }
-    String restCsrfClassName = RestCsrfPreventionFilter.class.getName();
-    Map<String, String> restCsrfParams = RestCsrfPreventionFilter
-        .getFilterParams(conf, "dfs.webhdfs.rest-csrf.");
-    RestCsrfPreventionFilter filter = new RestCsrfPreventionFilter();
-    try {
-      filter.init(new MapBasedFilterConfig(restCsrfClassName, restCsrfParams));
-    } catch (ServletException e) {
-      throw new IllegalStateException(
-          "Failed to initialize RestCsrfPreventionFilter.", e);
-    }
-    return filter;
-  }
-
   /**
    * A minimal {@link FilterConfig} implementation backed by a {@link Map}.
    */