diff --git a/dev-support/bin/dist-layout-stitching b/dev-support/bin/dist-layout-stitching index f3db5427f21..6557161c162 100755 --- a/dev-support/bin/dist-layout-stitching +++ b/dev-support/bin/dist-layout-stitching @@ -21,6 +21,9 @@ VERSION=$1 # project.build.directory BASEDIR=$2 +#hdds.version +HDDS_VERSION=$3 + function run() { declare res @@ -132,7 +135,6 @@ run copy "${ROOT}/hadoop-hdfs-project/hadoop-hdfs-native-client/target/hadoop-hd run copy "${ROOT}/hadoop-hdfs-project/hadoop-hdfs-rbf/target/hadoop-hdfs-rbf-${VERSION}" . run copy "${ROOT}/hadoop-yarn-project/target/hadoop-yarn-project-${VERSION}" . run copy "${ROOT}/hadoop-mapreduce-project/target/hadoop-mapreduce-${VERSION}" . -run copy "${ROOT}/hadoop-tools/hadoop-tools-dist/target/hadoop-tools-dist-${VERSION}" . #copy httpfs and kms as is run cp -pr "${ROOT}/hadoop-hdfs-project/hadoop-hdfs-httpfs/target/hadoop-hdfs-httpfs-${VERSION}"/* . @@ -144,6 +146,24 @@ run cp -p "${ROOT}/hadoop-client-modules/hadoop-client-api/target/hadoop-client- run cp -p "${ROOT}/hadoop-client-modules/hadoop-client-runtime/target/hadoop-client-runtime-${VERSION}.jar" share/hadoop/client/ run cp -p "${ROOT}/hadoop-client-modules/hadoop-client-minicluster/target/hadoop-client-minicluster-${VERSION}.jar" share/hadoop/client/ +# HDDS +run copy "${ROOT}/hadoop-hdds/common/target/hadoop-hdds-common-${HDDS_VERSION}" . +run copy "${ROOT}/hadoop-hdds/framework/target/hadoop-hdds-server-framework-${HDDS_VERSION}" . +run copy "${ROOT}/hadoop-hdds/server-scm/target/hadoop-hdds-server-scm-${HDDS_VERSION}" . +run copy "${ROOT}/hadoop-hdds/container-service/target/hadoop-hdds-container-service-${HDDS_VERSION}" . +run copy "${ROOT}/hadoop-hdds/client/target/hadoop-hdds-client-${HDDS_VERSION}" . +run copy "${ROOT}/hadoop-hdds/tools/target/hadoop-hdds-tools-${HDDS_VERSION}" . + +# Ozone +run copy "${ROOT}/hadoop-ozone/common/target/hadoop-ozone-common-${HDDS_VERSION}" . +run copy "${ROOT}/hadoop-ozone/ozone-manager/target/hadoop-ozone-ozone-manager-${HDDS_VERSION}" . +run copy "${ROOT}/hadoop-ozone/objectstore-service/target/hadoop-ozone-objectstore-service-${HDDS_VERSION}" . +run copy "${ROOT}/hadoop-ozone/client/target/hadoop-ozone-client-${HDDS_VERSION}" . +run copy "${ROOT}/hadoop-ozone/tools/target/hadoop-ozone-tools-${HDDS_VERSION}" . + +run copy "${ROOT}/hadoop-tools/hadoop-tools-dist/target/hadoop-tools-dist-${VERSION}" . + + echo echo "Hadoop dist layout available at: ${BASEDIR}/hadoop-${VERSION}" echo diff --git a/hadoop-common-project/hadoop-common/src/main/conf/hadoop-env.sh b/hadoop-common-project/hadoop-common/src/main/conf/hadoop-env.sh index 24aacdfe1c1..6573a81eb5b 100644 --- a/hadoop-common-project/hadoop-common/src/main/conf/hadoop-env.sh +++ b/hadoop-common-project/hadoop-common/src/main/conf/hadoop-env.sh @@ -402,7 +402,24 @@ esac # and therefore may override any similar flags set in HADOOP_OPTS # # export HDFS_DFSROUTER_OPTS="" + ### +# HDFS Key Space Manager specific parameters +### +# Specify the JVM options to be used when starting the HDFS Key Space Manager. +# These options will be appended to the options specified as HADOOP_OPTS +# and therefore may override any similar flags set in HADOOP_OPTS +# +# export HDFS_KSM_OPTS="" + +### +# HDFS StorageContainerManager specific parameters +### +# Specify the JVM options to be used when starting the HDFS Storage Container Manager. 
+# These options will be appended to the options specified as HADOOP_OPTS +# and therefore may override any similar flags set in HADOOP_OPTS +# +# export HDFS_STORAGECONTAINERMANAGER_OPTS="" ### # Advanced Users Only! diff --git a/hadoop-common-project/hadoop-common/src/main/conf/log4j.properties b/hadoop-common-project/hadoop-common/src/main/conf/log4j.properties index 6933cf404e8..5783013040d 100644 --- a/hadoop-common-project/hadoop-common/src/main/conf/log4j.properties +++ b/hadoop-common-project/hadoop-common/src/main/conf/log4j.properties @@ -293,6 +293,40 @@ log4j.appender.EWMA.cleanupInterval=${yarn.ewma.cleanupInterval} log4j.appender.EWMA.messageAgeLimitSeconds=${yarn.ewma.messageAgeLimitSeconds} log4j.appender.EWMA.maxUniqueMessages=${yarn.ewma.maxUniqueMessages} + +# Fair scheduler requests log on state dump +log4j.logger.org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.FairScheduler.statedump=DEBUG,FSLOGGER +log4j.additivity.org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.FairScheduler.statedump=false +log4j.appender.FSLOGGER=org.apache.log4j.RollingFileAppender +log4j.appender.FSLOGGER.File=${hadoop.log.dir}/fairscheduler-statedump.log +log4j.appender.FSLOGGER.layout=org.apache.log4j.PatternLayout +log4j.appender.FSLOGGER.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n +log4j.appender.FSLOGGER.MaxFileSize=${hadoop.log.maxfilesize} +log4j.appender.FSLOGGER.MaxBackupIndex=${hadoop.log.maxbackupindex} + +# +# Add a logger for ozone that is separate from the Datanode. +# +log4j.logger.org.apache.hadoop.ozone=DEBUG,OZONE,FILE + +# Do not log into datanode logs. Remove this line to have single log. +log4j.additivity.org.apache.hadoop.ozone=false + +# For development purposes, log both to console and log file. +log4j.appender.OZONE=org.apache.log4j.ConsoleAppender +log4j.appender.OZONE.Threshold=info +log4j.appender.OZONE.layout=org.apache.log4j.PatternLayout +log4j.appender.OZONE.layout.ConversionPattern=%d{ISO8601} [%t] %-5p \ + %X{component} %X{function} %X{resource} %X{user} %X{request} - %m%n + +# Real ozone logger that writes to ozone.log +log4j.appender.FILE=org.apache.log4j.DailyRollingFileAppender +log4j.appender.FILE.File=${hadoop.log.dir}/ozone.log +log4j.appender.FILE.Threshold=debug +log4j.appender.FILE.layout=org.apache.log4j.PatternLayout +log4j.appender.FILE.layout.ConversionPattern=%d{ISO8601} [%t] %-5p \ +(%F:%L) %X{function} %X{resource} %X{user} %X{request} - \ +%m%n # # Fair scheduler state dump # diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ConfigUtil.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ConfigUtil.java index 23668c57b4b..4c3dae9a9f9 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ConfigUtil.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ConfigUtil.java @@ -161,32 +161,6 @@ public class ConfigUtil { targets); } - /** - * - * @param conf - * @param mountTableName - * @param src - * @param settings - * @param targets - */ - public static void addLinkNfly(Configuration conf, String mountTableName, - String src, String settings, final URI ... targets) { - - settings = settings == null - ? "minReplication=2,repairOnRead=true" - : settings; - - conf.set(getConfigViewFsPrefix(mountTableName) + "." + - Constants.CONFIG_VIEWFS_LINK_NFLY + "." + settings + "." 
+ src, - StringUtils.uriToString(targets)); - } - - public static void addLinkNfly(final Configuration conf, final String src, - final URI ... targets) { - addLinkNfly(conf, Constants.CONFIG_VIEWFS_DEFAULT_MOUNT_TABLE, src, null, - targets); - } - /** * Add config variable for homedir for default mount table * @param conf - add to this conf diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/CodecRegistry.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/CodecRegistry.java index 0f99aa3f7ad..daf91e22ecc 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/CodecRegistry.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/CodecRegistry.java @@ -179,13 +179,4 @@ public final class CodecRegistry { public Map getCodec2CoderCompactMap() { return coderNameCompactMap; } - - /** - * Get all codec names and their corresponding coder list. - * @return a map of all codec names, and their corresponding code list - * separated by ','. - */ - public HashMap getCodec2CoderCompactMap() { - return coderNameCompactMap; - } } diff --git a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml b/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml index f32268bee83..fd72618d919 100644 --- a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml +++ b/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml @@ -1685,6 +1685,19 @@ + + + fs.o3.impl + org.apache.hadoop.fs.ozone.OzoneFileSystem + The implementation class of the Ozone FileSystem. + + + + fs.AbstractFileSystem.o3.impl + org.apache.hadoop.fs.ozone.OzFs + The implementation class of the OzFs AbstractFileSystem. + + diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestCommonConfigurationFields.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestCommonConfigurationFields.java index 6ca9c78cb2b..023c83109e5 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestCommonConfigurationFields.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestCommonConfigurationFields.java @@ -101,6 +101,9 @@ public class TestCommonConfigurationFields extends TestConfigurationFieldsBase { // S3A properties are in a different subtree. xmlPrefixToSkipCompare.add("fs.s3a."); + // O3 properties are in a different subtree. + xmlPrefixToSkipCompare.add("fs.o3."); + //ftp properties are in a different subtree. // - org.apache.hadoop.fs.ftp.FTPFileSystem. 
xmlPrefixToSkipCompare.add("fs.ftp.impl"); diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfServlet.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfServlet.java index 8cdc52b29d4..cf42219f6d8 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfServlet.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfServlet.java @@ -247,62 +247,4 @@ public class TestConfServlet { } assertEquals("", sw.toString()); } - - @Test - public void testOzoneConfigTagCommands() throws Exception { - StringWriter sw = null; - PrintWriter pw = null; - ConfServlet service = null; - try { - service = new ConfServlet(); - ServletContext context = mock(ServletContext.class); - ServletConfig servletConf = mock(ServletConfig.class); - service.init(servletConf); - Configuration config = mock(OzoneConfiguration.class); - when(context.getAttribute(HttpServer2.CONF_CONTEXT_ATTRIBUTE)) - .thenReturn(new Configuration()); - when(service.getServletContext()) - .thenReturn(context); - - HttpServletRequest request = mock(HttpServletRequest.class); - when(request.getParameter("cmd")) - .thenReturn("getOzoneTags"); - HttpServletResponse response = mock(HttpServletResponse.class); - sw = new StringWriter(); - pw = new PrintWriter(sw); - when(response.getWriter()).thenReturn(pw); - - // response request - service.doGet(request, response); - String result = sw.toString().trim(); - - for (OzonePropertyTag ozoneTag : OzonePropertyTag.values()) { - assertTrue(result.contains(ozoneTag.toString())); - } - - when(request.getParameter("cmd")) - .thenReturn("badCommand"); - when(request.getParameter("tags")) - .thenReturn("OZONE,CLIENT"); - when(request.getParameter("group")) - .thenReturn("ozone"); - - service.doGet(request, response); - Mockito.verify(response).sendError( - Mockito.eq(HttpServletResponse.SC_NOT_FOUND), - Mockito.eq("badCommand is not a valid command.")); - - } finally { - if (sw != null) { - sw.close(); - } - if (pw != null) { - pw.close(); - } - if (service != null) { - service.destroy(); - } - } - - } } \ No newline at end of file diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerLocationManagerImpl.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerLocationManagerImpl.java new file mode 100644 index 00000000000..e0e826c471b --- /dev/null +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerLocationManagerImpl.java @@ -0,0 +1,150 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.ozone.container.common.impl; + +import com.google.common.base.Preconditions; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hdfs.server.datanode.StorageLocation; +import org.apache.hadoop.metrics2.util.MBeans; +import org.apache.hadoop.ozone.OzoneConsts; +import org.apache.hadoop.ozone.container.common.interfaces + .ContainerLocationManager; +import org.apache.hadoop.ozone.container.common.interfaces + .ContainerLocationManagerMXBean; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import javax.management.ObjectName; +import java.io.IOException; +import java.nio.file.Path; +import java.nio.file.Paths; +import java.util.LinkedList; +import java.util.List; + +/** + * A class that tells the ContainerManager where to place the containers. + * Please note : There is *no* one-to-one correlation between metadata + * Locations and data Locations. + * + * For example : A user could map all container files to a + * SSD but leave data/metadata on bunch of other disks. + */ +public class ContainerLocationManagerImpl implements ContainerLocationManager, + ContainerLocationManagerMXBean { + private static final Logger LOG = + LoggerFactory.getLogger(ContainerLocationManagerImpl.class); + + private final List dataLocations; + private int currentIndex; + private final List metadataLocations; + private final ObjectName jmxbean; + + /** + * Constructs a Location Manager. + * @param metadataLocations - Refers to the metadataLocations + * where we store the container metadata. + * @param dataDirs - metadataLocations where we store the actual + * data or chunk files. + * @param conf - configuration. + * @throws IOException + */ + public ContainerLocationManagerImpl(List metadataLocations, + List dataDirs, Configuration conf) + throws IOException { + dataLocations = new LinkedList<>(); + for (StorageLocation dataDir : dataDirs) { + dataLocations.add(new ContainerStorageLocation(dataDir, conf)); + } + this.metadataLocations = metadataLocations; + jmxbean = MBeans.register("OzoneDataNode", + ContainerLocationManager.class.getSimpleName(), this); + } + + /** + * Returns the path where the container should be placed from a set of + * metadataLocations. + * + * @return A path where we should place this container and metadata. + * @throws IOException + */ + @Override + public Path getContainerPath() + throws IOException { + Preconditions.checkState(metadataLocations.size() > 0); + int index = currentIndex % metadataLocations.size(); + return Paths.get(metadataLocations.get(index).getNormalizedUri()); + } + + /** + * Returns the path where the container Data file are stored. + * + * @return a path where we place the LevelDB and data files of a container. 
+ * @throws IOException + */ + @Override + public Path getDataPath(String containerName) throws IOException { + Path currentPath = Paths.get( + dataLocations.get(currentIndex++ % dataLocations.size()) + .getNormalizedUri()); + currentPath = currentPath.resolve(OzoneConsts.CONTAINER_PREFIX); + return currentPath.resolve(containerName); + } + + @Override + public StorageLocationReport[] getLocationReport() throws IOException { + StorageLocationReport[] reports = + new StorageLocationReport[dataLocations.size()]; + for (int idx = 0; idx < dataLocations.size(); idx++) { + ContainerStorageLocation loc = dataLocations.get(idx); + long scmUsed = 0; + long remaining = 0; + try { + scmUsed = loc.getScmUsed(); + remaining = loc.getAvailable(); + } catch (IOException ex) { + LOG.warn("Failed to get scmUsed and remaining for container " + + "storage location {}", loc.getNormalizedUri()); + // reset scmUsed and remaining if df/du failed. + scmUsed = 0; + remaining = 0; + } + + // TODO: handle failed storage + // For now, include storage report for location that failed to get df/du. + StorageLocationReport r = new StorageLocationReport( + loc.getStorageUuId(), false, loc.getCapacity(), + scmUsed, remaining); + reports[idx] = r; + } + return reports; + } + + /** + * Supports clean shutdown of container location du threads. + * + * @throws IOException + */ + @Override + public void shutdown() throws IOException { + for (ContainerStorageLocation loc: dataLocations) { + loc.shutdown(); + } + MBeans.unregister(jmxbean); + } +} diff --git a/hadoop-hdfs-project/hadoop-hdfs/dev-support/findbugsExcludeFile.xml b/hadoop-hdfs-project/hadoop-hdfs/dev-support/findbugsExcludeFile.xml index 44f21b40492..3b770f3f80c 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/dev-support/findbugsExcludeFile.xml +++ b/hadoop-hdfs-project/hadoop-hdfs/dev-support/findbugsExcludeFile.xml @@ -2,9 +2,6 @@ - - - @@ -17,12 +14,6 @@ - - - - - - diff --git a/hadoop-hdfs-project/hadoop-hdfs/pom.xml b/hadoop-hdfs-project/hadoop-hdfs/pom.xml index 67a5d24b6d1..f8b17223e3e 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/pom.xml +++ b/hadoop-hdfs-project/hadoop-hdfs/pom.xml @@ -168,6 +168,11 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd"> netty compile + + io.netty + netty-all + compile + org.apache.htrace htrace-core4 @@ -187,17 +192,6 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd"> org.fusesource.leveldbjni leveldbjni-all - - org.rocksdb - rocksdbjni - 5.8.0 - - - io.swagger - swagger-annotations - 1.5.9 - provided - org.bouncycastle @@ -218,16 +212,6 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd"> assertj-core test - - org.jctools - jctools-core - true - - - org.xerial - sqlite-jdbc - 3.8.7 - @@ -320,7 +304,6 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd"> - @@ -420,14 +403,6 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd"> src/main/webapps/static/dataTables.bootstrap.js src/main/webapps/static/d3-v4.1.1.min.js src/test/resources/diskBalancer/data-cluster-3node-3disk.json - src/main/webapps/static/nvd3-1.8.5.min.css.map - src/main/webapps/static/nvd3-1.8.5.min.js - src/main/webapps/static/angular-route-1.6.4.min.js - src/main/webapps/static/nvd3-1.8.5.min.css - src/main/webapps/static/angular-nvd3-1.0.9.min.js - src/main/webapps/static/nvd3-1.8.5.min.js.map - src/main/webapps/static/angular-1.6.4.min.js - src/main/webapps/static/d3-3.5.17.min.js @@ -440,55 +415,12 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd"> configuration.xsl hdfs-default.xml - ozone-default.xml false - - com.github.kongchen - swagger-maven-plugin - 3.1.5 - - - compile - - 
generate - - - - - - - false - target/webapps/static - ozone.swagger - - http - - localhost:9864 - / - - org.apache.hadoop.ozone.web.interfaces - - - HDFS Ozone REST Api - ${project.version} - - Apache Hadoop project - https://hadoop.apache.org - - - http://www.apache.org/licenses/LICENSE-2.0.html - Apache 2.0 - - - - - - diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java index 84bac2d83fc..d68669f2b0d 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java @@ -21,7 +21,6 @@ import java.io.IOException; import java.util.ArrayList; import java.util.Arrays; import java.util.EnumSet; -import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.stream.Collectors; @@ -1762,25 +1761,6 @@ public class ClientNamenodeProtocolServerSideTranslatorPB implements } } - @Override - public GetErasureCodingCodecsResponseProto getErasureCodingCodecs( - RpcController controller, GetErasureCodingCodecsRequestProto request) - throws ServiceException { - try { - HashMap codecs = server.getErasureCodingCodecs(); - GetErasureCodingCodecsResponseProto.Builder resBuilder = - GetErasureCodingCodecsResponseProto.newBuilder(); - for (Map.Entry codec : codecs.entrySet()) { - resBuilder.addCodec( - PBHelperClient.convertErasureCodingCodec( - codec.getKey(), codec.getValue())); - } - return resBuilder.build(); - } catch (IOException e) { - throw new ServiceException(e); - } - } - @Override public AddErasureCodingPoliciesResponseProto addErasureCodingPolicies( RpcController controller, AddErasureCodingPoliciesRequestProto request) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java index b1a7750619d..96b0f36f9e6 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java @@ -111,7 +111,6 @@ import org.apache.hadoop.hdfs.HDFSPolicyProvider; import org.apache.hadoop.hdfs.HdfsConfiguration; import org.apache.hadoop.hdfs.server.datanode.checker.DatasetVolumeChecker; import org.apache.hadoop.hdfs.server.datanode.checker.StorageLocationChecker; -import org.apache.hadoop.ozone.container.ozoneimpl.OzoneContainer; import org.apache.hadoop.util.AutoCloseableLock; import org.apache.hadoop.hdfs.client.BlockReportOptions; import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys; @@ -1575,21 +1574,7 @@ public class DataNode extends ReconfigurableBase registerBlockPoolWithSecretManager(bpRegistration, blockPoolId); } - - @VisibleForTesting - public OzoneContainer getOzoneContainerManager() { - return this.datanodeStateMachine.getContainer(); - } - - @VisibleForTesting - public DatanodeStateMachine.DatanodeStates getOzoneStateMachineState() { - if (this.datanodeStateMachine != null) { - return this.datanodeStateMachine.getContext().getState(); - } - // if the state machine doesn't exist then DN initialization is in progress - return 
DatanodeStateMachine.DatanodeStates.INIT; - } - + /** * After the block pool has contacted the NN, registers that block pool * with the secret manager, updating it with the secrets provided by the NN. diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java index d5bf29ed125..752c830f958 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java @@ -7655,20 +7655,6 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean, } } - /** - * Get available erasure coding codecs and corresponding coders. - */ - HashMap getErasureCodingCodecs() throws IOException { - checkOperation(OperationCategory.READ); - readLock(); - try { - checkOperation(OperationCategory.READ); - return FSDirErasureCodingOp.getErasureCodingCodecs(this); - } finally { - readUnlock("getErasureCodingCodecs"); - } - } - void setXAttr(String src, XAttr xAttr, EnumSet flag, boolean logRetryCache) throws IOException { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml index ee0fad05976..921c1660864 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml @@ -4018,7 +4018,7 @@ dfs.journalnode.enable.sync - false + true If true, the journal nodes wil sync with each other. The journal nodes will periodically gossip with other journal nodes to compare edit log diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeHotSwapVolumes.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeHotSwapVolumes.java index 4564595c995..6530720b361 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeHotSwapVolumes.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeHotSwapVolumes.java @@ -894,9 +894,6 @@ public class TestDataNodeHotSwapVolumes { 1, fsVolumeReferences.size()); } - // Add a new DataNode to help with the pipeline recover. - cluster.startDataNodes(conf, 1, true, null, null, null); - // Verify the file has sufficient replications. DFSTestUtil.waitReplication(fs, testFile, REPLICATION); // Read the content back @@ -928,32 +925,6 @@ public class TestDataNodeHotSwapVolumes { assertTrue(String.format("DataNode(%d) should have more than 1 blocks", dataNodeIdx), blockCount > 1); } - - // Write more files to make sure that the DataNode that has removed volume - // is still alive to receive data. 
- for (int i = 0; i < 10; i++) { - final Path file = new Path("/after-" + i); - try (FSDataOutputStream fout = fs.create(file, REPLICATION)) { - rb.nextBytes(writeBuf); - fout.write(writeBuf); - } - } - - try (FsDatasetSpi.FsVolumeReferences fsVolumeReferences = fsDatasetSpi - .getFsVolumeReferences()) { - assertEquals("Volume remove wasn't successful.", - 1, fsVolumeReferences.size()); - FsVolumeSpi volume = fsVolumeReferences.get(0); - String bpid = cluster.getNamesystem().getBlockPoolId(); - FsVolumeSpi.BlockIterator blkIter = volume.newBlockIterator(bpid, "test"); - int blockCount = 0; - while (!blkIter.atEnd()) { - blkIter.nextBlock(); - blockCount++; - } - assertTrue(String.format("DataNode(%d) should have more than 1 blocks", - dataNodeIdx), blockCount > 1); - } } @Test(timeout=60000) diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/server/TestContainerServer.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/server/TestContainerServer.java index 4d60e9e0dcb..651b10f1cdd 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/server/TestContainerServer.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/server/TestContainerServer.java @@ -81,7 +81,12 @@ public class TestContainerServer { ContainerTestHelper.createSingleNodePipeline(containerName)); channel.writeInbound(request); Assert.assertTrue(channel.finish()); - ContainerCommandResponseProto response = channel.readOutbound(); + + Object responseObject = channel.readOutbound(); + Assert.assertTrue(responseObject instanceof + ContainerCommandResponseProto); + ContainerCommandResponseProto response = + (ContainerCommandResponseProto) responseObject; Assert.assertTrue(request.getTraceID().equals(response.getTraceID())); } finally { if (channel != null) { diff --git a/hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/netty/ObjectStoreChannelHandler.java b/hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/netty/ObjectStoreChannelHandler.java index b2d4567116c..89c196cf506 100644 --- a/hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/netty/ObjectStoreChannelHandler.java +++ b/hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/netty/ObjectStoreChannelHandler.java @@ -26,9 +26,9 @@ import io.netty.handler.codec.http.HttpResponseStatus; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import static io.netty.handler.codec.http.HttpHeaderNames.CONNECTION; -import static io.netty.handler.codec.http.HttpHeaderNames.CONTENT_LENGTH; -import static io.netty.handler.codec.http.HttpHeaderValues.CLOSE; +import static io.netty.handler.codec.http.HttpHeaders.Names.CONNECTION; +import static io.netty.handler.codec.http.HttpHeaders.Names.CONTENT_LENGTH; +import static io.netty.handler.codec.http.HttpHeaders.Values.CLOSE; import static io.netty.handler.codec.http.HttpResponseStatus.INTERNAL_SERVER_ERROR; import static io.netty.handler.codec.http.HttpVersion.HTTP_1_1; diff --git a/hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/netty/ObjectStoreJerseyContainer.java b/hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/netty/ObjectStoreJerseyContainer.java index 2ca36b40a20..c7b516f9f1a 100644 --- a/hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/netty/ObjectStoreJerseyContainer.java +++ 
b/hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/netty/ObjectStoreJerseyContainer.java @@ -46,7 +46,7 @@ import com.sun.jersey.spi.container.ContainerResponseWriter; import com.sun.jersey.spi.container.WebApplication; import io.netty.handler.codec.http.DefaultHttpResponse; -import io.netty.handler.codec.http.HttpHeaderUtil; +//import io.netty.handler.codec.http.HttpUtil; import io.netty.handler.codec.http.HttpHeaders; import io.netty.handler.codec.http.HttpRequest; import io.netty.handler.codec.http.HttpResponse; @@ -263,7 +263,7 @@ public final class ObjectStoreJerseyContainer { this.nettyResp = jerseyResponseToNettyResponse(jerseyResp); this.nettyResp.headers().set(CONTENT_LENGTH, Math.max(0, contentLength)); this.nettyResp.headers().set(CONNECTION, - HttpHeaderUtil.isKeepAlive(this.nettyReq) ? KEEP_ALIVE : CLOSE); + HttpHeaders.isKeepAlive(this.nettyReq) ? KEEP_ALIVE : CLOSE); this.latch.countDown(); LOG.trace( "end writeStatusAndHeaders, contentLength = {}, jerseyResp = {}.", @@ -340,9 +340,9 @@ public final class ObjectStoreJerseyContainer { String host = nettyHeaders.get(HOST); String scheme = host.startsWith("https") ? "https://" : "http://"; String baseUri = scheme + host + "/"; - String reqUri = scheme + host + nettyReq.uri(); + String reqUri = scheme + host + nettyReq.getUri(); LOG.trace("baseUri = {}, reqUri = {}", baseUri, reqUri); - return new ContainerRequest(webapp, nettyReq.method().name(), + return new ContainerRequest(webapp, nettyReq.getMethod().name(), new URI(baseUri), new URI(reqUri), jerseyHeaders, reqIn); } } diff --git a/hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/netty/RequestContentObjectStoreChannelHandler.java b/hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/netty/RequestContentObjectStoreChannelHandler.java index 2f79080aa3f..0a2f22d6b13 100644 --- a/hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/netty/RequestContentObjectStoreChannelHandler.java +++ b/hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/netty/RequestContentObjectStoreChannelHandler.java @@ -21,7 +21,7 @@ import io.netty.channel.ChannelFuture; import io.netty.channel.ChannelFutureListener; import io.netty.channel.ChannelHandlerContext; import io.netty.handler.codec.http.HttpContent; -import io.netty.handler.codec.http.HttpHeaderUtil; +import io.netty.handler.codec.http.HttpHeaders; import io.netty.handler.codec.http.HttpRequest; import io.netty.handler.codec.http.HttpResponse; import io.netty.handler.codec.http.LastHttpContent; @@ -85,7 +85,7 @@ public final class RequestContentObjectStoreChannelHandler ChannelFuture respFuture = ctx.writeAndFlush(new ChunkedStream( this.respIn)); respFuture.addListener(new CloseableCleanupListener(this.respIn)); - if (!HttpHeaderUtil.isKeepAlive(this.nettyReq)) { + if (!HttpHeaders.isKeepAlive(this.nettyReq)) { respFuture.addListener(ChannelFutureListener.CLOSE); } else { respFuture.addListener(new ChannelFutureListener() { diff --git a/hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/netty/RequestDispatchObjectStoreChannelHandler.java b/hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/netty/RequestDispatchObjectStoreChannelHandler.java index 78936244ba9..add827a6740 100644 --- a/hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/netty/RequestDispatchObjectStoreChannelHandler.java +++ 
b/hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/netty/RequestDispatchObjectStoreChannelHandler.java @@ -19,7 +19,7 @@ package org.apache.hadoop.ozone.web.netty; import io.netty.channel.ChannelHandlerContext; import io.netty.handler.codec.http.DefaultFullHttpResponse; -import io.netty.handler.codec.http.HttpHeaderUtil; +import io.netty.handler.codec.http.HttpHeaders; import io.netty.handler.codec.http.HttpRequest; import io.netty.handler.codec.http.HttpResponse; import org.apache.hadoop.io.IOUtils; @@ -67,7 +67,7 @@ public final class RequestDispatchObjectStoreChannelHandler throws Exception { LOG.trace("begin RequestDispatchObjectStoreChannelHandler channelRead0, " + "ctx = {}, nettyReq = {}", ctx, nettyReq); - if (!nettyReq.decoderResult().isSuccess()) { + if (!nettyReq.getDecoderResult().isSuccess()) { sendErrorResponse(ctx, BAD_REQUEST); return; } @@ -77,7 +77,7 @@ public final class RequestDispatchObjectStoreChannelHandler this.respIn = new PipedInputStream(); this.respOut = new PipedOutputStream(respIn); - if (HttpHeaderUtil.is100ContinueExpected(nettyReq)) { + if (HttpHeaders.is100ContinueExpected(nettyReq)) { LOG.trace("Sending continue response."); ctx.writeAndFlush(new DefaultFullHttpResponse(HTTP_1_1, CONTINUE)); } diff --git a/hadoop-project/pom.xml b/hadoop-project/pom.xml index ff519e9639d..793ffb456b5 100644 --- a/hadoop-project/pom.xml +++ b/hadoop-project/pom.xml @@ -96,6 +96,8 @@ 2.0.0-M21 1.0.0-M33 + + 0.1.1-alpha-8fd74ed-SNAPSHOT 1.0-alpha-1 3.3.1 2.4.12 @@ -873,6 +875,43 @@ + + org.jctools + jctools-core + 1.2.1 + + + + org.apache.ratis + ratis-proto-shaded + ${ratis.version} + + + ratis-common + org.apache.ratis + ${ratis.version} + + + ratis-client + org.apache.ratis + ${ratis.version} + + + ratis-server + org.apache.ratis + ${ratis.version} + + + ratis-netty + org.apache.ratis + ${ratis.version} + + + ratis-grpc + org.apache.ratis + ${ratis.version} + + io.netty netty diff --git a/hadoop-tools/hadoop-ozone/pom.xml b/hadoop-tools/hadoop-ozone/pom.xml index d05c1e81c35..1cacbb3e9a2 100644 --- a/hadoop-tools/hadoop-ozone/pom.xml +++ b/hadoop-tools/hadoop-ozone/pom.xml @@ -19,7 +19,7 @@ org.apache.hadoop hadoop-project - 3.1.0-SNAPSHOT + 3.2.0-SNAPSHOT ../../hadoop-project hadoop-ozone-filesystem diff --git a/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/SLSRunner.java b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/SLSRunner.java index f4e0b3df5e8..e85973258a0 100644 --- a/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/SLSRunner.java +++ b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/SLSRunner.java @@ -216,13 +216,6 @@ public class SLSRunner extends Configured implements Tool { return Collections.unmodifiableMap(simulateInfoMap); } - /** - * @return an unmodifiable view of the simulated info map. 
-   */
-  public static Map getSimulateInfoMap() {
-    return Collections.unmodifiableMap(simulateInfoMap);
-  }
-
   public void setSimulationParams(TraceType inType, String[] inTraces,
       String nodes, String outDir, Set trackApps,
       boolean printsimulation) throws IOException, ClassNotFoundException {
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/placement/TestUserGroupMappingPlacementRule.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/placement/TestUserGroupMappingPlacementRule.java
old mode 100755
new mode 100644
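
The two fs.o3.* entries added to core-default.xml register the o3 scheme with FileSystem and AbstractFileSystem. A minimal client-side sketch of what that mapping enables, assuming the Ozone filesystem jar (org.apache.hadoop.fs.ozone.OzoneFileSystem) is on the classpath; the volume/bucket authority and key path in the URI are hypothetical examples, not names taken from this patch:

import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class O3SchemeExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // core-default.xml already maps fs.o3.impl, so no explicit
    // conf.set("fs.o3.impl", ...) is needed when the Ozone jar is present.
    // The "bucket.volume" authority below is a hypothetical layout.
    FileSystem fs = FileSystem.get(URI.create("o3://bucket.volume/"), conf);
    try (FSDataOutputStream out = fs.create(new Path("/demo/key1"))) {
      out.writeBytes("hello ozone");
    }
    fs.close();
  }
}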
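
The OZONE and FILE appenders added to log4j.properties use %X{component}, %X{function}, %X{resource}, %X{user} and %X{request} conversion specifiers, which are resolved from the logging MDC. A sketch of how such fields could be populated through the slf4j facade; the key names come from the pattern above, but where Ozone actually sets them is outside this diff, so the method and values here are illustrative:

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.slf4j.MDC;

public class OzoneMdcExample {
  // Loggers under org.apache.hadoop.ozone go to the OZONE and FILE appenders.
  private static final Logger LOG =
      LoggerFactory.getLogger("org.apache.hadoop.ozone.example");

  public static void handle(String user, String bucket, String requestId) {
    // Values placed in the MDC appear in ozone.log via the %X{...} specifiers.
    MDC.put("component", "objectstore");
    MDC.put("function", "createBucket");
    MDC.put("resource", bucket);
    MDC.put("user", user);
    MDC.put("request", requestId);
    try {
      LOG.debug("processing request"); // FILE logs at debug, console at info+
    } finally {
      MDC.clear(); // avoid leaking request context across threads
    }
  }
}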
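
ContainerLocationManagerImpl picks container and data paths by rotating a modulo counter over the configured locations. A stripped-down sketch of that selection policy in isolation; the class and method names here are illustrative and not part of the patch's API:

import java.nio.file.Path;
import java.nio.file.Paths;
import java.util.Arrays;
import java.util.List;

public class RoundRobinPlacement {
  private final List<Path> locations;
  private int currentIndex;

  public RoundRobinPlacement(List<Path> locations) {
    this.locations = locations;
  }

  /** Each call hands out the next location, wrapping around at the end. */
  public Path next(String containerName) {
    Path base = locations.get(currentIndex++ % locations.size());
    return base.resolve("containers").resolve(containerName);
  }

  public static void main(String[] args) {
    RoundRobinPlacement p = new RoundRobinPlacement(
        Arrays.asList(Paths.get("/data1"), Paths.get("/data2")));
    System.out.println(p.next("c1")); // /data1/containers/c1
    System.out.println(p.next("c2")); // /data2/containers/c2
    System.out.println(p.next("c3")); // /data1/containers/c3
  }
}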