From 2d8bb2ac0208303f7d5ecee07efb473a8568771a Mon Sep 17 00:00:00 2001 From: Arpit Agarwal Date: Wed, 24 Oct 2018 16:17:05 -0700 Subject: [PATCH] HDDS-719. Remove Ozone dependencies on Apache Hadoop 3.2.0. Contributed by Arpit Agarwal. (cherry picked from commit 244afaba4a2dd7db830a0479941e11efb114cca0) --- .../org/apache/hadoop/hdds/HddsUtils.java | 11 ++ .../hdds/scm/block/BlockManagerImpl.java | 4 +- .../hdds/scm/HddsWhiteboxTestUtils.java | 103 ++++++++++++++++++ .../ozone/container/ContainerTestHelper.java | 6 +- .../apache/hadoop/ozone/om/TestOmMetrics.java | 23 ++-- .../ITestOzoneContractGetFileStatus.java | 6 +- .../genesis/BenchMarkDatanodeDispatcher.java | 6 +- 7 files changed, 139 insertions(+), 20 deletions(-) create mode 100644 hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/HddsWhiteboxTestUtils.java diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsUtils.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsUtils.java index bd75f2d0907..7a42a103039 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsUtils.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsUtils.java @@ -41,9 +41,11 @@ import java.lang.reflect.Method; import java.net.InetSocketAddress; import java.net.UnknownHostException; import java.nio.file.Paths; +import java.util.Calendar; import java.util.Collection; import java.util.HashSet; import java.util.Map; +import java.util.TimeZone; import static org.apache.hadoop.hdfs.DFSConfigKeys .DFS_DATANODE_DNS_INTERFACE_KEY; @@ -69,6 +71,8 @@ public final class HddsUtils { public static final String OZONE_SCM_SERVICE_ID = "OzoneScmService"; public static final String OZONE_SCM_SERVICE_INSTANCE_ID = "OzoneScmServiceInstance"; + private static final TimeZone UTC_ZONE = TimeZone.getTimeZone("UTC"); + private static final int NO_PORT = -1; @@ -391,4 +395,11 @@ public final class HddsUtils { } } + /** + * Get the current UTC time in milliseconds. 
+ * @return the current UTC time in milliseconds. + */ + public static long getUtcTime() { + return Calendar.getInstance(UTC_ZONE).getTimeInMillis(); + } } diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/BlockManagerImpl.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/BlockManagerImpl.java index 30740c75c46..65e185e40cc 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/BlockManagerImpl.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/BlockManagerImpl.java @@ -18,6 +18,7 @@ package org.apache.hadoop.hdds.scm.block; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.StorageUnit; +import org.apache.hadoop.hdds.HddsUtils; import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ScmOps; import org.apache.hadoop.hdds.scm.ScmConfigKeys; import org.apache.hadoop.hdds.scm.ScmUtils; @@ -36,7 +37,6 @@ import org.apache.hadoop.hdds.server.events.EventPublisher; import org.apache.hadoop.metrics2.util.MBeans; import org.apache.hadoop.hdds.client.BlockID; import org.apache.hadoop.util.StringUtils; -import org.apache.hadoop.util.Time; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -469,7 +469,7 @@ public class BlockManagerImpl implements EventHandler, * @return unique long value */ public static synchronized long next() { - long utcTime = Time.getUtcTime(); + long utcTime = HddsUtils.getUtcTime(); if ((utcTime & 0xFFFF000000000000L) == 0) { return utcTime << Short.SIZE | (offset++ & 0x0000FFFF); } diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/HddsWhiteboxTestUtils.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/HddsWhiteboxTestUtils.java new file mode 100644 index 00000000000..abb96684076 --- /dev/null +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/HddsWhiteboxTestUtils.java @@ -0,0 +1,103 @@ +/** + * Licensed to the Apache Software Foundation 
(ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hdds.scm; + +import java.lang.reflect.Field; + + +/** + * This class includes some functions copied from Mockito's + * Whitebox class for portability reasons. + * + * Whitebox methods are accessed differently in different + * versions of Hadoop. Specifically the availability of the class + * changed from Apache Hadoop 3.1.0 to Hadoop 3.2.0. + * + * Duplicating the test code is ugly but it allows building + * HDDS portably. + */ +public final class HddsWhiteboxTestUtils { + + /** + * Private constructor to disallow construction. + */ + private HddsWhiteboxTestUtils() { + } + + /** + * Get the field of the target object. + * @param target target object + * @param field field name + * @return the field of the object + */ + public static Object getInternalState(Object target, String field) { + Class c = target.getClass(); + try { + Field f = getFieldFromHierarchy(c, field); + f.setAccessible(true); + return f.get(target); + } catch (Exception e) { + throw new RuntimeException( + "Unable to get internal state on a private field.", e); + } + } + + /** + * Set the field of the target object. 
+ * @param target target object + * @param field field name + * @param value value to set + */ + public static void setInternalState( + Object target, String field, Object value) { + Class c = target.getClass(); + try { + Field f = getFieldFromHierarchy(c, field); + f.setAccessible(true); + f.set(target, value); + } catch (Exception e) { + throw new RuntimeException( + "Unable to set internal state on a private field.", e); + } + } + + private static Field getFieldFromHierarchy(Class clazz, String field) { + Field f = getField(clazz, field); + while (f == null && clazz != Object.class) { + clazz = clazz.getSuperclass(); + f = getField(clazz, field); + } + if (f == null) { + throw new RuntimeException( + "You want me to set value to this field: '" + field + + "' on this class: '" + clazz.getSimpleName() + + "' but this field is not declared within hierarchy " + + "of this class!"); + } + return f; + } + + private static Field getField(Class clazz, String field) { + try { + return clazz.getDeclaredField(field); + } catch (NoSuchFieldException e) { + return null; + } + } +} diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ContainerTestHelper.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ContainerTestHelper.java index 324187c3a29..3969ddd5cf4 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ContainerTestHelper.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ContainerTestHelper.java @@ -20,6 +20,7 @@ package org.apache.hadoop.ozone.container; import com.google.common.base.Preconditions; import org.apache.hadoop.conf.StorageUnit; +import org.apache.hadoop.hdds.HddsUtils; import org.apache.hadoop.hdds.scm.container.common.helpers.PipelineID; import org.apache.hadoop.ozone.HddsDatanodeService; import org.apache.hadoop.ozone.MiniOzoneCluster; @@ -42,7 +43,6 @@ import org.apache.hadoop.ozone.OzoneConsts; import 
org.apache.hadoop.ozone.container.common.helpers.ChunkInfo; import org.apache.hadoop.ozone.container.common.helpers.BlockData; import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline; -import org.apache.hadoop.util.Time; import org.junit.Assert; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -591,11 +591,11 @@ public final class ContainerTestHelper { // Add 2ms delay so that localID based on UtcTime // won't collide. sleep(2); - return new BlockID(containerID, Time.getUtcTime()); + return new BlockID(containerID, HddsUtils.getUtcTime()); } public static long getTestContainerID() { - return Time.getUtcTime(); + return HddsUtils.getUtcTime(); } public static boolean isContainerClosed(MiniOzoneCluster cluster, diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmMetrics.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmMetrics.java index 8063981de70..665a9c76503 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmMetrics.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmMetrics.java @@ -21,6 +21,7 @@ import static org.apache.hadoop.test.MetricsAsserts.getMetrics; import java.io.IOException; +import org.apache.hadoop.hdds.scm.HddsWhiteboxTestUtils; import org.apache.hadoop.metrics2.MetricsRecordBuilder; import org.apache.hadoop.ozone.MiniOzoneCluster; import org.apache.hadoop.hdds.conf.OzoneConfiguration; @@ -68,8 +69,8 @@ public class TestOmMetrics { @Test public void testVolumeOps() throws IOException { VolumeManager volumeManager = - (VolumeManager) org.apache.hadoop.test.Whitebox - .getInternalState(ozoneManager, "volumeManager"); + (VolumeManager) HddsWhiteboxTestUtils.getInternalState( + ozoneManager, "volumeManager"); VolumeManager mockVm = Mockito.spy(volumeManager); Mockito.doNothing().when(mockVm).createVolume(null); @@ -79,7 +80,7 @@ public class TestOmMetrics { 
Mockito.doNothing().when(mockVm).setOwner(null, null); Mockito.doReturn(null).when(mockVm).listVolumes(null, null, null, 0); - org.apache.hadoop.test.Whitebox.setInternalState( + HddsWhiteboxTestUtils.setInternalState( ozoneManager, "volumeManager", mockVm); doVolumeOps(); @@ -100,7 +101,7 @@ public class TestOmMetrics { Mockito.doThrow(exception).when(mockVm).setOwner(null, null); Mockito.doThrow(exception).when(mockVm).listVolumes(null, null, null, 0); - org.apache.hadoop.test.Whitebox.setInternalState(ozoneManager, + HddsWhiteboxTestUtils.setInternalState(ozoneManager, "volumeManager", mockVm); doVolumeOps(); @@ -124,8 +125,8 @@ public class TestOmMetrics { @Test public void testBucketOps() throws IOException { BucketManager bucketManager = - (BucketManager) org.apache.hadoop.test.Whitebox - .getInternalState(ozoneManager, "bucketManager"); + (BucketManager) HddsWhiteboxTestUtils.getInternalState( + ozoneManager, "bucketManager"); BucketManager mockBm = Mockito.spy(bucketManager); Mockito.doNothing().when(mockBm).createBucket(null); @@ -134,7 +135,7 @@ public class TestOmMetrics { Mockito.doNothing().when(mockBm).setBucketProperty(null); Mockito.doReturn(null).when(mockBm).listBuckets(null, null, null, 0); - org.apache.hadoop.test.Whitebox.setInternalState( + HddsWhiteboxTestUtils.setInternalState( ozoneManager, "bucketManager", mockBm); doBucketOps(); @@ -153,7 +154,7 @@ public class TestOmMetrics { Mockito.doThrow(exception).when(mockBm).setBucketProperty(null); Mockito.doThrow(exception).when(mockBm).listBuckets(null, null, null, 0); - org.apache.hadoop.test.Whitebox.setInternalState( + HddsWhiteboxTestUtils.setInternalState( ozoneManager, "bucketManager", mockBm); doBucketOps(); @@ -174,7 +175,7 @@ public class TestOmMetrics { @Test public void testKeyOps() throws IOException { - KeyManager bucketManager = (KeyManager) org.apache.hadoop.test.Whitebox + KeyManager bucketManager = (KeyManager) HddsWhiteboxTestUtils .getInternalState(ozoneManager, 
"keyManager"); KeyManager mockKm = Mockito.spy(bucketManager); @@ -183,7 +184,7 @@ public class TestOmMetrics { Mockito.doReturn(null).when(mockKm).lookupKey(null); Mockito.doReturn(null).when(mockKm).listKeys(null, null, null, null, 0); - org.apache.hadoop.test.Whitebox.setInternalState( + HddsWhiteboxTestUtils.setInternalState( ozoneManager, "keyManager", mockKm); doKeyOps(); @@ -201,7 +202,7 @@ public class TestOmMetrics { Mockito.doThrow(exception).when(mockKm).listKeys( null, null, null, null, 0); - org.apache.hadoop.test.Whitebox.setInternalState( + HddsWhiteboxTestUtils.setInternalState( ozoneManager, "keyManager", mockKm); doKeyOps(); diff --git a/hadoop-ozone/ozonefs/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractGetFileStatus.java b/hadoop-ozone/ozonefs/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractGetFileStatus.java index fe442f76ca0..362b22f2831 100644 --- a/hadoop-ozone/ozonefs/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractGetFileStatus.java +++ b/hadoop-ozone/ozonefs/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractGetFileStatus.java @@ -23,6 +23,8 @@ import org.apache.hadoop.fs.contract.AbstractContractGetFileStatusTest; import org.apache.hadoop.fs.contract.AbstractFSContract; import org.junit.AfterClass; import org.junit.BeforeClass; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import java.io.IOException; @@ -32,6 +34,8 @@ import java.io.IOException; public class ITestOzoneContractGetFileStatus extends AbstractContractGetFileStatusTest { + private static final Logger LOG = + LoggerFactory.getLogger(ITestOzoneContractGetFileStatus.class); @BeforeClass public static void createCluster() throws IOException { @@ -50,7 +54,7 @@ public class ITestOzoneContractGetFileStatus @Override public void teardown() throws Exception { - getLogger().info("FS details {}", getFileSystem()); + LOG.info("FS details {}", getFileSystem()); super.teardown(); } diff --git 
a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genesis/BenchMarkDatanodeDispatcher.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genesis/BenchMarkDatanodeDispatcher.java index 362e53f2653..f9e57536ce4 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genesis/BenchMarkDatanodeDispatcher.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genesis/BenchMarkDatanodeDispatcher.java @@ -17,6 +17,7 @@ */ package org.apache.hadoop.ozone.genesis; +import org.apache.hadoop.hdds.HddsUtils; import org.apache.hadoop.ozone.container.common.impl.ContainerSet; import org.apache.hadoop.ozone.container.common.impl.HddsDispatcher; import org.apache.hadoop.ozone.container.common.statemachine @@ -32,7 +33,6 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hdds.client.BlockID; import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.util.Time; import org.openjdk.jmh.annotations.Benchmark; import org.openjdk.jmh.annotations.Level; import org.openjdk.jmh.annotations.Scope; @@ -115,7 +115,7 @@ public class BenchMarkDatanodeDispatcher { // Create containers for (int x = 0; x < INIT_CONTAINERS; x++) { - long containerID = Time.getUtcTime() + x; + long containerID = HddsUtils.getUtcTime() + x; ContainerCommandRequestProto req = getCreateContainerCommand(containerID); dispatcher.dispatch(req); containers.add(containerID); @@ -123,7 +123,7 @@ public class BenchMarkDatanodeDispatcher { } for (int x = 0; x < INIT_KEYS; x++) { - keys.add(Time.getUtcTime()+x); + keys.add(HddsUtils.getUtcTime()+x); } for (int x = 0; x < INIT_CHUNKS; x++) {