From 132fd6ba5886b6f0fb570a78cfe25d4f2980ab4d Mon Sep 17 00:00:00 2001
From: Tsuyoshi Ozawa
Date: Thu, 8 Jan 2015 14:51:57 +0900
Subject: [PATCH] HADOOP-11032. Replace use of Guava's Stopwatch with Hadoop's
 StopWatch. (ozawa)

(cherry picked from commit a6ed4894b518351bf1b3290e725a475570a21296)

Conflicts:
	hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/test/java/org/apache/hadoop/mapred/nativetask/kvtest/KVJob.java
---
 .../hadoop-common/CHANGES.txt                 |   3 +
 .../apache/hadoop/util/JvmPauseMonitor.java   |   6 +-
 .../org/apache/hadoop/util/StopWatch.java     | 108 ++++++++++++++++++
 .../hadoop/util/TestChunkedArrayList.java     |  11 +-
 .../apache/hadoop/util/TestDataChecksum.java  |  10 +-
 .../org/apache/hadoop/util/TestStopWatch.java |  62 ++++++++++
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt   |   3 +
 .../qjournal/client/IPCLoggerChannel.java     |   8 +-
 .../hadoop/hdfs/qjournal/server/Journal.java  |  17 ++-
 .../hadoop/hdfs/TestMultiThreadedHflush.java  |  11 +-
 .../hdfs/qjournal/server/TestJournalNode.java |   9 +-
 hadoop-mapreduce-project/CHANGES.txt          |   3 +
 .../apache/hadoop/mapred/FileInputFormat.java |  12 +-
 .../mapreduce/lib/input/FileInputFormat.java  |  12 +-
 14 files changed, 229 insertions(+), 46 deletions(-)
 create mode 100644 hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/StopWatch.java
 create mode 100644 hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestStopWatch.java

diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt
index 793a6b1745b..a5ea04ef096 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -103,6 +103,9 @@ Release 2.7.0 - UNRELEASED
     HADOOP-11390 Metrics 2 ganglia provider to include hostname in
     unresolved address problems. (Varun Saxena via stevel)
 
+    HADOOP-11032. Replace use of Guava's Stopwatch with Hadoop's StopWatch
+    (ozawa)
+
   OPTIMIZATIONS
 
     HADOOP-11323. WritableComparator#compare keeps reference to byte array.
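At every call site below the change is mechanical: Guava's com.google.common.base.Stopwatch becomes org.apache.hadoop.util.StopWatch, elapsedMillis() becomes now(TimeUnit.MILLISECONDS), and elapsedTime(unit) becomes now(unit). Newer Guava releases deprecate the public Stopwatch constructor in favor of static factories, so owning the class insulates Hadoop from that API churn. A minimal before/after sketch of the migration (the class name StopWatchMigration is illustrative only):

import java.util.concurrent.TimeUnit;
import org.apache.hadoop.util.StopWatch;

public class StopWatchMigration {
  public static void main(String[] args) throws InterruptedException {
    // Before: Stopwatch sw = new Stopwatch().start();  (com.google.common.base)
    StopWatch sw = new StopWatch().start();
    Thread.sleep(50);
    sw.stop();
    // Before: sw.elapsedMillis()
    long millis = sw.now(TimeUnit.MILLISECONDS);
    // Before: sw.elapsedTime(TimeUnit.MICROSECONDS)
    long micros = sw.now(TimeUnit.MICROSECONDS);
    System.out.println(millis + "ms (" + micros + "us)");
  }
}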
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/JvmPauseMonitor.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/JvmPauseMonitor.java
index e8af45e7462..1fe77964514 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/JvmPauseMonitor.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/JvmPauseMonitor.java
@@ -22,6 +22,7 @@ import java.lang.management.ManagementFactory;
 import java.util.List;
 import java.util.Map;
 import java.util.Set;
+import java.util.concurrent.TimeUnit;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -30,7 +31,6 @@ import org.apache.hadoop.conf.Configuration;
 
 import com.google.common.base.Joiner;
 import com.google.common.base.Preconditions;
-import com.google.common.base.Stopwatch;
 import com.google.common.collect.Lists;
 import com.google.common.collect.Maps;
 import com.google.common.collect.Sets;
@@ -172,7 +172,7 @@ public class JvmPauseMonitor {
   private class Monitor implements Runnable {
     @Override
     public void run() {
-      Stopwatch sw = new Stopwatch();
+      StopWatch sw = new StopWatch();
       Map<String, GcTimes> gcTimesBeforeSleep = getGcTimes();
       while (shouldRun) {
         sw.reset().start();
@@ -181,7 +181,7 @@ public class JvmPauseMonitor {
         } catch (InterruptedException ie) {
           return;
         }
-        long extraSleepTime = sw.elapsedMillis() - SLEEP_INTERVAL_MS;
+        long extraSleepTime = sw.now(TimeUnit.MILLISECONDS) - SLEEP_INTERVAL_MS;
         Map<String, GcTimes> gcTimesAfterSleep = getGcTimes();
 
         if (extraSleepTime > warnThresholdMs) {
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/StopWatch.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/StopWatch.java
new file mode 100644
index 00000000000..b9d0d0b6646
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/StopWatch.java
@@ -0,0 +1,108 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.util;
+
+import java.io.Closeable;
+import java.util.concurrent.TimeUnit;
+
+/**
+ * A simplified StopWatch implementation which can measure times in
+ * nanoseconds.
+ */
+public class StopWatch implements Closeable {
+  private boolean isStarted;
+  private long startNanos;
+  private long currentElapsedNanos;
+
+  public StopWatch() {
+  }
+
+  /**
+   * Check if the stopwatch is currently running.
+   * @return true if the stopwatch is running, false otherwise.
+   */
+  public boolean isRunning() {
+    return isStarted;
+  }
+
+  /**
+   * Start measuring elapsed time and mark the stopwatch as running.
+   * @return this instance of StopWatch.
+   */
+  public StopWatch start() {
+    if (isStarted) {
+      throw new IllegalStateException("StopWatch is already running");
+    }
+    isStarted = true;
+    startNanos = System.nanoTime();
+    return this;
+  }
+
+  /**
+   * Stop measuring elapsed time and mark the stopwatch as stopped.
+   * @return this instance of StopWatch.
+   */
+  public StopWatch stop() {
+    if (!isStarted) {
+      throw new IllegalStateException("StopWatch is already stopped");
+    }
+    long now = System.nanoTime();
+    isStarted = false;
+    currentElapsedNanos += now - startNanos;
+    return this;
+  }
+
+  /**
+   * Reset the elapsed time to zero and mark the stopwatch as stopped.
+   * @return this instance of StopWatch.
+   */
+  public StopWatch reset() {
+    currentElapsedNanos = 0;
+    isStarted = false;
+    return this;
+  }
+
+  /**
+   * @return current elapsed time in the specified time unit.
+   */
+  public long now(TimeUnit timeUnit) {
+    return timeUnit.convert(now(), TimeUnit.NANOSECONDS);
+  }
+
+  /**
+   * @return current elapsed time in nanoseconds.
+   */
+  public long now() {
+    return isStarted ?
+        System.nanoTime() - startNanos + currentElapsedNanos :
+        currentElapsedNanos;
+  }
+
+  @Override
+  public String toString() {
+    return String.valueOf(now());
+  }
+
+  @Override
+  public void close() {
+    if (isStarted) {
+      stop();
+    }
+  }
+}
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestChunkedArrayList.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestChunkedArrayList.java
index f8a2d49c580..a007f85c244 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestChunkedArrayList.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestChunkedArrayList.java
@@ -21,12 +21,11 @@ import static org.junit.Assert.*;
 
 import java.util.ArrayList;
 import java.util.Iterator;
+import java.util.concurrent.TimeUnit;
 
 import org.junit.Assert;
 import org.junit.Test;
 
-import com.google.common.base.Stopwatch;
-
 public class TestChunkedArrayList {
 
   @Test
@@ -71,24 +70,24 @@ public class TestChunkedArrayList {
     System.gc();
     {
       ArrayList arrayList = new ArrayList();
-      Stopwatch sw = new Stopwatch();
+      StopWatch sw = new StopWatch();
       sw.start();
       for (int i = 0; i < numElems; i++) {
         arrayList.add(obj);
       }
-      System.out.println("       ArrayList " + sw.elapsedMillis());
+      System.out.println("       ArrayList " + sw.now(TimeUnit.MILLISECONDS));
     }
 
     // test ChunkedArrayList
    System.gc();
     {
       ChunkedArrayList chunkedList = new ChunkedArrayList();
-      Stopwatch sw = new Stopwatch();
+      StopWatch sw = new StopWatch();
       sw.start();
       for (int i = 0; i < numElems; i++) {
         chunkedList.add(obj);
       }
-      System.out.println("ChunkedArrayList " + sw.elapsedMillis());
+      System.out.println("ChunkedArrayList " + sw.now(TimeUnit.MILLISECONDS));
     }
   }
 }
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestDataChecksum.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestDataChecksum.java
index 34fc32aa08f..73fd25a7fa2 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestDataChecksum.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestDataChecksum.java
@@ -21,8 +21,6 @@ import java.nio.ByteBuffer;
 import java.util.Random;
 import java.util.concurrent.TimeUnit;
 
-import com.google.common.base.Stopwatch;
-
 import org.apache.hadoop.fs.ChecksumException;
 import org.junit.Test;
 
@@ -147,19 +145,19 @@ public class TestDataChecksum {
     Harness h = new Harness(checksum, dataLength, true);
     for (int i = 0; i < NUM_RUNS; i++) {
-      Stopwatch s = new Stopwatch().start();
+      StopWatch s = new StopWatch().start();
       // calculate real checksum, make sure it passes
       checksum.calculateChunkedSums(h.dataBuf, h.checksumBuf);
       s.stop();
       System.err.println("Calculate run #" + i + ": " +
-          s.elapsedTime(TimeUnit.MICROSECONDS) + "us");
+          s.now(TimeUnit.MICROSECONDS) + "us");
 
-      s = new Stopwatch().start();
+      s = new StopWatch().start();
       // calculate real checksum, make sure it passes
       checksum.verifyChunkedSums(h.dataBuf, h.checksumBuf, "fake file", 0);
       s.stop();
       System.err.println("Verify run #" + i + ": " +
-          s.elapsedTime(TimeUnit.MICROSECONDS) + "us");
+          s.now(TimeUnit.MICROSECONDS) + "us");
     }
   }
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestStopWatch.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestStopWatch.java
new file mode 100644
index 00000000000..6f577b08d24
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestStopWatch.java
@@ -0,0 +1,62 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.util;
+
+import org.junit.Assert;
+import org.junit.Test;
+
+public class TestStopWatch {
+
+  @Test
+  public void testStartAndStop() throws Exception {
+    try (StopWatch sw = new StopWatch()) {
+      Assert.assertFalse(sw.isRunning());
+      sw.start();
+      Assert.assertTrue(sw.isRunning());
+      sw.stop();
+      Assert.assertFalse(sw.isRunning());
+    }
+  }
+
+  @Test
+  public void testStopInTryWithResource() throws Exception {
+    try (StopWatch sw = new StopWatch()) {
+      // make sure that no exception is thrown.
+    }
+  }
+
+  @Test
+  public void testExceptions() throws Exception {
+    StopWatch sw = new StopWatch();
+    try {
+      sw.stop();
+    } catch (Exception e) {
+      Assert.assertTrue("IllegalStateException is expected",
+          e instanceof IllegalStateException);
+    }
+    sw.reset();
+    sw.start();
+    try {
+      sw.start();
+    } catch (Exception e) {
+      Assert.assertTrue("IllegalStateException is expected",
+          e instanceof IllegalStateException);
+    }
+  }
+
+}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 2eb6714ef3c..8feac3e9689 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -222,6 +222,9 @@ Release 2.7.0 - UNRELEASED
     HDFS-7484. Make FSDirectory#addINode take existing INodes as its
     parameter. (jing9)
 
+    HADOOP-11032. Replace use of Guava's Stopwatch with Hadoop's StopWatch
+    (ozawa)
+
   OPTIMIZATIONS
 
     HDFS-7454. Reduce memory footprint for AclEntries in NameNode.
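The new class deliberately supports the two idioms TestStopWatch exercises above: because close() stops a running watch, a timed scope can be written with try-with-resources, and because stop() accumulates into currentElapsedNanos, a stopped watch can be restarted to time several intervals cumulatively. A short sketch of both, assuming only the StopWatch semantics shown above (StopWatchDemo is an illustrative name):

import java.util.concurrent.TimeUnit;
import org.apache.hadoop.util.StopWatch;

public class StopWatchDemo {
  public static void main(String[] args) throws InterruptedException {
    StopWatch sw = new StopWatch();
    // close() calls stop() on a running watch, so this block is timed
    // even if it exits early.
    try (StopWatch scoped = sw.start()) {
      Thread.sleep(20);
    }
    long first = sw.now(TimeUnit.MILLISECONDS);

    // stop() adds each interval to currentElapsedNanos, so restarting
    // resumes the running total instead of discarding it.
    sw.start();
    Thread.sleep(20);
    sw.stop();
    System.out.println(first + "ms first, "
        + sw.now(TimeUnit.MILLISECONDS) + "ms accumulated");
  }
}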
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/IPCLoggerChannel.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/IPCLoggerChannel.java
index 63d77bc6e29..2d3215b4449 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/IPCLoggerChannel.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/IPCLoggerChannel.java
@@ -52,10 +52,10 @@ import org.apache.hadoop.hdfs.server.protocol.RemoteEditLogManifest;
 import org.apache.hadoop.ipc.ProtobufRpcEngine;
 import org.apache.hadoop.ipc.RPC;
 import org.apache.hadoop.security.SecurityUtil;
+import org.apache.hadoop.util.StopWatch;
 
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Preconditions;
-import com.google.common.base.Stopwatch;
 import com.google.common.net.InetAddresses;
 import com.google.common.util.concurrent.FutureCallback;
 import com.google.common.util.concurrent.Futures;
@@ -143,7 +143,7 @@ public class IPCLoggerChannel implements AsyncLogger {
   /**
    * Stopwatch which starts counting on each heartbeat that is sent
    */
-  private final Stopwatch lastHeartbeatStopwatch = new Stopwatch();
+  private final StopWatch lastHeartbeatStopwatch = new StopWatch();
 
   private static final long HEARTBEAT_INTERVAL_MILLIS = 1000;
 
@@ -463,8 +463,8 @@ public class IPCLoggerChannel implements AsyncLogger {
    * written.
    */
   private void heartbeatIfNecessary() throws IOException {
-    if (lastHeartbeatStopwatch.elapsedMillis() > HEARTBEAT_INTERVAL_MILLIS ||
-        !lastHeartbeatStopwatch.isRunning()) {
+    if (lastHeartbeatStopwatch.now(TimeUnit.MILLISECONDS)
+        > HEARTBEAT_INTERVAL_MILLIS || !lastHeartbeatStopwatch.isRunning()) {
       try {
         getProxy().heartbeat(createReqInfo());
       } finally {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/Journal.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/Journal.java
index 0eccb235818..8989aebea3a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/Journal.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/Journal.java
@@ -65,11 +65,11 @@ import org.apache.hadoop.security.UserGroupInformation;
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Charsets;
 import com.google.common.base.Preconditions;
-import com.google.common.base.Stopwatch;
 import com.google.common.collect.ImmutableList;
 import com.google.common.collect.Range;
 import com.google.common.collect.Ranges;
 import com.google.protobuf.TextFormat;
+import org.apache.hadoop.util.StopWatch;
 
 /**
  * A JournalNode can manage journals for several clusters at once.
@@ -374,15 +374,20 @@ public class Journal implements Closeable {
     curSegment.writeRaw(records, 0, records.length);
     curSegment.setReadyToFlush();
-    Stopwatch sw = new Stopwatch();
+    StopWatch sw = new StopWatch();
     sw.start();
     curSegment.flush(shouldFsync);
     sw.stop();
-
-    metrics.addSync(sw.elapsedTime(TimeUnit.MICROSECONDS));
-    if (sw.elapsedTime(TimeUnit.MILLISECONDS) > WARN_SYNC_MILLIS_THRESHOLD) {
+
+    long nanoSeconds = sw.now();
+    metrics.addSync(
+        TimeUnit.MICROSECONDS.convert(nanoSeconds, TimeUnit.NANOSECONDS));
+    long milliSeconds = TimeUnit.MILLISECONDS.convert(
+        nanoSeconds, TimeUnit.NANOSECONDS);
+
+    if (milliSeconds > WARN_SYNC_MILLIS_THRESHOLD) {
       LOG.warn("Sync of transaction range " + firstTxnId + "-" + lastTxnId +
-          " took " + sw.elapsedTime(TimeUnit.MILLISECONDS) + "ms");
+          " took " + milliSeconds + "ms");
     }
 
     if (isLagging) {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMultiThreadedHflush.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMultiThreadedHflush.java
index 92c7672f04b..a839d85823b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMultiThreadedHflush.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMultiThreadedHflush.java
@@ -32,12 +32,11 @@ import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.metrics2.util.Quantile;
 import org.apache.hadoop.metrics2.util.SampleQuantiles;
+import org.apache.hadoop.util.StopWatch;
 import org.apache.hadoop.util.Tool;
 import org.apache.hadoop.util.ToolRunner;
 import org.junit.Test;
 
-import com.google.common.base.Stopwatch;
-
 /**
  * This class tests hflushing concurrently from many threads.
  */
@@ -100,10 +99,10 @@ public class TestMultiThreadedHflush {
     }
 
     private void doAWrite() throws IOException {
-      Stopwatch sw = new Stopwatch().start();
+      StopWatch sw = new StopWatch().start();
      stm.write(toWrite);
       stm.hflush();
-      long micros = sw.elapsedTime(TimeUnit.MICROSECONDS);
+      long micros = sw.now(TimeUnit.MICROSECONDS);
       quantiles.insert(micros);
     }
   }
@@ -276,12 +275,12 @@ public class TestMultiThreadedHflush {
     int replication = conf.getInt(DFSConfigKeys.DFS_REPLICATION_KEY,
         DFSConfigKeys.DFS_REPLICATION_DEFAULT);
 
-    Stopwatch sw = new Stopwatch().start();
+    StopWatch sw = new StopWatch().start();
     test.doMultithreadedWrites(conf, p, numThreads, writeSize, numWrites,
         replication);
     sw.stop();
 
-    System.out.println("Finished in " + sw.elapsedMillis() + "ms");
+    System.out.println("Finished in " + sw.now(TimeUnit.MILLISECONDS) + "ms");
     System.out.println("Latency quantiles (in microseconds):\n" +
         test.quantiles);
     return 0;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/server/TestJournalNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/server/TestJournalNode.java
index 56a4348da89..a68d9551781 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/server/TestJournalNode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/server/TestJournalNode.java
@@ -27,6 +27,7 @@ import java.io.File;
 import java.net.HttpURLConnection;
 import java.net.URL;
 import java.util.concurrent.ExecutionException;
+import java.util.concurrent.TimeUnit;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileUtil;
@@ -38,8 +39,6 @@ import org.apache.hadoop.hdfs.qjournal.QJMTestUtil;
 import org.apache.hadoop.hdfs.qjournal.client.IPCLoggerChannel;
 import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto;
 import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto;
-import org.apache.hadoop.hdfs.qjournal.server.Journal;
-import org.apache.hadoop.hdfs.qjournal.server.JournalNode;
 import org.apache.hadoop.hdfs.server.namenode.NameNodeLayoutVersion;
 import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
 import org.apache.hadoop.metrics2.MetricsRecordBuilder;
@@ -48,12 +47,12 @@ import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.test.MetricsAsserts;
 import org.apache.hadoop.test.PathUtils;
 import org.apache.hadoop.util.Shell;
+import org.apache.hadoop.util.StopWatch;
 import org.junit.After;
 import org.junit.Before;
 import org.junit.Test;
 
 import com.google.common.base.Charsets;
-import com.google.common.base.Stopwatch;
 import com.google.common.primitives.Bytes;
 import com.google.common.primitives.Ints;
 
@@ -325,11 +324,11 @@ public class TestJournalNode {
     ch.setEpoch(1);
     ch.startLogSegment(1, NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION).get();
 
-    Stopwatch sw = new Stopwatch().start();
+    StopWatch sw = new StopWatch().start();
     for (int i = 1; i < numEdits; i++) {
       ch.sendEdits(1L, i, 1, data).get();
     }
-    long time = sw.elapsedMillis();
+    long time = sw.now(TimeUnit.MILLISECONDS);
 
     System.err.println("Wrote " + numEdits + " batches of " + editsSize +
         " bytes in " + time + "ms");
diff --git a/hadoop-mapreduce-project/CHANGES.txt b/hadoop-mapreduce-project/CHANGES.txt
index 9bbc105b22c..ea0d11888c4 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -23,6 +23,9 @@ Release 2.7.0 - UNRELEASED
     MAPREDUCE-6046. Change the class name for logs in RMCommunicator
     (Sahil Takiar via devaraj)
 
+    HADOOP-11032. Replace use of Guava's Stopwatch with Hadoop's StopWatch
+    (ozawa)
+
   OPTIMIZATIONS
 
     MAPREDUCE-6169. MergeQueue should release reference to the current item
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/FileInputFormat.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/FileInputFormat.java
index 0ae56717ab9..5e45b49e9ec 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/FileInputFormat.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/FileInputFormat.java
@@ -28,6 +28,7 @@ import java.util.LinkedList;
 import java.util.List;
 import java.util.Map;
 import java.util.Set;
+import java.util.concurrent.TimeUnit;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -45,9 +46,9 @@ import org.apache.hadoop.net.NetworkTopology;
 import org.apache.hadoop.net.Node;
 import org.apache.hadoop.net.NodeBase;
 import org.apache.hadoop.util.ReflectionUtils;
+import org.apache.hadoop.util.StopWatch;
 import org.apache.hadoop.util.StringUtils;
 
-import com.google.common.base.Stopwatch;
 import com.google.common.collect.Iterables;
 
 /**
@@ -223,7 +224,7 @@ public abstract class FileInputFormat<K, V> implements InputFormat<K, V> {
         org.apache.hadoop.mapreduce.lib.input.FileInputFormat.LIST_STATUS_NUM_THREADS,
         org.apache.hadoop.mapreduce.lib.input.FileInputFormat.DEFAULT_LIST_STATUS_NUM_THREADS);
 
-    Stopwatch sw = new Stopwatch().start();
+    StopWatch sw = new StopWatch().start();
     if (numThreads == 1) {
       List<FileStatus> locatedFiles = singleThreadedListStatus(job, dirs, inputFilter, recursive);
       result = locatedFiles.toArray(new FileStatus[locatedFiles.size()]);
@@ -242,7 +243,8 @@ public abstract class FileInputFormat<K, V> implements InputFormat<K, V> {
 
     sw.stop();
     if (LOG.isDebugEnabled()) {
-      LOG.debug("Time taken to get FileStatuses: " + sw.elapsedMillis());
+      LOG.debug("Time taken to get FileStatuses: "
+          + sw.now(TimeUnit.MILLISECONDS));
     }
     LOG.info("Total input paths to process : " + result.length);
     return result;
@@ -309,7 +311,7 @@ public abstract class FileInputFormat<K, V> implements InputFormat<K, V> {
    * they're too big.*/
   public InputSplit[] getSplits(JobConf job, int numSplits)
     throws IOException {
-    Stopwatch sw = new Stopwatch().start();
+    StopWatch sw = new StopWatch().start();
     FileStatus[] files = listStatus(job);
 
     // Save the number of input files for metrics/loadgen
@@ -371,7 +373,7 @@ public abstract class FileInputFormat<K, V> implements InputFormat<K, V> {
     sw.stop();
     if (LOG.isDebugEnabled()) {
       LOG.debug("Total # of splits generated by getSplits: " + splits.size()
-          + ", TimeTaken: " + sw.elapsedMillis());
+          + ", TimeTaken: " + sw.now(TimeUnit.MILLISECONDS));
     }
     return splits.toArray(new FileSplit[splits.size()]);
   }
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/input/FileInputFormat.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/input/FileInputFormat.java
index 56fb9fcdf11..a3ffe019c8f 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/input/FileInputFormat.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/input/FileInputFormat.java
@@ -22,6 +22,7 @@ import java.io.IOException;
 import java.util.ArrayList;
 import java.util.List;
+import java.util.concurrent.TimeUnit;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
@@ -43,9 +44,9 @@ import org.apache.hadoop.mapreduce.JobContext;
 import org.apache.hadoop.mapreduce.Mapper;
 import org.apache.hadoop.mapreduce.security.TokenCache;
 import org.apache.hadoop.util.ReflectionUtils;
+import org.apache.hadoop.util.StopWatch;
 import org.apache.hadoop.util.StringUtils;
 
-import com.google.common.base.Stopwatch;
 import com.google.common.collect.Lists;
 
 /**
@@ -259,7 +260,7 @@ public abstract class FileInputFormat<K, V> extends InputFormat<K, V> {
     int numThreads = job.getConfiguration().getInt(LIST_STATUS_NUM_THREADS,
         DEFAULT_LIST_STATUS_NUM_THREADS);
 
-    Stopwatch sw = new Stopwatch().start();
+    StopWatch sw = new StopWatch().start();
     if (numThreads == 1) {
       result = singleThreadedListStatus(job, dirs, inputFilter, recursive);
     } else {
@@ -276,7 +277,8 @@ public abstract class FileInputFormat<K, V> extends InputFormat<K, V> {
 
     sw.stop();
     if (LOG.isDebugEnabled()) {
-      LOG.debug("Time taken to get FileStatuses: " + sw.elapsedMillis());
+      LOG.debug("Time taken to get FileStatuses: "
+          + sw.now(TimeUnit.MILLISECONDS));
     }
     LOG.info("Total input paths to process : " + result.size());
     return result;
@@ -376,7 +378,7 @@ public abstract class FileInputFormat<K, V> extends InputFormat<K, V> {
    * @throws IOException
    */
   public List<InputSplit> getSplits(JobContext job) throws IOException {
-    Stopwatch sw = new Stopwatch().start();
+    StopWatch sw = new StopWatch().start();
     long minSize = Math.max(getFormatMinSplitSize(), getMinSplitSize(job));
     long maxSize = getMaxSplitSize(job);
 
@@ -427,7 +429,7 @@ public abstract class FileInputFormat<K, V> extends InputFormat<K, V> {
     sw.stop();
     if (LOG.isDebugEnabled()) {
       LOG.debug("Total # of splits generated by getSplits: " + splits.size()
-          + ", TimeTaken: " + sw.elapsedMillis());
+          + ", TimeTaken: " + sw.now(TimeUnit.MILLISECONDS));
     }
     return splits;
   }
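Beyond the mechanical rename, the Journal.java hunk above changes how derived units are computed: the elapsed nanoseconds are read once via now() and every other unit is derived from that single value with TimeUnit.convert, so the metric and the warning log describe the same measurement. A sketch of that pattern under the same API (WARN_MILLIS and the sleep are hypothetical stand-ins for Journal's WARN_SYNC_MILLIS_THRESHOLD and the segment flush):

import java.util.concurrent.TimeUnit;
import org.apache.hadoop.util.StopWatch;

public class SyncTimingSketch {
  // Hypothetical threshold standing in for WARN_SYNC_MILLIS_THRESHOLD.
  private static final long WARN_MILLIS = 1000;

  public static void main(String[] args) throws InterruptedException {
    StopWatch sw = new StopWatch().start();
    Thread.sleep(5); // stands in for curSegment.flush(shouldFsync)
    sw.stop();

    long nanos = sw.now(); // single reading of the elapsed time
    long micros = TimeUnit.MICROSECONDS.convert(nanos, TimeUnit.NANOSECONDS);
    long millis = TimeUnit.MILLISECONDS.convert(nanos, TimeUnit.NANOSECONDS);
    System.out.println("sync took " + micros + "us");
    if (millis > WARN_MILLIS) {
      System.err.println("sync took " + millis + "ms");
    }
  }
}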