diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt
index 827a58d37ee..864ece0b171 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -41,6 +41,9 @@ Release 2.0.3-alpha - Unreleased
HADOOP-8909. Hadoop Common Maven protoc calls must not depend on external
sh script. (Chris Nauroth via suresh)
+ HADOOP-8911. CRLF characters in source and text files.
+ (Raja Aluri via suresh)
+
OPTIMIZATIONS
HADOOP-8866. SampleQuantiles#query is O(N^2) instead of O(N). (Andrew Wang
diff --git a/hadoop-common-project/hadoop-common/src/main/docs/releasenotes.html b/hadoop-common-project/hadoop-common/src/main/docs/releasenotes.html
index e3915caf83a..bd13cfb9b61 100644
--- a/hadoop-common-project/hadoop-common/src/main/docs/releasenotes.html
+++ b/hadoop-common-project/hadoop-common/src/main/docs/releasenotes.html
@@ -15,8 +15,8 @@ These release notes include new developer and user-facing incompatibilities, fea
YARN-137.
Major improvement reported by Siddharth Seth and fixed by Siddharth Seth (scheduler) Change the default scheduler to the CapacityScheduler
-
There are some bugs in the FifoScheduler at the moment - it doesn't distribute tasks across nodes and has some headroom (available resource) issues.
-That's not the best experience for users trying out the 2.0 branch. The CS with the default configuration of a single queue behaves the same as the FifoScheduler and doesn't have these issues.
+
There are some bugs in the FifoScheduler at the moment - it doesn't distribute tasks across nodes and has some headroom (available resource) issues.
+That's not the best experience for users trying out the 2.0 branch. The CS with the default configuration of a single queue behaves the same as the FifoScheduler and doesn't have these issues.
YARN-108.
Critical bug reported by Jason Lowe and fixed by Jason Lowe (nodemanager)
@@ -45,73 +45,73 @@ That's not the best experience for users trying out the 2.0 branch. The CS with
YARN-79.
Major bug reported by Bikas Saha and fixed by Vinod Kumar Vavilapalli (client) Calling YarnClientImpl.close throws Exception
-
The following exception is thrown
-===========
-*org.apache.hadoop.HadoopIllegalArgumentException: Cannot close proxy - is not Closeable or does not provide closeable invocation handler class org.apache.hadoop.yarn.api.impl.pb.client.ClientRMProtocolPBClientImpl*
- *at org.apache.hadoop.ipc.RPC.stopProxy(RPC.java:624)*
- *at org.hadoop.yarn.client.YarnClientImpl.stop(YarnClientImpl.java:102)*
- at org.apache.hadoop.yarn.applications.unmanagedamlauncher.UnmanagedAMLauncher.run(UnmanagedAMLauncher.java:336)
- at org.apache.hadoop.yarn.applications.unmanagedamlauncher.TestUnmanagedAMLauncher.testDSShell(TestUnmanagedAMLauncher.java:156)
- at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
- at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:39)
- at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:25)
- at java.lang.reflect.Method.invoke(Method.java:597)
- at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:44)
- at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:15)
- at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:41)
- at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:20)
- at org.junit.runners.BlockJUnit4ClassRunner.runNotIgnored(BlockJUnit4ClassRunner.java:79)
- at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:71)
- at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:49)
- at org.junit.runners.ParentRunner$3.run(ParentRunner.java:193)
- at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:52)
- at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:191)
- at org.junit.runners.ParentRunner.access$000(ParentRunner.java:42)
- at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:184)
- at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:28)
- at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:31)
- at org.junit.runners.ParentRunner.run(ParentRunner.java:236)
- at org.apache.maven.surefire.junit4.JUnit4Provider.execute(JUnit4Provider.java:236)
- at org.apache.maven.surefire.junit4.JUnit4Provider.executeTestSet(JUnit4Provider.java:134)
- at org.apache.maven.surefire.junit4.JUnit4Provider.invoke(JUnit4Provider.java:113)
- at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
- at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:39)
- at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:25)
- at java.lang.reflect.Method.invoke(Method.java:597)
- at org.apache.maven.surefire.util.ReflectionUtils.invokeMethodWithArray(ReflectionUtils.java:189)
- at org.apache.maven.surefire.booter.ProviderFactory$ProviderProxy.invoke(ProviderFactory.java:165)
- at org.apache.maven.surefire.booter.ProviderFactory.invokeProvider(ProviderFactory.java:85)
- at org.apache.maven.surefire.booter.ForkedBooter.runSuitesInProcess(ForkedBooter.java:103)
- at org.apache.maven.surefire.booter.ForkedBooter.main(ForkedBooter.java:74)
+
The following exception is thrown
+===========
+*org.apache.hadoop.HadoopIllegalArgumentException: Cannot close proxy - is not Closeable or does not provide closeable invocation handler class org.apache.hadoop.yarn.api.impl.pb.client.ClientRMProtocolPBClientImpl*
+ *at org.apache.hadoop.ipc.RPC.stopProxy(RPC.java:624)*
+ *at org.hadoop.yarn.client.YarnClientImpl.stop(YarnClientImpl.java:102)*
+ at org.apache.hadoop.yarn.applications.unmanagedamlauncher.UnmanagedAMLauncher.run(UnmanagedAMLauncher.java:336)
+ at org.apache.hadoop.yarn.applications.unmanagedamlauncher.TestUnmanagedAMLauncher.testDSShell(TestUnmanagedAMLauncher.java:156)
+ at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
+ at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:39)
+ at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:25)
+ at java.lang.reflect.Method.invoke(Method.java:597)
+ at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:44)
+ at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:15)
+ at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:41)
+ at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:20)
+ at org.junit.runners.BlockJUnit4ClassRunner.runNotIgnored(BlockJUnit4ClassRunner.java:79)
+ at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:71)
+ at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:49)
+ at org.junit.runners.ParentRunner$3.run(ParentRunner.java:193)
+ at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:52)
+ at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:191)
+ at org.junit.runners.ParentRunner.access$000(ParentRunner.java:42)
+ at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:184)
+ at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:28)
+ at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:31)
+ at org.junit.runners.ParentRunner.run(ParentRunner.java:236)
+ at org.apache.maven.surefire.junit4.JUnit4Provider.execute(JUnit4Provider.java:236)
+ at org.apache.maven.surefire.junit4.JUnit4Provider.executeTestSet(JUnit4Provider.java:134)
+ at org.apache.maven.surefire.junit4.JUnit4Provider.invoke(JUnit4Provider.java:113)
+ at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
+ at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:39)
+ at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:25)
+ at java.lang.reflect.Method.invoke(Method.java:597)
+ at org.apache.maven.surefire.util.ReflectionUtils.invokeMethodWithArray(ReflectionUtils.java:189)
+ at org.apache.maven.surefire.booter.ProviderFactory$ProviderProxy.invoke(ProviderFactory.java:165)
+ at org.apache.maven.surefire.booter.ProviderFactory.invokeProvider(ProviderFactory.java:85)
+ at org.apache.maven.surefire.booter.ForkedBooter.runSuitesInProcess(ForkedBooter.java:103)
+ at org.apache.maven.surefire.booter.ForkedBooter.main(ForkedBooter.java:74)
===========
YARN-75.
Major bug reported by Siddharth Seth and fixed by Siddharth Seth RMContainer should handle a RELEASE event while RUNNING
-
An AppMaster can send a container release at any point. Currently this results in an exception, if this is done while the RM considers the container to be RUNNING.
+
An AppMaster can send a container release at any point. Currently this results in an exception, if this is done while the RM considers the container to be RUNNING.
The event not being processed correctly also implies that these containers do not show up in the Completed Container List seen by the AM (AMRMProtocol). MR-3902 depends on this set being complete.
YARN-68.
Major bug reported by patrick white and fixed by Daryn Sharp (nodemanager) NodeManager will refuse to shutdown indefinitely due to container log aggregation
-
The nodemanager is able to get into a state where containermanager.logaggregation.AppLogAggregatorImpl will apparently wait
-indefinitely for log aggregation to complete for an application, even if that application has abnormally terminated and is no longer present.
-
-Observed behavior is that an attempt to stop the nodemanager daemon will return but have no effect, the nm log continually displays messages similar to this:
-
-[Thread-1]2012-08-21 17:44:07,581 INFO
-org.apache.hadoop.yarn.server.nodemanager.containermanager.logaggregation.AppLogAggregatorImpl:
-Waiting for aggregation to complete for application_1345221477405_2733
-
-The only recovery we found to work was to 'kill -9' the nm process.
-
-What exactly causes the NM to enter this state is unclear, but we do see this behavior reliably when the NM has run a task which failed. For example, when debugging oozie distcp actions and having a distcp map task fail, the NM that was running the container will enter this state, and a shutdown on said NM will never complete; 'never' in this case was waiting for 2 hours before killing the nodemanager process.
+
The nodemanager is able to get into a state where containermanager.logaggregation.AppLogAggregatorImpl will apparently wait
+indefinitely for log aggregation to complete for an application, even if that application has abnormally terminated and is no longer present.
+
+Observed behavior is that an attempt to stop the nodemanager daemon will return but have no effect, the nm log continually displays messages similar to this:
+
+[Thread-1]2012-08-21 17:44:07,581 INFO
+org.apache.hadoop.yarn.server.nodemanager.containermanager.logaggregation.AppLogAggregatorImpl:
+Waiting for aggregation to complete for application_1345221477405_2733
+
+The only recovery we found to work was to 'kill -9' the nm process.
+
+What exactly causes the NM to enter this state is unclear, but we do see this behavior reliably when the NM has run a task which failed. For example, when debugging oozie distcp actions and having a distcp map task fail, the NM that was running the container will enter this state, and a shutdown on said NM will never complete; 'never' in this case was waiting for 2 hours before killing the nodemanager process.
YARN-66.
Critical bug reported by Thomas Graves and fixed by Thomas Graves (nodemanager) aggregated logs permissions not set properly
-
If the default file permissions are set to something restrictive - like 700 - application logs get aggregated and created with those restrictive file permissions, which don't allow the history server to serve them up.
-
-
-They need to be created with group readable similar to how log aggregation sets up the directory permissions.
+
If the default file permissions are set to something restrictive - like 700 - application logs get aggregated and created with those restrictive file permissions, which don't allow the history server to serve them up.
+
+
+They need to be created with group readable similar to how log aggregation sets up the directory permissions.
YARN-63.
Major bug reported by Jason Lowe and fixed by Jason Lowe (resourcemanager)
@@ -128,47 +128,47 @@ They need to be created with group readable similar to how log aggregation sets
YARN-42.
Major bug reported by Devaraj K and fixed by Devaraj K (nodemanager) Node Manager throws NPE on startup
-
NM throws NPE on startup if it doesn't have permissions on the NM local dirs
-
-
-{code:xml}
-2012-05-14 16:32:13,468 FATAL org.apache.hadoop.yarn.server.nodemanager.NodeManager: Error starting NodeManager
-org.apache.hadoop.yarn.YarnException: Failed to initialize LocalizationService
- at org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.ResourceLocalizationService.init(ResourceLocalizationService.java:202)
- at org.apache.hadoop.yarn.service.CompositeService.init(CompositeService.java:58)
- at org.apache.hadoop.yarn.server.nodemanager.containermanager.ContainerManagerImpl.init(ContainerManagerImpl.java:183)
- at org.apache.hadoop.yarn.service.CompositeService.init(CompositeService.java:58)
- at org.apache.hadoop.yarn.server.nodemanager.NodeManager.init(NodeManager.java:166)
- at org.apache.hadoop.yarn.server.nodemanager.NodeManager.initAndStartNodeManager(NodeManager.java:268)
- at org.apache.hadoop.yarn.server.nodemanager.NodeManager.main(NodeManager.java:284)
-Caused by: java.io.IOException: mkdir of /mrv2/tmp/nm-local-dir/usercache failed
- at org.apache.hadoop.fs.FileSystem.primitiveMkdir(FileSystem.java:907)
- at org.apache.hadoop.fs.DelegateToFileSystem.mkdir(DelegateToFileSystem.java:143)
- at org.apache.hadoop.fs.FilterFs.mkdir(FilterFs.java:189)
- at org.apache.hadoop.fs.FileContext$4.next(FileContext.java:706)
- at org.apache.hadoop.fs.FileContext$4.next(FileContext.java:703)
- at org.apache.hadoop.fs.FileContext$FSLinkResolver.resolve(FileContext.java:2325)
- at org.apache.hadoop.fs.FileContext.mkdir(FileContext.java:703)
- at org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.ResourceLocalizationService.init(ResourceLocalizationService.java:188)
- ... 6 more
-2012-05-14 16:32:13,472 INFO org.apache.hadoop.yarn.service.CompositeService: Error stopping org.apache.hadoop.yarn.server.nodemanager.containermanager.loghandler.NonAggregatingLogHandler
-java.lang.NullPointerException
- at org.apache.hadoop.yarn.server.nodemanager.containermanager.loghandler.NonAggregatingLogHandler.stop(NonAggregatingLogHandler.java:82)
- at org.apache.hadoop.yarn.service.CompositeService.stop(CompositeService.java:99)
- at org.apache.hadoop.yarn.service.CompositeService.stop(CompositeService.java:89)
- at org.apache.hadoop.yarn.server.nodemanager.containermanager.ContainerManagerImpl.stop(ContainerManagerImpl.java:266)
- at org.apache.hadoop.yarn.service.CompositeService.stop(CompositeService.java:99)
- at org.apache.hadoop.yarn.service.CompositeService.stop(CompositeService.java:89)
- at org.apache.hadoop.yarn.server.nodemanager.NodeManager.stop(NodeManager.java:182)
- at org.apache.hadoop.yarn.service.CompositeService$CompositeServiceShutdownHook.run(CompositeService.java:122)
- at org.apache.hadoop.util.ShutdownHookManager$1.run(ShutdownHookManager.java:54)
-{code}
+
NM throws NPE on startup if it doesn't have permissions on the NM local dirs
+
+
+{code:xml}
+2012-05-14 16:32:13,468 FATAL org.apache.hadoop.yarn.server.nodemanager.NodeManager: Error starting NodeManager
+org.apache.hadoop.yarn.YarnException: Failed to initialize LocalizationService
+ at org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.ResourceLocalizationService.init(ResourceLocalizationService.java:202)
+ at org.apache.hadoop.yarn.service.CompositeService.init(CompositeService.java:58)
+ at org.apache.hadoop.yarn.server.nodemanager.containermanager.ContainerManagerImpl.init(ContainerManagerImpl.java:183)
+ at org.apache.hadoop.yarn.service.CompositeService.init(CompositeService.java:58)
+ at org.apache.hadoop.yarn.server.nodemanager.NodeManager.init(NodeManager.java:166)
+ at org.apache.hadoop.yarn.server.nodemanager.NodeManager.initAndStartNodeManager(NodeManager.java:268)
+ at org.apache.hadoop.yarn.server.nodemanager.NodeManager.main(NodeManager.java:284)
+Caused by: java.io.IOException: mkdir of /mrv2/tmp/nm-local-dir/usercache failed
+ at org.apache.hadoop.fs.FileSystem.primitiveMkdir(FileSystem.java:907)
+ at org.apache.hadoop.fs.DelegateToFileSystem.mkdir(DelegateToFileSystem.java:143)
+ at org.apache.hadoop.fs.FilterFs.mkdir(FilterFs.java:189)
+ at org.apache.hadoop.fs.FileContext$4.next(FileContext.java:706)
+ at org.apache.hadoop.fs.FileContext$4.next(FileContext.java:703)
+ at org.apache.hadoop.fs.FileContext$FSLinkResolver.resolve(FileContext.java:2325)
+ at org.apache.hadoop.fs.FileContext.mkdir(FileContext.java:703)
+ at org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.ResourceLocalizationService.init(ResourceLocalizationService.java:188)
+ ... 6 more
+2012-05-14 16:32:13,472 INFO org.apache.hadoop.yarn.service.CompositeService: Error stopping org.apache.hadoop.yarn.server.nodemanager.containermanager.loghandler.NonAggregatingLogHandler
+java.lang.NullPointerException
+ at org.apache.hadoop.yarn.server.nodemanager.containermanager.loghandler.NonAggregatingLogHandler.stop(NonAggregatingLogHandler.java:82)
+ at org.apache.hadoop.yarn.service.CompositeService.stop(CompositeService.java:99)
+ at org.apache.hadoop.yarn.service.CompositeService.stop(CompositeService.java:89)
+ at org.apache.hadoop.yarn.server.nodemanager.containermanager.ContainerManagerImpl.stop(ContainerManagerImpl.java:266)
+ at org.apache.hadoop.yarn.service.CompositeService.stop(CompositeService.java:99)
+ at org.apache.hadoop.yarn.service.CompositeService.stop(CompositeService.java:89)
+ at org.apache.hadoop.yarn.server.nodemanager.NodeManager.stop(NodeManager.java:182)
+ at org.apache.hadoop.yarn.service.CompositeService$CompositeServiceShutdownHook.run(CompositeService.java:122)
+ at org.apache.hadoop.util.ShutdownHookManager$1.run(ShutdownHookManager.java:54)
+{code}
YARN-39.
Critical sub-task reported by Vinod Kumar Vavilapalli and fixed by Vinod Kumar Vavilapalli RM-NM secret-keys should be randomly generated and rolled every so often
-
- RM should generate the master-key randomly
- - The master-key should roll every so often
+
- RM should generate the master-key randomly
+ - The master-key should roll every so often
- NM should remember old expired keys so that already doled out container-requests can be satisfied.
YARN-37.
Minor bug reported by Jason Lowe and fixed by Mayank Bansal (resourcemanager)
@@ -177,42 +177,42 @@ java.lang.NullPointerException
YARN-36.
Blocker bug reported by Eli Collins and fixed by Radim Kolar branch-2.1.0-alpha doesn't build
-
branch-2.1.0-alpha doesn't build due to the following. Per YARN-1 I updated the mvn version to be 2.1.0-SNAPSHOT, before I hit this issue it didn't compile due to the bogus version.
-
-{noformat}
-hadoop-branch-2.1.0-alpha $ mvn compile
-[INFO] Scanning for projects...
-[ERROR] The build could not read 1 project -> [Help 1]
-[ERROR]
-[ERROR] The project org.apache.hadoop:hadoop-yarn-project:2.1.0-SNAPSHOT (/home/eli/src/hadoop-branch-2.1.0-alpha/hadoop-yarn-project/pom.xml) has 1 error
-[ERROR] 'dependencies.dependency.version' for org.hsqldb:hsqldb:jar is missing. @ line 160, column 17
+
branch-2.1.0-alpha doesn't build due to the following. Per YARN-1 I updated the mvn version to be 2.1.0-SNAPSHOT, before I hit this issue it didn't compile due to the bogus version.
+
+{noformat}
+hadoop-branch-2.1.0-alpha $ mvn compile
+[INFO] Scanning for projects...
+[ERROR] The build could not read 1 project -> [Help 1]
+[ERROR]
+[ERROR] The project org.apache.hadoop:hadoop-yarn-project:2.1.0-SNAPSHOT (/home/eli/src/hadoop-branch-2.1.0-alpha/hadoop-yarn-project/pom.xml) has 1 error
+[ERROR] 'dependencies.dependency.version' for org.hsqldb:hsqldb:jar is missing. @ line 160, column 17
{noformat}
YARN-31.
Major bug reported by Thomas Graves and fixed by Thomas Graves TestDelegationTokenRenewer fails on jdk7
-
TestDelegationTokenRenewer fails when run with jdk7.
-
+
TestDelegationTokenRenewer fails when run with jdk7.
+
With JDK7, test methods run in an undefined order. Here the test expects testDTRenewal to run first, but it no longer does.
YARN-29.
Major bug reported by Vinod Kumar Vavilapalli and fixed by Vinod Kumar Vavilapalli (client) Add a yarn-client module
-
I see that we are duplicating (some) code for talking to RM via client API. In this light, a yarn-client module will be useful so that clients of all frameworks can use/extend it.
-
+
I see that we are duplicating (some) code for talking to RM via client API. In this light, a yarn-client module will be useful so that clients of all frameworks can use/extend it.
+
And that same module can be the destination for all the YARN's command line tools.
YARN-27.
Major bug reported by Ramya Sunil and fixed by Arun C Murthy Failed refreshQueues due to misconfiguration prevents further refreshing of queues
-
Stumbled upon this problem while refreshing queues with incorrect configuration. The exact scenario was:
-1. Added a new queue "newQueue" without defining its capacity.
-2. "bin/mapred queue -refreshQueues" fails correctly with "Illegal capacity of -1 for queue root.newQueue"
-3. However, after defining the capacity of "newQueue", a second "bin/mapred queue -refreshQueues" throws "org.apache.hadoop.metrics2.MetricsException: Metrics source QueueMetrics,q0=root,q1=newQueue already exists!" Also see Hadoop:name=QueueMetrics,q0=root,q1=newQueue,service=ResourceManager metrics being available even though the queue was not added.
-
+
Stumbled upon this problem while refreshing queues with incorrect configuration. The exact scenario was:
+1. Added a new queue "newQueue" without defining its capacity.
+2. "bin/mapred queue -refreshQueues" fails correctly with "Illegal capacity of -1 for queue root.newQueue"
+3. However, after defining the capacity of "newQueue", a second "bin/mapred queue -refreshQueues" throws "org.apache.hadoop.metrics2.MetricsException: Metrics source QueueMetrics,q0=root,q1=newQueue already exists!" Also see Hadoop:name=QueueMetrics,q0=root,q1=newQueue,service=ResourceManager metrics being available even though the queue was not added.
+
The expected behavior would be to refresh the queues correctly and allow addition of "newQueue".
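For the misconfiguration described in this entry, the operator-side fix is simply to give the new queue an explicit capacity before running -refreshQueues. A minimal sketch, written as a Java Configuration snippet for illustration; the property names (yarn.scheduler.capacity.root.queues and yarn.scheduler.capacity.root.&lt;queue&gt;.capacity) are the usual CapacityScheduler keys and are assumptions, not quoted from this note:
{code}
import org.apache.hadoop.conf.Configuration;

// Minimal sketch: give "newQueue" an explicit capacity before refreshing,
// so refreshQueues does not fail with "Illegal capacity of -1".
public class NewQueueCapacitySketch {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    conf.set("yarn.scheduler.capacity.root.queues", "default,newQueue");
    conf.setInt("yarn.scheduler.capacity.root.default.capacity", 60);
    conf.setInt("yarn.scheduler.capacity.root.newQueue.capacity", 40);
    System.out.println(conf.get("yarn.scheduler.capacity.root.newQueue.capacity"));
  }
}
{code}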
YARN-25.
Major bug reported by Thomas Graves and fixed by Robert Joseph Evans remove old aggregated logs
-
Currently the aggregated user logs under NM_REMOTE_APP_LOG_DIR are never removed. We should have a mechanism to remove them after a certain period.
-
+
Currently the aggregated user logs under NM_REMOTE_APP_LOG_DIR are never removed. We should have a mechanism to remove them after a certain period.
+
It might make sense for job history server to remove them.
YARN-22.
Minor bug reported by Eli Collins and fixed by Mayank Bansal
@@ -221,29 +221,29 @@ It might make sense for job history server to remove them.
YARN-15.
Critical bug reported by Alejandro Abdelnur and fixed by Arun C Murthy (nodemanager) YarnConfiguration DEFAULT_YARN_APPLICATION_CLASSPATH should be updated
-
-{code}
- /**
- * Default CLASSPATH for YARN applications. A comma-separated list of
- * CLASSPATH entries
- */
- public static final String[] DEFAULT_YARN_APPLICATION_CLASSPATH = {
- "$HADOOP_CONF_DIR", "$HADOOP_COMMON_HOME/share/hadoop/common/*",
- "$HADOOP_COMMON_HOME/share/hadoop/common/lib/*",
- "$HADOOP_HDFS_HOME/share/hadoop/hdfs/*",
- "$HADOOP_HDFS_HOME/share/hadoop/hdfs/lib/*",
- "$YARN_HOME/share/hadoop/mapreduce/*",
- "$YARN_HOME/share/hadoop/mapreduce/lib/*"};
-{code}
-
+
+{code}
+ /**
+ * Default CLASSPATH for YARN applications. A comma-separated list of
+ * CLASSPATH entries
+ */
+ public static final String[] DEFAULT_YARN_APPLICATION_CLASSPATH = {
+ "$HADOOP_CONF_DIR", "$HADOOP_COMMON_HOME/share/hadoop/common/*",
+ "$HADOOP_COMMON_HOME/share/hadoop/common/lib/*",
+ "$HADOOP_HDFS_HOME/share/hadoop/hdfs/*",
+ "$HADOOP_HDFS_HOME/share/hadoop/hdfs/lib/*",
+ "$YARN_HOME/share/hadoop/mapreduce/*",
+ "$YARN_HOME/share/hadoop/mapreduce/lib/*"};
+{code}
+
It should have {{share/yarn/}} and MR should add the {{share/mapreduce/}} (another JIRA?)
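A minimal sketch of how a client can resolve the effective application classpath, falling back to the compiled-in defaults shown above. YarnConfiguration.YARN_APPLICATION_CLASSPATH and DEFAULT_YARN_APPLICATION_CLASSPATH are the YARN API names; the surrounding wiring (and the ':' separator) is illustrative only:
{code}
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.yarn.conf.YarnConfiguration;

// Minimal sketch: resolve the application classpath, falling back to the
// compiled-in DEFAULT_YARN_APPLICATION_CLASSPATH entries shown above.
public class AppClasspathSketch {
  public static void main(String[] args) {
    Configuration conf = new YarnConfiguration();
    String[] entries = conf.getStrings(
        YarnConfiguration.YARN_APPLICATION_CLASSPATH,
        YarnConfiguration.DEFAULT_YARN_APPLICATION_CLASSPATH);
    StringBuilder classpath = new StringBuilder("./*");
    for (String entry : entries) {
      classpath.append(':').append(entry.trim()); // ':' used here for brevity
    }
    System.out.println(classpath);
  }
}
{code}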
YARN-14.
Major bug reported by Jason Lowe and fixed by Jason Lowe (nodemanager) Symlinks to peer distributed cache files no longer work
-
Trying to create a symlink to another file that is specified for the distributed cache will fail to create the link. For example:
-
-hadoop jar ... -files "x,y,x#z"
-
+
Trying to create a symlink to another file that is specified for the distributed cache will fail to create the link. For example:
+
+hadoop jar ... -files "x,y,x#z"
+
will localize the files x and y as x and y, but the z symlink for x will not be created. This is a regression from 1.x behavior.
YARN-13.
Critical bug reported by Todd Lipcon and fixed by
@@ -252,13 +252,13 @@ will localize the files x and y as x and y, but the z symlink for x will not be
YARN-12.
Major bug reported by Junping Du and fixed by Junping Du (scheduler) Several Findbugs issues with new FairScheduler in YARN
-
The FairScheduler feature was recently added to YARN. As the recent PreCommit test from MAPREDUCE-4309 shows, Findbugs found several bugs related to the FairScheduler:
-org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.FairSchedulerEventLog.shutdown() might ignore java.lang.Exception
-Inconsistent synchronization of org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.FairSchedulerEventLog.logDisabled; locked 50% of time
-Inconsistent synchronization of org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.QueueManager.queueMaxAppsDefault; locked 50% of time
-Inconsistent synchronization of org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.QueueManager.userMaxAppsDefault; locked 50% of time
-The details are in:https://builds.apache.org/job/PreCommit-MAPREDUCE-Build/2612//artifact/trunk/patchprocess/newPatchFindbugsWarningshadoop-yarn-server-resourcemanager.html#DE_MIGHT_IGNORE
-
+
The FairScheduler feature was recently added to YARN. As the recent PreCommit test from MAPREDUCE-4309 shows, Findbugs found several bugs related to the FairScheduler:
+org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.FairSchedulerEventLog.shutdown() might ignore java.lang.Exception
+Inconsistent synchronization of org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.FairSchedulerEventLog.logDisabled; locked 50% of time
+Inconsistent synchronization of org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.QueueManager.queueMaxAppsDefault; locked 50% of time
+Inconsistent synchronization of org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.QueueManager.userMaxAppsDefault; locked 50% of time
+The details are in:https://builds.apache.org/job/PreCommit-MAPREDUCE-Build/2612//artifact/trunk/patchprocess/newPatchFindbugsWarningshadoop-yarn-server-resourcemanager.html#DE_MIGHT_IGNORE
+
YARN-10.
Major improvement reported by Arun C Murthy and fixed by Hitesh Shah
@@ -991,18 +991,18 @@ The details are in:https://builds.apache.org/job/PreCommit-MAPREDUCE-Build/2612/
MAPREDUCE-3812.
Major sub-task reported by Vinod Kumar Vavilapalli and fixed by Harsh J (mrv2 , performance) Lower default allocation sizes, fix allocation configurations and document them
-
Removes two sets of previously available config properties:
-
-1. ( yarn.scheduler.fifo.minimum-allocation-mb and yarn.scheduler.fifo.maximum-allocation-mb ) and,
-2. ( yarn.scheduler.capacity.minimum-allocation-mb and yarn.scheduler.capacity.maximum-allocation-mb )
-
-In favor of two new, generically named properties:
-
-1. yarn.scheduler.minimum-allocation-mb - This acts as the floor value of memory resource requests for containers.
-2. yarn.scheduler.maximum-allocation-mb - This acts as the ceiling value of memory resource requests for containers.
-
-Both these properties need to be set at the ResourceManager (RM) to take effect, as the RM is where the scheduler resides.
-
+
Removes two sets of previously available config properties:
+
+1. ( yarn.scheduler.fifo.minimum-allocation-mb and yarn.scheduler.fifo.maximum-allocation-mb ) and,
+2. ( yarn.scheduler.capacity.minimum-allocation-mb and yarn.scheduler.capacity.maximum-allocation-mb )
+
+In favor of two new, generically named properties:
+
+1. yarn.scheduler.minimum-allocation-mb - This acts as the floor value of memory resource requests for containers.
+2. yarn.scheduler.maximum-allocation-mb - This acts as the ceiling value of memory resource requests for containers.
+
+Both these properties need to be set at the ResourceManager (RM) to take effect, as the RM is where the scheduler resides.
+
Also changes the default minimum and maximums to 128 MB and 10 GB respectively.
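A minimal sketch of the renamed properties in use, with the new defaults quoted above as example values; they only take effect when set on the ResourceManager:
{code}
import org.apache.hadoop.conf.Configuration;

// Minimal sketch: the scheduler-agnostic allocation limits, set to the new
// defaults mentioned above (128 MB floor, 10 GB ceiling). These must be set
// on the ResourceManager side to take effect.
public class AllocationLimitsSketch {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    conf.setInt("yarn.scheduler.minimum-allocation-mb", 128);
    conf.setInt("yarn.scheduler.maximum-allocation-mb", 10240);
    System.out.println("floor=" + conf.getInt("yarn.scheduler.minimum-allocation-mb", 0)
        + " mb, ceiling=" + conf.getInt("yarn.scheduler.maximum-allocation-mb", 0) + " mb");
  }
}
{code}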
MAPREDUCE-3782.
Critical bug reported by Arpit Gupta and fixed by Jason Lowe (mrv2)
@@ -1043,8 +1043,8 @@ Also changes the default minimum and maximums to 128 MB and 10 GB respectively.<
MAPREDUCE-3543.
Critical bug reported by Mahadev konar and fixed by Thomas Graves (mrv2) Mavenize Gridmix.
-
Note that to apply this you should first run the script - ./MAPREDUCE-3543v3.sh svn, then apply the patch.
-
+
Note that to apply this you should first run the script - ./MAPREDUCE-3543v3.sh svn, then apply the patch.
+
If this is merged to more than trunk, the version inside of hadoop-tools/hadoop-gridmix/pom.xml will need to be updated accordingly.
MAPREDUCE-3506.
Minor bug reported by Ratandeep Ratti and fixed by Jason Lowe (client , mrv2)
@@ -1613,10 +1613,10 @@ If this is merged to more then trunk, the version inside of hadoop-tools/hadoop-
HDFS-3475.
Trivial improvement reported by Harsh J and fixed by Harsh J Make the replication and invalidation rates configurable
-
This change adds two new configuration parameters.
-# {{dfs.namenode.invalidate.work.pct.per.iteration}} for controlling deletion rate of blocks.
-# {{dfs.namenode.replication.work.multiplier.per.iteration}} for controlling replication rate. This in turn allows controlling the time it takes for decommissioning.
-
+
This change adds two new configuration parameters.
+# {{dfs.namenode.invalidate.work.pct.per.iteration}} for controlling deletion rate of blocks.
+# {{dfs.namenode.replication.work.multiplier.per.iteration}} for controlling replication rate. This in turn allows controlling the time it takes for decommissioning.
+
Please see hdfs-default.xml for detailed description.
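A minimal sketch of tuning the two new parameters; the values below are illustrative only, with the shipped defaults documented in hdfs-default.xml:
{code}
import org.apache.hadoop.conf.Configuration;

// Minimal sketch: tune block deletion and replication work rates on the NameNode.
public class ReplicationRateSketch {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // Fraction of invalidation (deletion) work to do per iteration.
    conf.setFloat("dfs.namenode.invalidate.work.pct.per.iteration", 0.32f);
    // Replication work per iteration as a multiple of the number of live datanodes.
    conf.setInt("dfs.namenode.replication.work.multiplier.per.iteration", 2);
  }
}
{code}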
HDFS-3474.
Major sub-task reported by Ivan Kelly and fixed by Ivan Kelly
@@ -4769,8 +4769,8 @@ These release notes include new developer and user-facing incompatibilities, fea
MAPREDUCE-3720.
Major bug reported by Vinod Kumar Vavilapalli and fixed by Vinod Kumar Vavilapalli (client , mrv2) Command line listJobs should not visit each AM
-
Changed bin/mapred job -list to not print job-specific information not available at RM.
-
+
Changed bin/mapred job -list to not print job-specific information not available at RM.
+
Very minor incompatibility in cmd-line output, inevitable due to MRv2 architecture.
MAPREDUCE-3718.
Major sub-task reported by Vinod Kumar Vavilapalli and fixed by Hitesh Shah (mrv2 , performance)
@@ -4819,8 +4819,8 @@ Very minor incompatibility in cmd-line output, inevitable due to MRv2 architectu
MAPREDUCE-3703.
Critical bug reported by Eric Payne and fixed by Eric Payne (mrv2 , resourcemanager) ResourceManager should provide node lists in JMX output
-
New JMX Bean in ResourceManager to provide list of live node managers:
-
+
New JMX Bean in ResourceManager to provide list of live node managers:
+
Hadoop:service=ResourceManager,name=RMNMInfo LiveNodeManagers
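A minimal sketch of reading the new bean through standard JMX, assuming in-process access to the ResourceManager JVM (a remote JMXConnector would be needed otherwise):
{code}
import java.lang.management.ManagementFactory;
import javax.management.MBeanServer;
import javax.management.ObjectName;

// Minimal sketch: read the live NodeManager list from the RMNMInfo bean.
public class LiveNodeManagersSketch {
  public static void main(String[] args) throws Exception {
    MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
    ObjectName rmnmInfo = new ObjectName("Hadoop:service=ResourceManager,name=RMNMInfo");
    Object liveNodeManagers = mbs.getAttribute(rmnmInfo, "LiveNodeManagers");
    System.out.println(liveNodeManagers); // one entry per live NodeManager
  }
}
{code}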
MAPREDUCE-3702.
Critical bug reported by Thomas Graves and fixed by Thomas Graves (mrv2)
@@ -5037,12 +5037,12 @@ Hadoop:service=ResourceManager,name=RMNMInfo LiveNodeManagers
MAPREDUCE-3549.
Blocker bug reported by Thomas Graves and fixed by Thomas Graves (mrv2) write api documentation for web service apis for RM, NM, mapreduce app master, and job history server
-
new files added: A hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-site/src/site/apt/WebServicesIntro.apt.vm
-A hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-site/src/site/apt/NodeManagerRest.apt.vm
-A hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-site/src/site/apt/ResourceManagerRest.apt.vm
-A hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-site/src/site/apt/MapredAppMasterRest.apt.vm
-A hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-site/src/site/apt/HistoryServerRest.apt.vm
-
+
new files added: A hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-site/src/site/apt/WebServicesIntro.apt.vm
+A hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-site/src/site/apt/NodeManagerRest.apt.vm
+A hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-site/src/site/apt/ResourceManagerRest.apt.vm
+A hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-site/src/site/apt/MapredAppMasterRest.apt.vm
+A hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-site/src/site/apt/HistoryServerRest.apt.vm
+
The hadoop-project/src/site/site.xml is split into separate patch.
MAPREDUCE-3548.
Critical sub-task reported by Thomas Graves and fixed by Thomas Graves (mrv2)
@@ -5471,7 +5471,7 @@ The hadoop-project/src/site/site.xml is split into separate patch.<
MAPREDUCE-3297.
Major task reported by Siddharth Seth and fixed by Siddharth Seth (mrv2) Move Log Related components from yarn-server-nodemanager to yarn-common
-
Moved log related components into yarn-common so that HistoryServer and clients can use them without depending on the yarn-server-nodemanager module.
+
Moved log related components into yarn-common so that HistoryServer and clients can use them without depending on the yarn-server-nodemanager module.
MAPREDUCE-3291.
Blocker bug reported by Ramya Sunil and fixed by Robert Joseph Evans (mrv2)
@@ -5504,17 +5504,17 @@ The hadoop-project/src/site/site.xml is split into separate patch.<
MAPREDUCE-3219.
Minor sub-task reported by Hitesh Shah and fixed by Hitesh Shah (mrv2 , test) ant test TestDelegationToken failing on trunk
-
Reenabled and fixed bugs in the failing test TestDelegationToken.
+
Reenabled and fixed bugs in the failing test TestDelegationToken.
MAPREDUCE-3217.
Minor sub-task reported by Hitesh Shah and fixed by Devaraj K (mrv2 , test) ant test TestAuditLogger fails on trunk
-
Reenabled and fixed bugs in the failing ant test TestAuditLogger.
+
Reenabled and fixed bugs in the failing ant test TestAuditLogger.
MAPREDUCE-3215.
Minor sub-task reported by Hitesh Shah and fixed by Hitesh Shah (mrv2) org.apache.hadoop.mapreduce.TestNoJobSetupCleanup failing on trunk
-
Re-enabled and fixed bugs in the failing test TestNoJobSetupCleanup.
+
Re-enabled and fixed bugs in the failing test TestNoJobSetupCleanup.
MAPREDUCE-3194.
Major bug reported by Siddharth Seth and fixed by Jason Lowe (mrv2)
@@ -5875,12 +5875,12 @@ The hadoop-project/src/site/site.xml is split into separate patch.<
HDFS-2246.
Major improvement reported by Sanjay Radia and fixed by Jitendra Nath Pandey Shortcut a local client reads to a Datanodes files directly
-
1. New configurations
-a. dfs.block.local-path-access.user is the key in datanode configuration to specify the user allowed to do short circuit read.
-b. dfs.client.read.shortcircuit is the key to enable short circuit read at the client side configuration.
-c. dfs.client.read.shortcircuit.skip.checksum is the key to bypass checksum check at the client side.
-2. By default none of the above are enabled and short circuit read will not kick in.
-3. If security is on, the feature can be used only for users that have Kerberos credentials at the client; therefore, MapReduce tasks cannot benefit from it in general.
+
1. New configurations
+a. dfs.block.local-path-access.user is the key in datanode configuration to specify the user allowed to do short circuit read.
+b. dfs.client.read.shortcircuit is the key to enable short circuit read at the client side configuration.
+c. dfs.client.read.shortcircuit.skip.checksum is the key to bypass checksum check at the client side.
+2. By default none of the above are enabled and short circuit read will not kick in.
+3. If security is on, the feature can be used only for users that have Kerberos credentials at the client; therefore, MapReduce tasks cannot benefit from it in general.
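A minimal sketch of wiring up the three keys described above; the "hbase" user is only an example of a client allowed to short-circuit:
{code}
import org.apache.hadoop.conf.Configuration;

// Minimal sketch: short-circuit read wiring. The datanode side names the user
// allowed to read block files directly; the client side turns the feature on.
public class ShortCircuitReadSketch {
  public static void main(String[] args) {
    Configuration datanodeConf = new Configuration();
    datanodeConf.set("dfs.block.local-path-access.user", "hbase"); // example user

    Configuration clientConf = new Configuration();
    clientConf.setBoolean("dfs.client.read.shortcircuit", true);
    clientConf.setBoolean("dfs.client.read.shortcircuit.skip.checksum", false);
  }
}
{code}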
HDFS-2178.
Major improvement reported by Alejandro Abdelnur and fixed by Alejandro Abdelnur
@@ -6161,7 +6161,7 @@ c. dfs.client.read.shortcircuit.skip.checksum is the key to bypass checksum chec
HADOOP-7802.
Major bug reported by Bruno Mahé and fixed by Bruno Mahé Hadoop scripts unconditionally source "$bin"/../libexec/hadoop-config.sh.
-
Here is a patch to enable this behavior
+
Here is a patch to enable this behavior
HADOOP-7801.
Major bug reported by Bruno Mahé and fixed by Bruno Mahé (build)
@@ -6486,9 +6486,9 @@ These release notes include new developer and user-facing incompatibilities, fea
MAPREDUCE-3186.
Blocker bug reported by Ramgopal N and fixed by Eric Payne (mrv2) User jobs are getting hanged if the Resource manager process goes down and comes up while job is getting executed.
-
New Yarn configuration property:
-
-Name: yarn.app.mapreduce.am.scheduler.connection.retries
+
New Yarn configuration property:
+
+Name: yarn.app.mapreduce.am.scheduler.connection.retries
Description: Number of times AM should retry to contact RM if connection is lost.
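A minimal sketch of setting the new property; the retry count is an example value:
{code}
import org.apache.hadoop.conf.Configuration;

// Minimal sketch: let the MR ApplicationMaster retry the RM connection a few
// times when the ResourceManager restarts mid-job.
public class AmSchedulerRetriesSketch {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    conf.setInt("yarn.app.mapreduce.am.scheduler.connection.retries", 3); // example value
  }
}
{code}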
MAPREDUCE-3185.
Critical bug reported by Mahadev konar and fixed by Jonathan Eagles (mrv2)
@@ -6641,7 +6641,7 @@ Description: Number of times AM should retry to contact RM if connection is lost
MAPREDUCE-3112.
Major bug reported by Eric Yang and fixed by Eric Yang (contrib/streaming) Calling hadoop cli inside mapreduce job leads to errors
-
Removed inheritance of certain server environment variables (HADOOP_OPTS and HADOOP_ROOT_LOGGER) in task attempt process.
+
Removed inheritance of certain server environment variables (HADOOP_OPTS and HADOOP_ROOT_LOGGER) in task attempt process.
MAPREDUCE-3110.
Major bug reported by Devaraj K and fixed by Vinod Kumar Vavilapalli (mrv2 , test)
@@ -7114,16 +7114,16 @@ Description: Number of times AM should retry to contact RM if connection is lost
MAPREDUCE-2858.
Blocker sub-task reported by Luke Lu and fixed by Robert Joseph Evans (applicationmaster , mrv2 , security) MRv2 WebApp Security
-
A new server has been added to YARN. It is a web proxy that sits in front of the AM web UI. The server is controlled by the yarn.web-proxy.address config. If that config is set, and it points to an address that is different from the RM web interface, then a separate proxy server needs to be launched.
-
-This can be done by running
-
-yarn-daemon.sh start proxyserver
-
-If a separate proxy server is needed other configs also may need to be set, if security is enabled.
-yarn.web-proxy.principal
-yarn.web-proxy.keytab
-
+
A new server has been added to YARN. It is a web proxy that sits in front of the AM web UI. The server is controlled by the yarn.web-proxy.address config. If that config is set, and it points to an address that is different from the RM web interface, then a separate proxy server needs to be launched.
+
+This can be done by running
+
+yarn-daemon.sh start proxyserver
+
+If a separate proxy server is needed other configs also may need to be set, if security is enabled.
+yarn.web-proxy.principal
+yarn.web-proxy.keytab
+
The proxy server is stateless and should be able to support a VIP or other load balancing sitting in front of multiple instances of this server.
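A minimal sketch of the configuration for a standalone proxy server; host, principal and keytab values are placeholders, and the daemon itself is started with yarn-daemon.sh start proxyserver as noted above:
{code}
import org.apache.hadoop.conf.Configuration;

// Minimal sketch: standalone web-proxy configuration (placeholder values).
// The principal/keytab keys matter only when security is enabled.
public class WebProxyConfigSketch {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // An address different from the RM web UI means a separate proxy daemon is needed.
    conf.set("yarn.web-proxy.address", "proxyhost.example.com:9046");
    conf.set("yarn.web-proxy.principal", "yarn/_HOST@EXAMPLE.COM");
    conf.set("yarn.web-proxy.keytab", "/etc/security/keytabs/yarn.keytab");
  }
}
{code}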
MAPREDUCE-2854.
Major bug reported by Thomas Graves and fixed by Thomas Graves
@@ -8061,12 +8061,12 @@ mapreduce.reduce.shuffle.catch.exception.message.regex
MAPREDUCE-2037.
Major new feature reported by Dick King and fixed by Dick King Capturing interim progress times, CPU usage, and memory usage, when tasks reach certain progress thresholds
-
Capture intermediate task resource consumption information:
-* Time taken so far
-* CPU load [either at the time the data are taken, or exponentially smoothed]
-* Memory load [also either at the time the data are taken, or exponentially smoothed]
-
-This would be taken at intervals that depend on the task progress plateaus. For example, reducers have three progress ranges - [0-1/3], (1/3-2/3], and (2/3-3/3] - where fundamentally different activities happen. Mappers have different boundaries that are not symmetrically placed [0-9/10], (9/10-1]. Data capture boundaries should coincide with activity boundaries. For the state information capture [CPU and memory] we should average over the covered interval.
+
Capture intermediate task resource consumption information:
+* Time taken so far
+* CPU load [either at the time the data are taken, or exponentially smoothed]
+* Memory load [also either at the time the data are taken, or exponentially smoothed]
+
+This would be taken at intervals that depend on the task progress plateaus. For example, reducers have three progress ranges - [0-1/3], (1/3-2/3], and (2/3-3/3] - where fundamentally different activities happen. Mappers have different boundaries that are not symmetrically placed [0-9/10], (9/10-1]. Data capture boundaries should coincide with activity boundaries. For the state information capture [CPU and memory] we should average over the covered interval.
MAPREDUCE-2033.
Major task reported by Vinay Kumar Thota and fixed by Vinay Kumar Thota (contrib/gridmix)
@@ -8175,24 +8175,24 @@ This would be taken at intervals that depend on the task progress plateaus. For
MAPREDUCE-279.
Major improvement reported by Arun C Murthy and fixed by (mrv2) Map-Reduce 2.0
-
MapReduce has undergone a complete re-haul in hadoop-0.23 and we now have, what we call, MapReduce 2.0 (MRv2).
-
-The fundamental idea of MRv2 is to split up the two major functionalities of the JobTracker, resource management and job scheduling/monitoring, into separate daemons. The idea is to have a global ResourceManager (RM) and per-application ApplicationMaster (AM). An application is either a single job in the classical sense of Map-Reduce jobs or a DAG of jobs. The ResourceManager and per-node slave, the NodeManager (NM), form the data-computation framework. The ResourceManager is the ultimate authority that arbitrates resources among all the applications in the system. The per-application ApplicationMaster is, in effect, a framework specific library and is tasked with negotiating resources from the ResourceManager and working with the NodeManager(s) to execute and monitor the tasks.
-
-The ResourceManager has two main components:
-* Scheduler (S)
-* ApplicationsManager (ASM)
-
-The Scheduler is responsible for allocating resources to the various running applications subject to familiar constraints of capacities, queues etc. The Scheduler is a pure scheduler in the sense that it performs no monitoring or tracking of status for the application. Also, it offers no guarantees on restarting failed tasks either due to application failure or hardware failures. The Scheduler performs its scheduling function based on the resource requirements of the applications; it does so based on the abstract notion of a Resource Container which incorporates elements such as memory, cpu, disk, network etc.
-
-The Scheduler has a pluggable policy plug-in, which is responsible for partitioning the cluster resources among the various queues, applications etc. The current Map-Reduce schedulers such as the CapacityScheduler and the FairScheduler would be some examples of the plug-in.
-
-The CapacityScheduler supports hierarchical queues to allow for more predictable sharing of cluster resources.
-The ApplicationsManager is responsible for accepting job-submissions, negotiating the first container for executing the application specific ApplicationMaster and provides the service for restarting the ApplicationMaster container on failure.
-
-The NodeManager is the per-machine framework agent who is responsible for launching the applications' containers, monitoring their resource usage (cpu, memory, disk, network) and reporting the same to the Scheduler.
-
-The per-application ApplicationMaster has the responsibility of negotiating appropriate resource containers from the Scheduler, tracking their status and monitoring for progress.
+
MapReduce has undergone a complete re-haul in hadoop-0.23 and we now have, what we call, MapReduce 2.0 (MRv2).
+
+The fundamental idea of MRv2 is to split up the two major functionalities of the JobTracker, resource management and job scheduling/monitoring, into separate daemons. The idea is to have a global ResourceManager (RM) and per-application ApplicationMaster (AM). An application is either a single job in the classical sense of Map-Reduce jobs or a DAG of jobs. The ResourceManager and per-node slave, the NodeManager (NM), form the data-computation framework. The ResourceManager is the ultimate authority that arbitrates resources among all the applications in the system. The per-application ApplicationMaster is, in effect, a framework specific library and is tasked with negotiating resources from the ResourceManager and working with the NodeManager(s) to execute and monitor the tasks.
+
+The ResourceManager has two main components:
+* Scheduler (S)
+* ApplicationsManager (ASM)
+
+The Scheduler is responsible for allocating resources to the various running applications subject to familiar constraints of capacities, queues etc. The Scheduler is a pure scheduler in the sense that it performs no monitoring or tracking of status for the application. Also, it offers no guarantees on restarting failed tasks either due to application failure or hardware failures. The Scheduler performs its scheduling function based on the resource requirements of the applications; it does so based on the abstract notion of a Resource Container which incorporates elements such as memory, cpu, disk, network etc.
+
+The Scheduler has a pluggable policy plug-in, which is responsible for partitioning the cluster resources among the various queues, applications etc. The current Map-Reduce schedulers such as the CapacityScheduler and the FairScheduler would be some examples of the plug-in.
+
+The CapacityScheduler supports hierarchical queues to allow for more predictable sharing of cluster resources.
+The ApplicationsManager is responsible for accepting job-submissions, negotiating the first container for executing the application specific ApplicationMaster and provides the service for restarting the ApplicationMaster container on failure.
+
+The NodeManager is the per-machine framework agent who is responsible for launching the applications' containers, monitoring their resource usage (cpu, memory, disk, network) and reporting the same to the Scheduler.
+
+The per-application ApplicationMaster has the responsibility of negotiating appropriate resource containers from the Scheduler, tracking their status and monitoring for progress.
HDFS-2540.
Major sub-task reported by Tsz Wo (Nicholas), SZE and fixed by Tsz Wo (Nicholas), SZE
@@ -8253,10 +8253,10 @@ The per-application ApplicationMaster has the responsibility of negotiating appr
HDFS-2465.
Major improvement reported by Todd Lipcon and fixed by Todd Lipcon (data-node , performance) Add HDFS support for fadvise readahead and drop-behind
-
HDFS now has the ability to use posix_fadvise and sync_data_range syscalls to manage the OS buffer cache. This support is currently considered experimental, and may be enabled by configuring the following keys:
-dfs.datanode.drop.cache.behind.writes - set to true to drop data out of the buffer cache after writing
-dfs.datanode.drop.cache.behind.reads - set to true to drop data out of the buffer cache when performing sequential reads
-dfs.datanode.sync.behind.writes - set to true to trigger dirty page writeback immediately after writing data
+
HDFS now has the ability to use posix_fadvise and sync_data_range syscalls to manage the OS buffer cache. This support is currently considered experimental, and may be enabled by configuring the following keys:
+dfs.datanode.drop.cache.behind.writes - set to true to drop data out of the buffer cache after writing
+dfs.datanode.drop.cache.behind.reads - set to true to drop data out of the buffer cache when performing sequential reads
+dfs.datanode.sync.behind.writes - set to true to trigger dirty page writeback immediately after writing data
dfs.datanode.readahead.bytes - set to a non-zero value to trigger readahead for sequential reads
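A minimal sketch of enabling the experimental keys on a DataNode configuration; the values are illustrative:
{code}
import org.apache.hadoop.conf.Configuration;

// Minimal sketch: enable the experimental fadvise-based cache management on a
// DataNode. All of these default to off / zero.
public class FadviseTuningSketch {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    conf.setBoolean("dfs.datanode.drop.cache.behind.writes", true);
    conf.setBoolean("dfs.datanode.drop.cache.behind.reads", true);
    conf.setBoolean("dfs.datanode.sync.behind.writes", true);
    conf.setLong("dfs.datanode.readahead.bytes", 4 * 1024 * 1024); // 4 MB readahead
  }
}
{code}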
HDFS-2453.
Major sub-task reported by Arpit Gupta and fixed by Tsz Wo (Nicholas), SZE (webhdfs)
@@ -9331,7 +9331,7 @@ This is an incompatible change in 0.23. The versions of ClientProtocol and Data
HDFS-1594.
Major bug reported by Devaraj K and fixed by Aaron T. Myers (name-node) When the disk becomes full Namenode is getting shutdown and not able to recover
-
Implemented a daemon thread to monitor the disk usage periodically; if the disk usage reaches the threshold value, the name node is put into safe mode so that no modifications to the file system can occur. Once the disk usage drops below the threshold, the name node is taken out of safe mode. The threshold value and the interval at which the disk usage is checked are configurable.
+
Implemented a daemon thread to monitor the disk usage periodically; if the disk usage reaches the threshold value, the name node is put into safe mode so that no modifications to the file system can occur. Once the disk usage drops below the threshold, the name node is taken out of safe mode. The threshold value and the interval at which the disk usage is checked are configurable.
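A minimal sketch of the tunables; the key names (dfs.namenode.resource.du.reserved and dfs.namenode.resource.check.interval) are assumptions recalled from hdfs-default.xml, not quoted in this release note:
{code}
import org.apache.hadoop.conf.Configuration;

// Minimal sketch, key names assumed: reserve 1 GB on the NameNode volumes and
// check the available space every 10 seconds.
public class NameNodeDiskCheckSketch {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    conf.setLong("dfs.namenode.resource.du.reserved", 1024L * 1024 * 1024); // bytes
    conf.setInt("dfs.namenode.resource.check.interval", 10000);             // milliseconds
  }
}
{code}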
HDFS-1592.
Major bug reported by Bharath Mundlapudi and fixed by Bharath Mundlapudi
@@ -9376,9 +9376,9 @@ This is an incompatible change in 0.23. The versions of ClientProtocol and Data
HDFS-1547.
Major improvement reported by Suresh Srinivas and fixed by Suresh Srinivas (name-node) Improve decommission mechanism
-
Summary of changes to the decommissioning process:
-# After nodes are decommissioned, they are not shutdown. The decommissioned nodes are not used for writes. For reads, the decommissioned nodes are given as the last location to read from.
-# Number of live and dead decommissioned nodes are displayed in the namenode webUI.
+
Summary of changes to the decommissioning process:
+# After nodes are decommissioned, they are not shutdown. The decommissioned nodes are not used for writes. For reads, the decommissioned nodes are given as the last location to read from.
+# Number of live and dead decommissioned nodes are displayed in the namenode webUI.
# Decommissioned nodes' free capacity is not counted towards the cluster free capacity.
HDFS-1541.
Major sub-task reported by Hairong Kuang and fixed by Hairong Kuang (name-node)
@@ -9491,10 +9491,10 @@ This is an incompatible change in 0.23. The versions of ClientProtocol and Data
HDFS-1448.
Major new feature reported by Erik Steffl and fixed by Erik Steffl (tools) Create multi-format parser for edits logs file, support binary and XML formats initially
-
Offline edits viewer feature adds oev tool to hdfs script. Oev makes it possible to convert edits logs to/from native binary and XML formats. It uses the same framework as Offline image viewer.
-
-Example usage:
-
+
Offline edits viewer feature adds oev tool to hdfs script. Oev makes it possible to convert edits logs to/from native binary and XML formats. It uses the same framework as Offline image viewer.
+
+Example usage:
+
$HADOOP_HOME/bin/hdfs oev -i edits -o output.xml
HDFS-1445.
Major sub-task reported by Matt Foley and fixed by Matt Foley (data-node)
@@ -9762,7 +9762,7 @@ This change requires an upgrade at deployment.
HADOOP-7681.
Minor bug reported by Arpit Gupta and fixed by Arpit Gupta (conf) log4j.properties is missing properties for security audit and hdfs audit should be changed to info
-
HADOOP-7681. Fixed security and hdfs audit log4j properties
+
HADOOP-7681. Fixed security and hdfs audit log4j properties
(Arpit Gupta via Eric Yang)
HADOOP-7671.
Major bug reported by Ravi Prakash and fixed by Ravi Prakash
@@ -10363,8 +10363,8 @@ This change requires an upgrade at deployment.
HADOOP-7227.
Major improvement reported by Jitendra Nath Pandey and fixed by Jitendra Nath Pandey (ipc) Remove protocol version check at proxy creation in Hadoop RPC.
-
1. Protocol version check is removed from proxy creation, instead version check is performed at server in every rpc call.
-2. This change is backward incompatible because format of the rpc messages is changed to include client version, client method hash and rpc version.
+
1. Protocol version check is removed from proxy creation, instead version check is performed at server in every rpc call.
+2. This change is backward incompatible because format of the rpc messages is changed to include client version, client method hash and rpc version.
3. rpc version is introduced which should change when the format of rpc messages is changed.
HADOOP-7223.
Major bug reported by Suresh Srinivas and fixed by Suresh Srinivas (fs)
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics/ContextFactory.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics/ContextFactory.java
index 25efef9b659..034ea3589a5 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics/ContextFactory.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics/ContextFactory.java
@@ -1,211 +1,211 @@
-/*
- * ContextFactory.java
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.metrics;
-
-import java.io.IOException;
-import java.io.InputStream;
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.HashMap;
-import java.util.Iterator;
-import java.util.Map;
-import java.util.Properties;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.hadoop.metrics.spi.NullContext;
-
-/**
- * Factory class for creating MetricsContext objects. To obtain an instance
- * of this class, use the static getFactory() method.
- */
-@InterfaceAudience.LimitedPrivate({"HDFS", "MapReduce"})
-@InterfaceStability.Evolving
-public class ContextFactory {
-
- private static final String PROPERTIES_FILE =
- "/hadoop-metrics.properties";
- private static final String CONTEXT_CLASS_SUFFIX =
- ".class";
- private static final String DEFAULT_CONTEXT_CLASSNAME =
- "org.apache.hadoop.metrics.spi.NullContext";
-
- private static ContextFactory theFactory = null;
-
- private Map attributeMap = new HashMap();
- private Map contextMap =
- new HashMap();
-
- // Used only when contexts, or the ContextFactory itself, cannot be
- // created.
- private static Map nullContextMap =
- new HashMap();
-
- /** Creates a new instance of ContextFactory */
- protected ContextFactory() {
- }
-
- /**
- * Returns the value of the named attribute, or null if there is no
- * attribute of that name.
- *
- * @param attributeName the attribute name
- * @return the attribute value
- */
- public Object getAttribute(String attributeName) {
- return attributeMap.get(attributeName);
- }
-
- /**
- * Returns the names of all the factory's attributes.
- *
- * @return the attribute names
- */
- public String[] getAttributeNames() {
- String[] result = new String[attributeMap.size()];
- int i = 0;
- // for (String attributeName : attributeMap.keySet()) {
- Iterator it = attributeMap.keySet().iterator();
- while (it.hasNext()) {
- result[i++] = (String) it.next();
- }
- return result;
- }
-
- /**
- * Sets the named factory attribute to the specified value, creating it
- * if it did not already exist. If the value is null, this is the same as
- * calling removeAttribute.
- *
- * @param attributeName the attribute name
- * @param value the new attribute value
- */
- public void setAttribute(String attributeName, Object value) {
- attributeMap.put(attributeName, value);
- }
-
- /**
- * Removes the named attribute if it exists.
- *
- * @param attributeName the attribute name
- */
- public void removeAttribute(String attributeName) {
- attributeMap.remove(attributeName);
- }
-
- /**
- * Returns the named MetricsContext instance, constructing it if necessary
- * using the factory's current configuration attributes.
- *
- * When constructing the instance, if the factory property
- * contextName.class exists,
- * its value is taken to be the name of the class to instantiate. Otherwise,
- * the default is to create an instance of
- * org.apache.hadoop.metrics.spi.NullContext, which is a
- * dummy "no-op" context which will cause all metric data to be discarded.
- *
- * @param contextName the name of the context
- * @return the named MetricsContext
- */
- public synchronized MetricsContext getContext(String refName, String contextName)
- throws IOException, ClassNotFoundException,
- InstantiationException, IllegalAccessException {
- MetricsContext metricsContext = contextMap.get(refName);
- if (metricsContext == null) {
- String classNameAttribute = refName + CONTEXT_CLASS_SUFFIX;
- String className = (String) getAttribute(classNameAttribute);
- if (className == null) {
- className = DEFAULT_CONTEXT_CLASSNAME;
- }
- Class contextClass = Class.forName(className);
- metricsContext = (MetricsContext) contextClass.newInstance();
- metricsContext.init(contextName, this);
- contextMap.put(contextName, metricsContext);
- }
- return metricsContext;
- }
-
- public synchronized MetricsContext getContext(String contextName)
- throws IOException, ClassNotFoundException, InstantiationException,
- IllegalAccessException {
- return getContext(contextName, contextName);
- }
-
- /**
- * Returns all MetricsContexts built by this factory.
- */
- public synchronized Collection<MetricsContext> getAllContexts() {
- // Make a copy to avoid race conditions with creating new contexts.
- return new ArrayList<MetricsContext>(contextMap.values());
- }
-
- /**
- * Returns a "null" context - one which does nothing.
- */
- public static synchronized MetricsContext getNullContext(String contextName) {
- MetricsContext nullContext = nullContextMap.get(contextName);
- if (nullContext == null) {
- nullContext = new NullContext();
- nullContextMap.put(contextName, nullContext);
- }
- return nullContext;
- }
-
- /**
- * Returns the singleton ContextFactory instance, constructing it if
- * necessary.
- *
- * When the instance is constructed, this method checks if the file
- * hadoop-metrics.properties exists on the class path. If it
- * exists, it must be in the format defined by java.util.Properties, and all
- * the properties in the file are set as attributes on the newly created
- * ContextFactory instance.
- *
- * @return the singleton ContextFactory instance
- */
- public static synchronized ContextFactory getFactory() throws IOException {
- if (theFactory == null) {
- theFactory = new ContextFactory();
- theFactory.setAttributes();
- }
- return theFactory;
- }
-
- private void setAttributes() throws IOException {
- InputStream is = getClass().getResourceAsStream(PROPERTIES_FILE);
- if (is != null) {
- try {
- Properties properties = new Properties();
- properties.load(is);
- //for (Object propertyNameObj : properties.keySet()) {
- Iterator it = properties.keySet().iterator();
- while (it.hasNext()) {
- String propertyName = (String) it.next();
- String propertyValue = properties.getProperty(propertyName);
- setAttribute(propertyName, propertyValue);
- }
- } finally {
- is.close();
- }
- }
- }
-
-}
+/*
+ * ContextFactory.java
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.metrics;
+
+import java.io.IOException;
+import java.io.InputStream;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.Map;
+import java.util.Properties;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.metrics.spi.NullContext;
+
+/**
+ * Factory class for creating MetricsContext objects. To obtain an instance
+ * of this class, use the static getFactory() method.
+ */
+@InterfaceAudience.LimitedPrivate({"HDFS", "MapReduce"})
+@InterfaceStability.Evolving
+public class ContextFactory {
+
+ private static final String PROPERTIES_FILE =
+ "/hadoop-metrics.properties";
+ private static final String CONTEXT_CLASS_SUFFIX =
+ ".class";
+ private static final String DEFAULT_CONTEXT_CLASSNAME =
+ "org.apache.hadoop.metrics.spi.NullContext";
+
+ private static ContextFactory theFactory = null;
+
+ private Map<String,Object> attributeMap = new HashMap<String,Object>();
+ private Map<String,MetricsContext> contextMap =
+ new HashMap<String,MetricsContext>();
+
+ // Used only when contexts, or the ContextFactory itself, cannot be
+ // created.
+ private static Map<String,MetricsContext> nullContextMap =
+ new HashMap<String,MetricsContext>();
+
+ /** Creates a new instance of ContextFactory */
+ protected ContextFactory() {
+ }
+
+ /**
+ * Returns the value of the named attribute, or null if there is no
+ * attribute of that name.
+ *
+ * @param attributeName the attribute name
+ * @return the attribute value
+ */
+ public Object getAttribute(String attributeName) {
+ return attributeMap.get(attributeName);
+ }
+
+ /**
+ * Returns the names of all the factory's attributes.
+ *
+ * @return the attribute names
+ */
+ public String[] getAttributeNames() {
+ String[] result = new String[attributeMap.size()];
+ int i = 0;
+ // for (String attributeName : attributeMap.keySet()) {
+ Iterator it = attributeMap.keySet().iterator();
+ while (it.hasNext()) {
+ result[i++] = (String) it.next();
+ }
+ return result;
+ }
+
+ /**
+ * Sets the named factory attribute to the specified value, creating it
+ * if it did not already exist. If the value is null, this is the same as
+ * calling removeAttribute.
+ *
+ * @param attributeName the attribute name
+ * @param value the new attribute value
+ */
+ public void setAttribute(String attributeName, Object value) {
+ attributeMap.put(attributeName, value);
+ }
+
+ /**
+ * Removes the named attribute if it exists.
+ *
+ * @param attributeName the attribute name
+ */
+ public void removeAttribute(String attributeName) {
+ attributeMap.remove(attributeName);
+ }
+
+ /**
+ * Returns the named MetricsContext instance, constructing it if necessary
+ * using the factory's current configuration attributes.
+ *
+ * When constructing the instance, if the factory property
+ * contextName.class exists,
+ * its value is taken to be the name of the class to instantiate. Otherwise,
+ * the default is to create an instance of
+ * org.apache.hadoop.metrics.spi.NullContext, which is a
+ * dummy "no-op" context which will cause all metric data to be discarded.
+ *
+ * @param contextName the name of the context
+ * @return the named MetricsContext
+ */
+ public synchronized MetricsContext getContext(String refName, String contextName)
+ throws IOException, ClassNotFoundException,
+ InstantiationException, IllegalAccessException {
+ MetricsContext metricsContext = contextMap.get(refName);
+ if (metricsContext == null) {
+ String classNameAttribute = refName + CONTEXT_CLASS_SUFFIX;
+ String className = (String) getAttribute(classNameAttribute);
+ if (className == null) {
+ className = DEFAULT_CONTEXT_CLASSNAME;
+ }
+ Class contextClass = Class.forName(className);
+ metricsContext = (MetricsContext) contextClass.newInstance();
+ metricsContext.init(contextName, this);
+ contextMap.put(contextName, metricsContext);
+ }
+ return metricsContext;
+ }
+
+ public synchronized MetricsContext getContext(String contextName)
+ throws IOException, ClassNotFoundException, InstantiationException,
+ IllegalAccessException {
+ return getContext(contextName, contextName);
+ }
+
+ /**
+ * Returns all MetricsContexts built by this factory.
+ */
+ public synchronized Collection<MetricsContext> getAllContexts() {
+ // Make a copy to avoid race conditions with creating new contexts.
+ return new ArrayList<MetricsContext>(contextMap.values());
+ }
+
+ /**
+ * Returns a "null" context - one which does nothing.
+ */
+ public static synchronized MetricsContext getNullContext(String contextName) {
+ MetricsContext nullContext = nullContextMap.get(contextName);
+ if (nullContext == null) {
+ nullContext = new NullContext();
+ nullContextMap.put(contextName, nullContext);
+ }
+ return nullContext;
+ }
+
+ /**
+ * Returns the singleton ContextFactory instance, constructing it if
+ * necessary.
+ *
+ * When the instance is constructed, this method checks if the file
+ * hadoop-metrics.properties exists on the class path. If it
+ * exists, it must be in the format defined by java.util.Properties, and all
+ * the properties in the file are set as attributes on the newly created
+ * ContextFactory instance.
+ *
+ * @return the singleton ContextFactory instance
+ */
+ public static synchronized ContextFactory getFactory() throws IOException {
+ if (theFactory == null) {
+ theFactory = new ContextFactory();
+ theFactory.setAttributes();
+ }
+ return theFactory;
+ }
+
+ private void setAttributes() throws IOException {
+ InputStream is = getClass().getResourceAsStream(PROPERTIES_FILE);
+ if (is != null) {
+ try {
+ Properties properties = new Properties();
+ properties.load(is);
+ //for (Object propertyNameObj : properties.keySet()) {
+ Iterator it = properties.keySet().iterator();
+ while (it.hasNext()) {
+ String propertyName = (String) it.next();
+ String propertyValue = properties.getProperty(propertyName);
+ setAttribute(propertyName, propertyValue);
+ }
+ } finally {
+ is.close();
+ }
+ }
+ }
+
+}
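ContextFactory is only touched by this patch for line-ending normalization, but since the whole file is reproduced above, a minimal usage sketch may help readers unfamiliar with the metrics-v1 API. The context name "myContext" is an illustrative assumption, not something taken from the patch:

    import org.apache.hadoop.metrics.ContextFactory;
    import org.apache.hadoop.metrics.MetricsContext;

    public class ContextFactoryUsage {
      public static void main(String[] args) throws Exception {
        // Singleton factory; attributes are read from /hadoop-metrics.properties
        // if that file is present on the classpath.
        ContextFactory factory = ContextFactory.getFactory();

        // No "myContext.class" attribute is set here, so the factory falls back
        // to org.apache.hadoop.metrics.spi.NullContext and the returned context
        // silently discards all metric data.
        MetricsContext context = factory.getContext("myContext");
        System.out.println("Resolved context: " + context.getClass().getName());
      }
    }

A real deployment would normally set the "<contextName>.class" attribute (usually via the properties file) to select a concrete context implementation instead of the NullContext default.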
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics/MetricsContext.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics/MetricsContext.java
index b84d5265036..e297e3738b1 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics/MetricsContext.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics/MetricsContext.java
@@ -1,122 +1,122 @@
-/*
- * MetricsContext.java
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.metrics;
-
-import java.io.IOException;
-import java.util.Collection;
-import java.util.Map;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.hadoop.metrics.spi.OutputRecord;
-
-/**
- * The main interface to the metrics package.
- */
-@InterfaceAudience.Private
-@InterfaceStability.Evolving
-public interface MetricsContext {
-
- /**
- * Default period in seconds at which data is sent to the metrics system.
- */
- public static final int DEFAULT_PERIOD = 5;
-
- /**
- * Initialize this context.
- * @param contextName The given name for this context
- * @param factory The creator of this context
- */
- public void init(String contextName, ContextFactory factory);
-
- /**
- * Returns the context name.
- *
- * @return the context name
- */
- public abstract String getContextName();
-
- /**
- * Starts or restarts monitoring, the emitting of metrics records as they are
- * updated.
- */
- public abstract void startMonitoring()
- throws IOException;
-
- /**
- * Stops monitoring. This does not free any data that the implementation
- * may have buffered for sending at the next timer event. It
- * is OK to call startMonitoring() again after calling
- * this.
- * @see #close()
- */
- public abstract void stopMonitoring();
-
- /**
- * Returns true if monitoring is currently in progress.
- */
- public abstract boolean isMonitoring();
-
- /**
- * Stops monitoring and also frees any buffered data, returning this
- * object to its initial state.
- */
- public abstract void close();
-
- /**
- * Creates a new MetricsRecord instance with the given recordName.
- * Throws an exception if the metrics implementation is configured with a fixed
- * set of record names and recordName is not in that set.
- *
- * @param recordName the name of the record
- * @throws MetricsException if recordName conflicts with configuration data
- */
- public abstract MetricsRecord createRecord(String recordName);
-
- /**
- * Registers a callback to be called at regular time intervals, as
- * determined by the implementation-class specific configuration.
- *
- * @param updater object to be run periodically; it should update
- * some metrics records and then return
- */
- public abstract void registerUpdater(Updater updater);
-
- /**
- * Removes a callback, if it exists.
- *
- * @param updater object to be removed from the callback list
- */
- public abstract void unregisterUpdater(Updater updater);
-
- /**
- * Returns the timer period.
- */
- public abstract int getPeriod();
-
- /**
- * Retrieves all the records managed by this MetricsContext.
- * Useful for monitoring systems that are polling-based.
- *
- * @return A non-null map from all record names to the records managed.
- */
- Map<String, Collection<OutputRecord>> getAllRecords();
-}
+/*
+ * MetricsContext.java
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.metrics;
+
+import java.io.IOException;
+import java.util.Collection;
+import java.util.Map;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.metrics.spi.OutputRecord;
+
+/**
+ * The main interface to the metrics package.
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Evolving
+public interface MetricsContext {
+
+ /**
+ * Default period in seconds at which data is sent to the metrics system.
+ */
+ public static final int DEFAULT_PERIOD = 5;
+
+ /**
+ * Initialize this context.
+ * @param contextName The given name for this context
+ * @param factory The creator of this context
+ */
+ public void init(String contextName, ContextFactory factory);
+
+ /**
+ * Returns the context name.
+ *
+ * @return the context name
+ */
+ public abstract String getContextName();
+
+ /**
+ * Starts or restarts monitoring, the emitting of metrics records as they are
+ * updated.
+ */
+ public abstract void startMonitoring()
+ throws IOException;
+
+ /**
+ * Stops monitoring. This does not free any data that the implementation
+ * may have buffered for sending at the next timer event. It
+ * is OK to call startMonitoring() again after calling
+ * this.
+ * @see #close()
+ */
+ public abstract void stopMonitoring();
+
+ /**
+ * Returns true if monitoring is currently in progress.
+ */
+ public abstract boolean isMonitoring();
+
+ /**
+ * Stops monitoring and also frees any buffered data, returning this
+ * object to its initial state.
+ */
+ public abstract void close();
+
+ /**
+ * Creates a new MetricsRecord instance with the given recordName.
+ * Throws an exception if the metrics implementation is configured with a fixed
+ * set of record names and recordName is not in that set.
+ *
+ * @param recordName the name of the record
+ * @throws MetricsException if recordName conflicts with configuration data
+ */
+ public abstract MetricsRecord createRecord(String recordName);
+
+ /**
+ * Registers a callback to be called at regular time intervals, as
+ * determined by the implementation-class specific configuration.
+ *
+ * @param updater object to be run periodically; it should update
+ * some metrics records and then return
+ */
+ public abstract void registerUpdater(Updater updater);
+
+ /**
+ * Removes a callback, if it exists.
+ *
+ * @param updater object to be removed from the callback list
+ */
+ public abstract void unregisterUpdater(Updater updater);
+
+ /**
+ * Returns the timer period.
+ */
+ public abstract int getPeriod();
+
+ /**
+ * Retrieves all the records managed by this MetricsContext.
+ * Useful for monitoring systems that are polling-based.
+ *
+ * @return A non-null map from all record names to the records managed.
+ */
+ Map<String, Collection<OutputRecord>> getAllRecords();
+}
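MetricsContext is the interface client code programs against. The sketch below shows the typical lifecycle (register an Updater callback, start monitoring, close); it assumes the org.apache.hadoop.metrics.Updater interface from the same package, and the record, tag, and metric names are invented for illustration:

    import org.apache.hadoop.metrics.ContextFactory;
    import org.apache.hadoop.metrics.MetricsContext;
    import org.apache.hadoop.metrics.MetricsRecord;
    import org.apache.hadoop.metrics.Updater;

    public class MetricsContextUsage {
      public static void main(String[] args) throws Exception {
        final MetricsContext context =
            ContextFactory.getFactory().getContext("myContext");

        // Called once per period (DEFAULT_PERIOD is 5 seconds unless the
        // implementation is configured otherwise).
        context.registerUpdater(new Updater() {
          @Override
          public void doUpdates(MetricsContext unused) {
            MetricsRecord record = context.createRecord("diskStats");
            record.setTag("diskName", "sda");
            record.setMetric("diskPercentFull", 42);
            record.update();  // buffers the row; it is emitted on the next timer tick
          }
        });

        context.startMonitoring();
        Thread.sleep(12000);  // let a couple of periods elapse
        context.close();      // stop monitoring and free buffered data
      }
    }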
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics/MetricsException.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics/MetricsException.java
index 7a19d1bec73..de7139549f1 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics/MetricsException.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics/MetricsException.java
@@ -1,47 +1,47 @@
-/*
- * MetricsException.java
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.metrics;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.classification.InterfaceStability;
-
-/**
- * General-purpose, unchecked metrics exception.
- */
-@InterfaceAudience.LimitedPrivate({"HDFS", "MapReduce"})
-@InterfaceStability.Evolving
-public class MetricsException extends RuntimeException {
-
- private static final long serialVersionUID = -1643257498540498497L;
-
- /** Creates a new instance of MetricsException */
- public MetricsException() {
- }
-
- /** Creates a new instance of MetricsException
- *
- * @param message an error message
- */
- public MetricsException(String message) {
- super(message);
- }
-
-}
+/*
+ * MetricsException.java
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.metrics;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+
+/**
+ * General-purpose, unchecked metrics exception.
+ */
+@InterfaceAudience.LimitedPrivate({"HDFS", "MapReduce"})
+@InterfaceStability.Evolving
+public class MetricsException extends RuntimeException {
+
+ private static final long serialVersionUID = -1643257498540498497L;
+
+ /** Creates a new instance of MetricsException */
+ public MetricsException() {
+ }
+
+ /** Creates a new instance of MetricsException
+ *
+ * @param message an error message
+ */
+ public MetricsException(String message) {
+ super(message);
+ }
+
+}
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics/MetricsRecord.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics/MetricsRecord.java
index cbe3ea48c20..45701c570f9 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics/MetricsRecord.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics/MetricsRecord.java
@@ -1,251 +1,251 @@
-/*
- * MetricsRecord.java
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.metrics;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.classification.InterfaceStability;
-
-/**
- * A named and optionally tagged set of records to be sent to the metrics
- * system.
- *
- * A record name identifies the kind of data to be reported. For example, a
- * program reporting statistics relating to the disks on a computer might use
- * a record name "diskStats".
- *
- * A record has zero or more tags. A tag has a name and a value. To
- * continue the example, the "diskStats" record might use a tag named
- * "diskName" to identify a particular disk. Sometimes it is useful to have
- * more than one tag, so there might also be a "diskType" with value "ide" or
- * "scsi" or whatever.
- *
- * A record also has zero or more metrics. These are the named
- * values that are to be reported to the metrics system. In the "diskStats"
- * example, possible metric names would be "diskPercentFull", "diskPercentBusy",
- * "kbReadPerSecond", etc.
- *
- * The general procedure for using a MetricsRecord is to fill in its tag and
- * metric values, and then call update() to pass the record to the
- * client library.
- * Metric data is not immediately sent to the metrics system
- * each time that update() is called.
- * An internal table is maintained, identified by the record name. This
- * table has columns
- * corresponding to the tag and the metric names, and rows
- * corresponding to each unique set of tag values. An update
- * either modifies an existing row in the table, or adds a new row with a set of
- * tag values that are different from all the other rows. Note that if there
- * are no tags, then there can be at most one row in the table.
- *
- * Once a row is added to the table, its data will be sent to the metrics system
- * on every timer period, whether or not it has been updated since the previous
- * timer period. If this is inappropriate, for example if metrics were being
- * reported by some transient object in an application, the remove()
- * method can be used to remove the row and thus stop the data from being
- * sent.
- *
- * Note that the update() method is atomic. This means that it is
- * safe for different threads to be updating the same metric. More precisely,
- * it is OK for different threads to call update() on MetricsRecord instances
- * with the same set of tag names and tag values. Different threads should
- * not use the same MetricsRecord instance at the same time.
- */
-@InterfaceAudience.Private
-@InterfaceStability.Evolving
-public interface MetricsRecord {
-
- /**
- * Returns the record name.
- *
- * @return the record name
- */
- public abstract String getRecordName();
-
- /**
- * Sets the named tag to the specified value. The tagValue may be null,
- * which is treated the same as an empty String.
- *
- * @param tagName name of the tag
- * @param tagValue new value of the tag
- * @throws MetricsException if the tagName conflicts with the configuration
- */
- public abstract void setTag(String tagName, String tagValue);
-
- /**
- * Sets the named tag to the specified value.
- *
- * @param tagName name of the tag
- * @param tagValue new value of the tag
- * @throws MetricsException if the tagName conflicts with the configuration
- */
- public abstract void setTag(String tagName, int tagValue);
-
- /**
- * Sets the named tag to the specified value.
- *
- * @param tagName name of the tag
- * @param tagValue new value of the tag
- * @throws MetricsException if the tagName conflicts with the configuration
- */
- public abstract void setTag(String tagName, long tagValue);
-
- /**
- * Sets the named tag to the specified value.
- *
- * @param tagName name of the tag
- * @param tagValue new value of the tag
- * @throws MetricsException if the tagName conflicts with the configuration
- */
- public abstract void setTag(String tagName, short tagValue);
-
- /**
- * Sets the named tag to the specified value.
- *
- * @param tagName name of the tag
- * @param tagValue new value of the tag
- * @throws MetricsException if the tagName conflicts with the configuration
- */
- public abstract void setTag(String tagName, byte tagValue);
-
- /**
- * Removes any tag of the specified name.
- *
- * @param tagName name of a tag
- */
- public abstract void removeTag(String tagName);
-
- /**
- * Sets the named metric to the specified value.
- *
- * @param metricName name of the metric
- * @param metricValue new value of the metric
- * @throws MetricsException if the metricName or the type of the metricValue
- * conflicts with the configuration
- */
- public abstract void setMetric(String metricName, int metricValue);
-
- /**
- * Sets the named metric to the specified value.
- *
- * @param metricName name of the metric
- * @param metricValue new value of the metric
- * @throws MetricsException if the metricName or the type of the metricValue
- * conflicts with the configuration
- */
- public abstract void setMetric(String metricName, long metricValue);
-
- /**
- * Sets the named metric to the specified value.
- *
- * @param metricName name of the metric
- * @param metricValue new value of the metric
- * @throws MetricsException if the metricName or the type of the metricValue
- * conflicts with the configuration
- */
- public abstract void setMetric(String metricName, short metricValue);
-
- /**
- * Sets the named metric to the specified value.
- *
- * @param metricName name of the metric
- * @param metricValue new value of the metric
- * @throws MetricsException if the metricName or the type of the metricValue
- * conflicts with the configuration
- */
- public abstract void setMetric(String metricName, byte metricValue);
-
- /**
- * Sets the named metric to the specified value.
- *
- * @param metricName name of the metric
- * @param metricValue new value of the metric
- * @throws MetricsException if the metricName or the type of the metricValue
- * conflicts with the configuration
- */
- public abstract void setMetric(String metricName, float metricValue);
-
- /**
- * Increments the named metric by the specified value.
- *
- * @param metricName name of the metric
- * @param metricValue incremental value
- * @throws MetricsException if the metricName or the type of the metricValue
- * conflicts with the configuration
- */
- public abstract void incrMetric(String metricName, int metricValue);
-
- /**
- * Increments the named metric by the specified value.
- *
- * @param metricName name of the metric
- * @param metricValue incremental value
- * @throws MetricsException if the metricName or the type of the metricValue
- * conflicts with the configuration
- */
- public abstract void incrMetric(String metricName, long metricValue);
-
- /**
- * Increments the named metric by the specified value.
- *
- * @param metricName name of the metric
- * @param metricValue incremental value
- * @throws MetricsException if the metricName or the type of the metricValue
- * conflicts with the configuration
- */
- public abstract void incrMetric(String metricName, short metricValue);
-
- /**
- * Increments the named metric by the specified value.
- *
- * @param metricName name of the metric
- * @param metricValue incremental value
- * @throws MetricsException if the metricName or the type of the metricValue
- * conflicts with the configuration
- */
- public abstract void incrMetric(String metricName, byte metricValue);
-
- /**
- * Increments the named metric by the specified value.
- *
- * @param metricName name of the metric
- * @param metricValue incremental value
- * @throws MetricsException if the metricName or the type of the metricValue
- * conflicts with the configuration
- */
- public abstract void incrMetric(String metricName, float metricValue);
-
- /**
- * Updates the table of buffered data which is to be sent periodically.
- * If the tag values match an existing row, that row is updated;
- * otherwise, a new row is added.
- */
- public abstract void update();
-
- /**
- * Removes, from the buffered data table, all rows having tags
- * that equal the tags that have been set on this record. For example,
- * if there are no tags on this record, all rows for this record name
- * would be removed. Or, if there is a single tag on this record, then
- * just rows containing a tag with the same name and value would be removed.
- */
- public abstract void remove();
-
-}
+/*
+ * MetricsRecord.java
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.metrics;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+
+/**
+ * A named and optionally tagged set of records to be sent to the metrics
+ * system.
+ *
+ * A record name identifies the kind of data to be reported. For example, a
+ * program reporting statistics relating to the disks on a computer might use
+ * a record name "diskStats".
+ *
+ * A record has zero or more tags. A tag has a name and a value. To
+ * continue the example, the "diskStats" record might use a tag named
+ * "diskName" to identify a particular disk. Sometimes it is useful to have
+ * more than one tag, so there might also be a "diskType" with value "ide" or
+ * "scsi" or whatever.
+ *
+ * A record also has zero or more metrics. These are the named
+ * values that are to be reported to the metrics system. In the "diskStats"
+ * example, possible metric names would be "diskPercentFull", "diskPercentBusy",
+ * "kbReadPerSecond", etc.
+ *
+ * The general procedure for using a MetricsRecord is to fill in its tag and
+ * metric values, and then call update() to pass the record to the
+ * client library.
+ * Metric data is not immediately sent to the metrics system
+ * each time that update() is called.
+ * An internal table is maintained, identified by the record name. This
+ * table has columns
+ * corresponding to the tag and the metric names, and rows
+ * corresponding to each unique set of tag values. An update
+ * either modifies an existing row in the table, or adds a new row with a set of
+ * tag values that are different from all the other rows. Note that if there
+ * are no tags, then there can be at most one row in the table.
+ *
+ * Once a row is added to the table, its data will be sent to the metrics system
+ * on every timer period, whether or not it has been updated since the previous
+ * timer period. If this is inappropriate, for example if metrics were being
+ * reported by some transient object in an application, the remove()
+ * method can be used to remove the row and thus stop the data from being
+ * sent.
+ *
+ * Note that the update() method is atomic. This means that it is
+ * safe for different threads to be updating the same metric. More precisely,
+ * it is OK for different threads to call update() on MetricsRecord instances
+ * with the same set of tag names and tag values. Different threads should
+ * not use the same MetricsRecord instance at the same time.
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Evolving
+public interface MetricsRecord {
+
+ /**
+ * Returns the record name.
+ *
+ * @return the record name
+ */
+ public abstract String getRecordName();
+
+ /**
+ * Sets the named tag to the specified value. The tagValue may be null,
+ * which is treated the same as an empty String.
+ *
+ * @param tagName name of the tag
+ * @param tagValue new value of the tag
+ * @throws MetricsException if the tagName conflicts with the configuration
+ */
+ public abstract void setTag(String tagName, String tagValue);
+
+ /**
+ * Sets the named tag to the specified value.
+ *
+ * @param tagName name of the tag
+ * @param tagValue new value of the tag
+ * @throws MetricsException if the tagName conflicts with the configuration
+ */
+ public abstract void setTag(String tagName, int tagValue);
+
+ /**
+ * Sets the named tag to the specified value.
+ *
+ * @param tagName name of the tag
+ * @param tagValue new value of the tag
+ * @throws MetricsException if the tagName conflicts with the configuration
+ */
+ public abstract void setTag(String tagName, long tagValue);
+
+ /**
+ * Sets the named tag to the specified value.
+ *
+ * @param tagName name of the tag
+ * @param tagValue new value of the tag
+ * @throws MetricsException if the tagName conflicts with the configuration
+ */
+ public abstract void setTag(String tagName, short tagValue);
+
+ /**
+ * Sets the named tag to the specified value.
+ *
+ * @param tagName name of the tag
+ * @param tagValue new value of the tag
+ * @throws MetricsException if the tagName conflicts with the configuration
+ */
+ public abstract void setTag(String tagName, byte tagValue);
+
+ /**
+ * Removes any tag of the specified name.
+ *
+ * @param tagName name of a tag
+ */
+ public abstract void removeTag(String tagName);
+
+ /**
+ * Sets the named metric to the specified value.
+ *
+ * @param metricName name of the metric
+ * @param metricValue new value of the metric
+ * @throws MetricsException if the metricName or the type of the metricValue
+ * conflicts with the configuration
+ */
+ public abstract void setMetric(String metricName, int metricValue);
+
+ /**
+ * Sets the named metric to the specified value.
+ *
+ * @param metricName name of the metric
+ * @param metricValue new value of the metric
+ * @throws MetricsException if the metricName or the type of the metricValue
+ * conflicts with the configuration
+ */
+ public abstract void setMetric(String metricName, long metricValue);
+
+ /**
+ * Sets the named metric to the specified value.
+ *
+ * @param metricName name of the metric
+ * @param metricValue new value of the metric
+ * @throws MetricsException if the metricName or the type of the metricValue
+ * conflicts with the configuration
+ */
+ public abstract void setMetric(String metricName, short metricValue);
+
+ /**
+ * Sets the named metric to the specified value.
+ *
+ * @param metricName name of the metric
+ * @param metricValue new value of the metric
+ * @throws MetricsException if the metricName or the type of the metricValue
+ * conflicts with the configuration
+ */
+ public abstract void setMetric(String metricName, byte metricValue);
+
+ /**
+ * Sets the named metric to the specified value.
+ *
+ * @param metricName name of the metric
+ * @param metricValue new value of the metric
+ * @throws MetricsException if the metricName or the type of the metricValue
+ * conflicts with the configuration
+ */
+ public abstract void setMetric(String metricName, float metricValue);
+
+ /**
+ * Increments the named metric by the specified value.
+ *
+ * @param metricName name of the metric
+ * @param metricValue incremental value
+ * @throws MetricsException if the metricName or the type of the metricValue
+ * conflicts with the configuration
+ */
+ public abstract void incrMetric(String metricName, int metricValue);
+
+ /**
+ * Increments the named metric by the specified value.
+ *
+ * @param metricName name of the metric
+ * @param metricValue incremental value
+ * @throws MetricsException if the metricName or the type of the metricValue
+ * conflicts with the configuration
+ */
+ public abstract void incrMetric(String metricName, long metricValue);
+
+ /**
+ * Increments the named metric by the specified value.
+ *
+ * @param metricName name of the metric
+ * @param metricValue incremental value
+ * @throws MetricsException if the metricName or the type of the metricValue
+ * conflicts with the configuration
+ */
+ public abstract void incrMetric(String metricName, short metricValue);
+
+ /**
+ * Increments the named metric by the specified value.
+ *
+ * @param metricName name of the metric
+ * @param metricValue incremental value
+ * @throws MetricsException if the metricName or the type of the metricValue
+ * conflicts with the configuration
+ */
+ public abstract void incrMetric(String metricName, byte metricValue);
+
+ /**
+ * Increments the named metric by the specified value.
+ *
+ * @param metricName name of the metric
+ * @param metricValue incremental value
+ * @throws MetricsException if the metricName or the type of the metricValue
+ * conflicts with the configuration
+ */
+ public abstract void incrMetric(String metricName, float metricValue);
+
+ /**
+ * Updates the table of buffered data which is to be sent periodically.
+ * If the tag values match an existing row, that row is updated;
+ * otherwise, a new row is added.
+ */
+ public abstract void update();
+
+ /**
+ * Removes, from the buffered data table, all rows having tags
+ * that equal the tags that have been set on this record. For example,
+ * if there are no tags on this record, all rows for this record name
+ * would be removed. Or, if there is a single tag on this record, then
+ * just rows containing a tag with the same name and value would be removed.
+ */
+ public abstract void remove();
+
+}
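The MetricsRecord javadoc above describes an internal table keyed by tag values; the short sketch below illustrates that model. It assumes a MetricsContext has already been obtained as in the earlier sketches, and the record, tag, and metric names are again illustrative only:

    import org.apache.hadoop.metrics.MetricsContext;
    import org.apache.hadoop.metrics.MetricsRecord;

    public class MetricsRecordUsage {
      // Records with the same name but different tag values create separate rows
      // in the buffered table; identical tag values update the same row in place.
      static void reportDisk(MetricsContext context, String disk, int kbPerSec) {
        MetricsRecord record = context.createRecord("diskStats");
        record.setTag("diskName", disk);
        record.setMetric("kbReadPerSecond", kbPerSec);
        record.update();
      }

      // remove() deletes only the rows whose tags equal this record's tags, so
      // retiring one disk leaves the rows for the other disks untouched.
      static void retireDisk(MetricsContext context, String disk) {
        MetricsRecord record = context.createRecord("diskStats");
        record.setTag("diskName", disk);
        record.remove();
      }
    }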
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics/file/FileContext.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics/file/FileContext.java
index f0aafa1c3a3..591b32c4914 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics/file/FileContext.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics/file/FileContext.java
@@ -1,154 +1,154 @@
-/*
- * FileContext.java
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.metrics.file;
-
-import java.io.BufferedOutputStream;
-import java.io.File;
-import java.io.FileWriter;
-import java.io.IOException;
-import java.io.PrintWriter;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.hadoop.metrics.ContextFactory;
-import org.apache.hadoop.metrics.spi.AbstractMetricsContext;
-import org.apache.hadoop.metrics.spi.OutputRecord;
-
-/**
- * Metrics context for writing metrics to a file.
- *
- * This class is configured by setting ContextFactory attributes which in turn
- * are usually configured through a properties file. All the attributes are
- * prefixed by the contextName. For example, the properties file might contain:
- * <pre>
- * myContextName.fileName=/tmp/metrics.log
- * myContextName.period=5
- * </pre>
- * @see org.apache.hadoop.metrics2.sink.FileSink for metrics 2.0.
- */
-@InterfaceAudience.Public
-@InterfaceStability.Evolving
-@Deprecated
-public class FileContext extends AbstractMetricsContext {
-
- /* Configuration attribute names */
- @InterfaceAudience.Private
- protected static final String FILE_NAME_PROPERTY = "fileName";
- @InterfaceAudience.Private
- protected static final String PERIOD_PROPERTY = "period";
-
- private File file = null; // file for metrics to be written to
- private PrintWriter writer = null;
-
- /** Creates a new instance of FileContext */
- @InterfaceAudience.Private
- public FileContext() {}
-
- @InterfaceAudience.Private
- public void init(String contextName, ContextFactory factory) {
- super.init(contextName, factory);
-
- String fileName = getAttribute(FILE_NAME_PROPERTY);
- if (fileName != null) {
- file = new File(fileName);
- }
-
- parseAndSetPeriod(PERIOD_PROPERTY);
- }
-
- /**
- * Returns the configured file name, or null.
- */
- @InterfaceAudience.Private
- public String getFileName() {
- if (file == null) {
- return null;
- } else {
- return file.getName();
- }
- }
-
- /**
- * Starts or restarts monitoring, by opening in append-mode, the
- * file specified by the fileName attribute,
- * if specified. Otherwise the data will be written to standard
- * output.
- */
- @InterfaceAudience.Private
- public void startMonitoring()
- throws IOException
- {
- if (file == null) {
- writer = new PrintWriter(new BufferedOutputStream(System.out));
- } else {
- writer = new PrintWriter(new FileWriter(file, true));
- }
- super.startMonitoring();
- }
-
- /**
- * Stops monitoring, closing the file.
- * @see #close()
- */
- @InterfaceAudience.Private
- public void stopMonitoring() {
- super.stopMonitoring();
-
- if (writer != null) {
- writer.close();
- writer = null;
- }
- }
-
- /**
- * Emits a metrics record to a file.
- */
- @InterfaceAudience.Private
- public void emitRecord(String contextName, String recordName, OutputRecord outRec) {
- writer.print(contextName);
- writer.print(".");
- writer.print(recordName);
- String separator = ": ";
- for (String tagName : outRec.getTagNames()) {
- writer.print(separator);
- separator = ", ";
- writer.print(tagName);
- writer.print("=");
- writer.print(outRec.getTag(tagName));
- }
- for (String metricName : outRec.getMetricNames()) {
- writer.print(separator);
- separator = ", ";
- writer.print(metricName);
- writer.print("=");
- writer.print(outRec.getMetric(metricName));
- }
- writer.println();
- }
-
- /**
- * Flushes the output writer, forcing updates to disk.
- */
- @InterfaceAudience.Private
- public void flush() {
- writer.flush();
- }
-}
+/*
+ * FileContext.java
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.metrics.file;
+
+import java.io.BufferedOutputStream;
+import java.io.File;
+import java.io.FileWriter;
+import java.io.IOException;
+import java.io.PrintWriter;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.metrics.ContextFactory;
+import org.apache.hadoop.metrics.spi.AbstractMetricsContext;
+import org.apache.hadoop.metrics.spi.OutputRecord;
+
+/**
+ * Metrics context for writing metrics to a file.
+ *
+ * This class is configured by setting ContextFactory attributes which in turn
+ * are usually configured through a properties file. All the attributes are
+ * prefixed by the contextName. For example, the properties file might contain:
+ * <pre>
+ * myContextName.fileName=/tmp/metrics.log
+ * myContextName.period=5
+ * </pre>
+ * @see org.apache.hadoop.metrics2.sink.FileSink for metrics 2.0.
+ */
+@InterfaceAudience.Public
+@InterfaceStability.Evolving
+@Deprecated
+public class FileContext extends AbstractMetricsContext {
+
+ /* Configuration attribute names */
+ @InterfaceAudience.Private
+ protected static final String FILE_NAME_PROPERTY = "fileName";
+ @InterfaceAudience.Private
+ protected static final String PERIOD_PROPERTY = "period";
+
+ private File file = null; // file for metrics to be written to
+ private PrintWriter writer = null;
+
+ /** Creates a new instance of FileContext */
+ @InterfaceAudience.Private
+ public FileContext() {}
+
+ @InterfaceAudience.Private
+ public void init(String contextName, ContextFactory factory) {
+ super.init(contextName, factory);
+
+ String fileName = getAttribute(FILE_NAME_PROPERTY);
+ if (fileName != null) {
+ file = new File(fileName);
+ }
+
+ parseAndSetPeriod(PERIOD_PROPERTY);
+ }
+
+ /**
+ * Returns the configured file name, or null.
+ */
+ @InterfaceAudience.Private
+ public String getFileName() {
+ if (file == null) {
+ return null;
+ } else {
+ return file.getName();
+ }
+ }
+
+ /**
+ * Starts or restarts monitoring, by opening in append-mode, the
+ * file specified by the fileName attribute,
+ * if specified. Otherwise the data will be written to standard
+ * output.
+ */
+ @InterfaceAudience.Private
+ public void startMonitoring()
+ throws IOException
+ {
+ if (file == null) {
+ writer = new PrintWriter(new BufferedOutputStream(System.out));
+ } else {
+ writer = new PrintWriter(new FileWriter(file, true));
+ }
+ super.startMonitoring();
+ }
+
+ /**
+ * Stops monitoring, closing the file.
+ * @see #close()
+ */
+ @InterfaceAudience.Private
+ public void stopMonitoring() {
+ super.stopMonitoring();
+
+ if (writer != null) {
+ writer.close();
+ writer = null;
+ }
+ }
+
+ /**
+ * Emits a metrics record to a file.
+ */
+ @InterfaceAudience.Private
+ public void emitRecord(String contextName, String recordName, OutputRecord outRec) {
+ writer.print(contextName);
+ writer.print(".");
+ writer.print(recordName);
+ String separator = ": ";
+ for (String tagName : outRec.getTagNames()) {
+ writer.print(separator);
+ separator = ", ";
+ writer.print(tagName);
+ writer.print("=");
+ writer.print(outRec.getTag(tagName));
+ }
+ for (String metricName : outRec.getMetricNames()) {
+ writer.print(separator);
+ separator = ", ";
+ writer.print(metricName);
+ writer.print("=");
+ writer.print(outRec.getMetric(metricName));
+ }
+ writer.println();
+ }
+
+ /**
+ * Flushes the output writer, forcing updates to disk.
+ */
+ @InterfaceAudience.Private
+ public void flush() {
+ writer.flush();
+ }
+}
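FileContext is normally selected and configured through hadoop-metrics.properties, but the same wiring can be expressed programmatically through ContextFactory attributes, which may be easier to follow next to the code above. The context name and file path below are illustrative assumptions:

    import org.apache.hadoop.metrics.ContextFactory;
    import org.apache.hadoop.metrics.MetricsContext;

    public class FileContextUsage {
      public static void main(String[] args) throws Exception {
        ContextFactory factory = ContextFactory.getFactory();
        // Equivalent to the properties-file form:
        //   myContextName.class=org.apache.hadoop.metrics.file.FileContext
        //   myContextName.fileName=/tmp/metrics.log
        //   myContextName.period=5
        factory.setAttribute("myContextName.class",
            "org.apache.hadoop.metrics.file.FileContext");
        factory.setAttribute("myContextName.fileName", "/tmp/metrics.log");
        factory.setAttribute("myContextName.period", "5");

        MetricsContext context = factory.getContext("myContextName");
        context.startMonitoring();  // opens /tmp/metrics.log in append mode
      }
    }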
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics/spi/AbstractMetricsContext.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics/spi/AbstractMetricsContext.java
index 947b0a12958..6e1e210e670 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics/spi/AbstractMetricsContext.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics/spi/AbstractMetricsContext.java
@@ -1,481 +1,481 @@
-/*
- * AbstractMetricsContext.java
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.metrics.spi;
-
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.Iterator;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-import java.util.Timer;
-import java.util.TimerTask;
-import java.util.TreeMap;
-import java.util.Map.Entry;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.hadoop.metrics.ContextFactory;
-import org.apache.hadoop.metrics.MetricsContext;
-import org.apache.hadoop.metrics.MetricsException;
-import org.apache.hadoop.metrics.MetricsRecord;
-import org.apache.hadoop.metrics.Updater;
-
-/**
- * The main class of the Service Provider Interface. This class should be
- * extended in order to integrate the Metrics API with a specific metrics
- * client library.
- *
- * This class implements the internal table of metric data, and the timer
- * on which data is to be sent to the metrics system. Subclasses must
- * override the abstract emitRecord method in order to transmit
- * the data.
- */
-@InterfaceAudience.Public
-@InterfaceStability.Evolving
-public abstract class AbstractMetricsContext implements MetricsContext {
-
- private int period = MetricsContext.DEFAULT_PERIOD;
- private Timer timer = null;
-
- private Set<Updater> updaters = new HashSet<Updater>(1);
- private volatile boolean isMonitoring = false;
-
- private ContextFactory factory = null;
- private String contextName = null;
-
- @InterfaceAudience.Private
- public static class TagMap extends TreeMap<String,String> {
- private static final long serialVersionUID = 3546309335061952993L;
- TagMap() {
- super();
- }
- TagMap(TagMap orig) {
- super(orig);
- }
- /**
- * Returns true if this tagmap contains every tag in other.
- */
- public boolean containsAll(TagMap other) {
- for (Map.Entry<String,String> entry : other.entrySet()) {
- Object value = get(entry.getKey());
- if (value == null || !value.equals(entry.getValue())) {
- // either key does not exist here, or the value is different
- return false;
- }
- }
- return true;
- }
- }
-
- @InterfaceAudience.Private
- public static class MetricMap extends TreeMap<String,Number> {
- private static final long serialVersionUID = -7495051861141631609L;
- MetricMap() {
- super();
- }
- MetricMap(MetricMap orig) {
- super(orig);
- }
- }
-
- static class RecordMap extends HashMap<TagMap,MetricMap> {
- private static final long serialVersionUID = 259835619700264611L;
- }
-
- private Map<String,RecordMap> bufferedData = new HashMap<String,RecordMap>();
-
-
- /**
- * Creates a new instance of AbstractMetricsContext
- */
- protected AbstractMetricsContext() {
- }
-
- /**
- * Initializes the context.
- */
- public void init(String contextName, ContextFactory factory)
- {
- this.contextName = contextName;
- this.factory = factory;
- }
-
- /**
- * Convenience method for subclasses to access factory attributes.
- */
- protected String getAttribute(String attributeName) {
- String factoryAttribute = contextName + "." + attributeName;
- return (String) factory.getAttribute(factoryAttribute);
- }
-
- /**
- * Returns an attribute-value map derived from the factory attributes
- * by finding all factory attributes that begin with
- * contextName.tableName. The returned map consists of
- * those attributes with the contextName and tableName stripped off.
- */
- protected Map<String,String> getAttributeTable(String tableName) {
- String prefix = contextName + "." + tableName + ".";
- Map<String,String> result = new HashMap<String,String>();
- for (String attributeName : factory.getAttributeNames()) {
- if (attributeName.startsWith(prefix)) {
- String name = attributeName.substring(prefix.length());
- String value = (String) factory.getAttribute(attributeName);
- result.put(name, value);
- }
- }
- return result;
- }
-
- /**
- * Returns the context name.
- */
- public String getContextName() {
- return contextName;
- }
-
- /**
- * Returns the factory by which this context was created.
- */
- public ContextFactory getContextFactory() {
- return factory;
- }
-
- /**
- * Starts or restarts monitoring, the emitting of metrics records.
- */
- public synchronized void startMonitoring()
- throws IOException {
- if (!isMonitoring) {
- startTimer();
- isMonitoring = true;
- }
- }
-
- /**
- * Stops monitoring. This does not free buffered data.
- * @see #close()
- */
- public synchronized void stopMonitoring() {
- if (isMonitoring) {
- stopTimer();
- isMonitoring = false;
- }
- }
-
- /**
- * Returns true if monitoring is currently in progress.
- */
- public boolean isMonitoring() {
- return isMonitoring;
- }
-
- /**
- * Stops monitoring and frees buffered data, returning this
- * object to its initial state.
- */
- public synchronized void close() {
- stopMonitoring();
- clearUpdaters();
- }
-
- /**
- * Creates a new AbstractMetricsRecord instance with the given recordName.
- * Throws an exception if the metrics implementation is configured with a fixed
- * set of record names and recordName is not in that set.
- *
- * @param recordName the name of the record
- * @throws MetricsException if recordName conflicts with configuration data
- */
- public final synchronized MetricsRecord createRecord(String recordName) {
- if (bufferedData.get(recordName) == null) {
- bufferedData.put(recordName, new RecordMap());
- }
- return newRecord(recordName);
- }
-
- /**
- * Subclasses should override this if they subclass MetricsRecordImpl.
- * @param recordName the name of the record
- * @return newly created instance of MetricsRecordImpl or subclass
- */
- protected MetricsRecord newRecord(String recordName) {
- return new MetricsRecordImpl(recordName, this);
- }
-
- /**
- * Registers a callback to be called at time intervals determined by
- * the configuration.
- *
- * @param updater object to be run periodically; it should update
- * some metrics records
- */
- public synchronized void registerUpdater(final Updater updater) {
- if (!updaters.contains(updater)) {
- updaters.add(updater);
- }
- }
-
- /**
- * Removes a callback, if it exists.
- *
- * @param updater object to be removed from the callback list
- */
- public synchronized void unregisterUpdater(Updater updater) {
- updaters.remove(updater);
- }
-
- private synchronized void clearUpdaters() {
- updaters.clear();
- }
-
- /**
- * Starts timer if it is not already started
- */
- private synchronized void startTimer() {
- if (timer == null) {
- timer = new Timer("Timer thread for monitoring " + getContextName(),
- true);
- TimerTask task = new TimerTask() {
- public void run() {
- try {
- timerEvent();
- }
- catch (IOException ioe) {
- ioe.printStackTrace();
- }
- }
- };
- long millis = period * 1000;
- timer.scheduleAtFixedRate(task, millis, millis);
- }
- }
-
- /**
- * Stops timer if it is running
- */
- private synchronized void stopTimer() {
- if (timer != null) {
- timer.cancel();
- timer = null;
- }
- }
-
- /**
- * Timer callback.
- */
- private void timerEvent() throws IOException {
- if (isMonitoring) {
- Collection<Updater> myUpdaters;
- synchronized (this) {
- myUpdaters = new ArrayList<Updater>(updaters);
- }
- // Run all the registered updates without holding a lock
- // on this context
- for (Updater updater : myUpdaters) {
- try {
- updater.doUpdates(this);
- }
- catch (Throwable throwable) {
- throwable.printStackTrace();
- }
- }
- emitRecords();
- }
- }
-
- /**
- * Emits the records.
- */
- private synchronized void emitRecords() throws IOException {
- for (String recordName : bufferedData.keySet()) {
- RecordMap recordMap = bufferedData.get(recordName);
- synchronized (recordMap) {
- Set<Entry<TagMap,MetricMap>> entrySet = recordMap.entrySet();
- for (Entry<TagMap,MetricMap> entry : entrySet) {
- OutputRecord outRec = new OutputRecord(entry.getKey(), entry.getValue());
- emitRecord(contextName, recordName, outRec);
- }
- }
- }
- flush();
- }
-
- /**
- * Retrieves all the records managed by this MetricsContext.
- * Useful for monitoring systems that are polling-based.
- * @return A non-null collection of all monitoring records.
- */
- public synchronized Map<String,Collection<OutputRecord>> getAllRecords() {
- Map<String,Collection<OutputRecord>> out = new TreeMap<String,Collection<OutputRecord>>();
- for (String recordName : bufferedData.keySet()) {
- RecordMap recordMap = bufferedData.get(recordName);
- synchronized (recordMap) {
- List<OutputRecord> records = new ArrayList<OutputRecord>();
- Set<Entry<TagMap,MetricMap>> entrySet = recordMap.entrySet();
- for (Entry<TagMap,MetricMap> entry : entrySet) {
- OutputRecord outRec = new OutputRecord(entry.getKey(), entry.getValue());
- records.add(outRec);
- }
- out.put(recordName, records);
- }
- }
- return out;
- }
-
- /**
- * Sends a record to the metrics system.
- */
- protected abstract void emitRecord(String contextName, String recordName,
- OutputRecord outRec) throws IOException;
-
- /**
- * Called each period after all records have been emitted, this method does nothing.
- * Subclasses may override it in order to perform some kind of flush.
- */
- protected void flush() throws IOException {
- }
-
- /**
- * Called by MetricsRecordImpl.update(). Creates or updates a row in
- * the internal table of metric data.
- */
- protected void update(MetricsRecordImpl record) {
- String recordName = record.getRecordName();
- TagMap tagTable = record.getTagTable();
- Map<String,MetricValue> metricUpdates = record.getMetricTable();
-
- RecordMap recordMap = getRecordMap(recordName);
- synchronized (recordMap) {
- MetricMap metricMap = recordMap.get(tagTable);
- if (metricMap == null) {
- metricMap = new MetricMap();
- TagMap tagMap = new TagMap(tagTable); // clone tags
- recordMap.put(tagMap, metricMap);
- }
-
- Set<Entry<String,MetricValue>> entrySet = metricUpdates.entrySet();
- for (Entry<String,MetricValue> entry : entrySet) {
- String metricName = entry.getKey();
- MetricValue updateValue = entry.getValue();
- Number updateNumber = updateValue.getNumber();
- Number currentNumber = metricMap.get(metricName);
- if (currentNumber == null || updateValue.isAbsolute()) {
- metricMap.put(metricName, updateNumber);
- }
- else {
- Number newNumber = sum(updateNumber, currentNumber);
- metricMap.put(metricName, newNumber);
- }
- }
- }
- }
-
- private synchronized RecordMap getRecordMap(String recordName) {
- return bufferedData.get(recordName);
- }
-
- /**
- * Adds two numbers, coercing the second to the type of the first.
- *
- */
- private Number sum(Number a, Number b) {
- if (a instanceof Integer) {
- return Integer.valueOf(a.intValue() + b.intValue());
- }
- else if (a instanceof Float) {
- return new Float(a.floatValue() + b.floatValue());
- }
- else if (a instanceof Short) {
- return Short.valueOf((short)(a.shortValue() + b.shortValue()));
- }
- else if (a instanceof Byte) {
- return Byte.valueOf((byte)(a.byteValue() + b.byteValue()));
- }
- else if (a instanceof Long) {
- return Long.valueOf((a.longValue() + b.longValue()));
- }
- else {
- // should never happen
- throw new MetricsException("Invalid number type");
- }
-
- }
-
- /**
- * Called by MetricsRecordImpl.remove(). Removes all matching rows in
- * the internal table of metric data. A row matches if it has the same
- * tag names and values as record, but it may also have additional
- * tags.
- */
- protected void remove(MetricsRecordImpl record) {
- String recordName = record.getRecordName();
- TagMap tagTable = record.getTagTable();
-
- RecordMap recordMap = getRecordMap(recordName);
- synchronized (recordMap) {
- Iterator<TagMap> it = recordMap.keySet().iterator();
- while (it.hasNext()) {
- TagMap rowTags = it.next();
- if (rowTags.containsAll(tagTable)) {
- it.remove();
- }
- }
- }
- }
-
- /**
- * Returns the timer period.
- */
- public int getPeriod() {
- return period;
- }
-
- /**
- * Sets the timer period
- */
- protected void setPeriod(int period) {
- this.period = period;
- }
-
- /**
- * If a period is set in the attribute passed in, override
- * the default with it.
- */
- protected void parseAndSetPeriod(String attributeName) {
- String periodStr = getAttribute(attributeName);
- if (periodStr != null) {
- int period = 0;
- try {
- period = Integer.parseInt(periodStr);
- } catch (NumberFormatException nfe) {
- }
- if (period <= 0) {
- throw new MetricsException("Invalid period: " + periodStr);
- }
- setPeriod(period);
- }
- }
-}
+/*
+ * AbstractMetricsContext.java
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.metrics.spi;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.Timer;
+import java.util.TimerTask;
+import java.util.TreeMap;
+import java.util.Map.Entry;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.metrics.ContextFactory;
+import org.apache.hadoop.metrics.MetricsContext;
+import org.apache.hadoop.metrics.MetricsException;
+import org.apache.hadoop.metrics.MetricsRecord;
+import org.apache.hadoop.metrics.Updater;
+
+/**
+ * The main class of the Service Provider Interface. This class should be
+ * extended in order to integrate the Metrics API with a specific metrics
+ * client library.
+ *
+ * This class implements the internal table of metric data, and the timer
+ * on which data is to be sent to the metrics system. Subclasses must
+ * override the abstract emitRecord method in order to transmit
+ * the data.
+ */
+@InterfaceAudience.Public
+@InterfaceStability.Evolving
+public abstract class AbstractMetricsContext implements MetricsContext {
+
+ private int period = MetricsContext.DEFAULT_PERIOD;
+ private Timer timer = null;
+
+ private Set<Updater> updaters = new HashSet<Updater>(1);
+ private volatile boolean isMonitoring = false;
+
+ private ContextFactory factory = null;
+ private String contextName = null;
+
+ @InterfaceAudience.Private
+ public static class TagMap extends TreeMap<String,Object> {
+ private static final long serialVersionUID = 3546309335061952993L;
+ TagMap() {
+ super();
+ }
+ TagMap(TagMap orig) {
+ super(orig);
+ }
+ /**
+ * Returns true if this tagmap contains every tag in other.
+ */
+ public boolean containsAll(TagMap other) {
+ for (Map.Entry<String,Object> entry : other.entrySet()) {
+ Object value = get(entry.getKey());
+ if (value == null || !value.equals(entry.getValue())) {
+ // either key does not exist here, or the value is different
+ return false;
+ }
+ }
+ return true;
+ }
+ }
+
+ @InterfaceAudience.Private
+ public static class MetricMap extends TreeMap<String,Number> {
+ private static final long serialVersionUID = -7495051861141631609L;
+ MetricMap() {
+ super();
+ }
+ MetricMap(MetricMap orig) {
+ super(orig);
+ }
+ }
+
+ static class RecordMap extends HashMap<TagMap,MetricMap> {
+ private static final long serialVersionUID = 259835619700264611L;
+ }
+
+ private Map<String,RecordMap> bufferedData = new HashMap<String,RecordMap>();
+
+
+ /**
+ * Creates a new instance of AbstractMetricsContext
+ */
+ protected AbstractMetricsContext() {
+ }
+
+ /**
+ * Initializes the context.
+ */
+ public void init(String contextName, ContextFactory factory)
+ {
+ this.contextName = contextName;
+ this.factory = factory;
+ }
+
+ /**
+ * Convenience method for subclasses to access factory attributes.
+ */
+ protected String getAttribute(String attributeName) {
+ String factoryAttribute = contextName + "." + attributeName;
+ return (String) factory.getAttribute(factoryAttribute);
+ }
+
+ /**
+ * Returns an attribute-value map derived from the factory attributes
+ * by finding all factory attributes that begin with
+ * contextName.tableName. The returned map consists of
+ * those attributes with the contextName and tableName stripped off.
+ */
+ protected Map<String,String> getAttributeTable(String tableName) {
+ String prefix = contextName + "." + tableName + ".";
+ Map<String,String> result = new HashMap<String,String>();
+ for (String attributeName : factory.getAttributeNames()) {
+ if (attributeName.startsWith(prefix)) {
+ String name = attributeName.substring(prefix.length());
+ String value = (String) factory.getAttribute(attributeName);
+ result.put(name, value);
+ }
+ }
+ return result;
+ }
+
+ /**
+ * Returns the context name.
+ */
+ public String getContextName() {
+ return contextName;
+ }
+
+ /**
+ * Returns the factory by which this context was created.
+ */
+ public ContextFactory getContextFactory() {
+ return factory;
+ }
+
+ /**
+ * Starts or restarts monitoring, the emitting of metrics records.
+ */
+ public synchronized void startMonitoring()
+ throws IOException {
+ if (!isMonitoring) {
+ startTimer();
+ isMonitoring = true;
+ }
+ }
+
+ /**
+ * Stops monitoring. This does not free buffered data.
+ * @see #close()
+ */
+ public synchronized void stopMonitoring() {
+ if (isMonitoring) {
+ stopTimer();
+ isMonitoring = false;
+ }
+ }
+
+ /**
+ * Returns true if monitoring is currently in progress.
+ */
+ public boolean isMonitoring() {
+ return isMonitoring;
+ }
+
+ /**
+ * Stops monitoring and frees buffered data, returning this
+ * object to its initial state.
+ */
+ public synchronized void close() {
+ stopMonitoring();
+ clearUpdaters();
+ }
+
+ /**
+ * Creates a new AbstractMetricsRecord instance with the given recordName.
+ * Throws an exception if the metrics implementation is configured with a fixed
+ * set of record names and recordName is not in that set.
+ *
+ * @param recordName the name of the record
+ * @throws MetricsException if recordName conflicts with configuration data
+ */
+ public final synchronized MetricsRecord createRecord(String recordName) {
+ if (bufferedData.get(recordName) == null) {
+ bufferedData.put(recordName, new RecordMap());
+ }
+ return newRecord(recordName);
+ }
+
+ /**
+ * Subclasses should override this if they subclass MetricsRecordImpl.
+ * @param recordName the name of the record
+ * @return newly created instance of MetricsRecordImpl or subclass
+ */
+ protected MetricsRecord newRecord(String recordName) {
+ return new MetricsRecordImpl(recordName, this);
+ }
+
+ /**
+ * Registers a callback to be called at time intervals determined by
+ * the configuration.
+ *
+ * @param updater object to be run periodically; it should update
+ * some metrics records
+ */
+ public synchronized void registerUpdater(final Updater updater) {
+ if (!updaters.contains(updater)) {
+ updaters.add(updater);
+ }
+ }
+
+ /**
+ * Removes a callback, if it exists.
+ *
+ * @param updater object to be removed from the callback list
+ */
+ public synchronized void unregisterUpdater(Updater updater) {
+ updaters.remove(updater);
+ }
+
+ private synchronized void clearUpdaters() {
+ updaters.clear();
+ }
+
+ /**
+ * Starts timer if it is not already started
+ */
+ private synchronized void startTimer() {
+ if (timer == null) {
+ timer = new Timer("Timer thread for monitoring " + getContextName(),
+ true);
+ TimerTask task = new TimerTask() {
+ public void run() {
+ try {
+ timerEvent();
+ }
+ catch (IOException ioe) {
+ ioe.printStackTrace();
+ }
+ }
+ };
+ long millis = period * 1000;
+ timer.scheduleAtFixedRate(task, millis, millis);
+ }
+ }
+
+ /**
+ * Stops timer if it is running
+ */
+ private synchronized void stopTimer() {
+ if (timer != null) {
+ timer.cancel();
+ timer = null;
+ }
+ }
+
+ /**
+ * Timer callback.
+ */
+ private void timerEvent() throws IOException {
+ if (isMonitoring) {
+ Collection<Updater> myUpdaters;
+ synchronized (this) {
+ myUpdaters = new ArrayList<Updater>(updaters);
+ }
+ // Run all the registered updates without holding a lock
+ // on this context
+ for (Updater updater : myUpdaters) {
+ try {
+ updater.doUpdates(this);
+ }
+ catch (Throwable throwable) {
+ throwable.printStackTrace();
+ }
+ }
+ emitRecords();
+ }
+ }
+
+ /**
+ * Emits the records.
+ */
+ private synchronized void emitRecords() throws IOException {
+ for (String recordName : bufferedData.keySet()) {
+ RecordMap recordMap = bufferedData.get(recordName);
+ synchronized (recordMap) {
+ Set<Entry<TagMap,MetricMap>> entrySet = recordMap.entrySet();
+ for (Entry<TagMap,MetricMap> entry : entrySet) {
+ OutputRecord outRec = new OutputRecord(entry.getKey(), entry.getValue());
+ emitRecord(contextName, recordName, outRec);
+ }
+ }
+ }
+ flush();
+ }
+
+ /**
+ * Retrieves all the records managed by this MetricsContext.
+ * Useful for monitoring systems that are polling-based.
+ * @return A non-null collection of all monitoring records.
+ */
+ public synchronized Map<String,Collection<OutputRecord>> getAllRecords() {
+ Map<String,Collection<OutputRecord>> out = new TreeMap<String,Collection<OutputRecord>>();
+ for (String recordName : bufferedData.keySet()) {
+ RecordMap recordMap = bufferedData.get(recordName);
+ synchronized (recordMap) {
+ List<OutputRecord> records = new ArrayList<OutputRecord>();
+ Set<Entry<TagMap,MetricMap>> entrySet = recordMap.entrySet();
+ for (Entry<TagMap,MetricMap> entry : entrySet) {
+ OutputRecord outRec = new OutputRecord(entry.getKey(), entry.getValue());
+ records.add(outRec);
+ }
+ out.put(recordName, records);
+ }
+ }
+ return out;
+ }
+
+ /**
+ * Sends a record to the metrics system.
+ */
+ protected abstract void emitRecord(String contextName, String recordName,
+ OutputRecord outRec) throws IOException;
+
+ /**
+ * Called each period after all records have been emitted, this method does nothing.
+ * Subclasses may override it in order to perform some kind of flush.
+ */
+ protected void flush() throws IOException {
+ }
+
+ /**
+ * Called by MetricsRecordImpl.update(). Creates or updates a row in
+ * the internal table of metric data.
+ */
+ protected void update(MetricsRecordImpl record) {
+ String recordName = record.getRecordName();
+ TagMap tagTable = record.getTagTable();
+ Map<String,MetricValue> metricUpdates = record.getMetricTable();
+
+ RecordMap recordMap = getRecordMap(recordName);
+ synchronized (recordMap) {
+ MetricMap metricMap = recordMap.get(tagTable);
+ if (metricMap == null) {
+ metricMap = new MetricMap();
+ TagMap tagMap = new TagMap(tagTable); // clone tags
+ recordMap.put(tagMap, metricMap);
+ }
+
+ Set<Entry<String,MetricValue>> entrySet = metricUpdates.entrySet();
+ for (Entry<String,MetricValue> entry : entrySet) {
+ String metricName = entry.getKey();
+ MetricValue updateValue = entry.getValue();
+ Number updateNumber = updateValue.getNumber();
+ Number currentNumber = metricMap.get(metricName);
+ if (currentNumber == null || updateValue.isAbsolute()) {
+ metricMap.put(metricName, updateNumber);
+ }
+ else {
+ Number newNumber = sum(updateNumber, currentNumber);
+ metricMap.put(metricName, newNumber);
+ }
+ }
+ }
+ }
+
+ private synchronized RecordMap getRecordMap(String recordName) {
+ return bufferedData.get(recordName);
+ }
+
+ /**
+ * Adds two numbers, coercing the second to the type of the first.
+ *
+ */
+ private Number sum(Number a, Number b) {
+ if (a instanceof Integer) {
+ return Integer.valueOf(a.intValue() + b.intValue());
+ }
+ else if (a instanceof Float) {
+ return new Float(a.floatValue() + b.floatValue());
+ }
+ else if (a instanceof Short) {
+ return Short.valueOf((short)(a.shortValue() + b.shortValue()));
+ }
+ else if (a instanceof Byte) {
+ return Byte.valueOf((byte)(a.byteValue() + b.byteValue()));
+ }
+ else if (a instanceof Long) {
+ return Long.valueOf((a.longValue() + b.longValue()));
+ }
+ else {
+ // should never happen
+ throw new MetricsException("Invalid number type");
+ }
+
+ }
+
+ /**
+ * Called by MetricsRecordImpl.remove(). Removes all matching rows in
+ * the internal table of metric data. A row matches if it has the same
+ * tag names and values as record, but it may also have additional
+ * tags.
+ */
+ protected void remove(MetricsRecordImpl record) {
+ String recordName = record.getRecordName();
+ TagMap tagTable = record.getTagTable();
+
+ RecordMap recordMap = getRecordMap(recordName);
+ synchronized (recordMap) {
+ Iterator<TagMap> it = recordMap.keySet().iterator();
+ while (it.hasNext()) {
+ TagMap rowTags = it.next();
+ if (rowTags.containsAll(tagTable)) {
+ it.remove();
+ }
+ }
+ }
+ }
+
+ /**
+ * Returns the timer period.
+ */
+ public int getPeriod() {
+ return period;
+ }
+
+ /**
+ * Sets the timer period
+ */
+ protected void setPeriod(int period) {
+ this.period = period;
+ }
+
+ /**
+ * If a period is set in the attribute passed in, override
+ * the default with it.
+ */
+ protected void parseAndSetPeriod(String attributeName) {
+ String periodStr = getAttribute(attributeName);
+ if (periodStr != null) {
+ int period = 0;
+ try {
+ period = Integer.parseInt(periodStr);
+ } catch (NumberFormatException nfe) {
+ }
+ if (period <= 0) {
+ throw new MetricsException("Invalid period: " + periodStr);
+ }
+ setPeriod(period);
+ }
+ }
+}
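To make the SPI contract above concrete, here is a minimal, illustrative subclass (not part of this patch; the package, class name and output format are invented): it overrides the abstract emitRecord hook to write each buffered OutputRecord to standard output, and reads its period from the "<contextName>.period" factory attribute via parseAndSetPeriod.

    package org.example.metrics;

    import java.io.IOException;

    import org.apache.hadoop.metrics.ContextFactory;
    import org.apache.hadoop.metrics.spi.AbstractMetricsContext;
    import org.apache.hadoop.metrics.spi.OutputRecord;

    /** Hypothetical plugin: prints every emitted record to stdout. */
    public class StdoutContext extends AbstractMetricsContext {

      @Override
      public void init(String contextName, ContextFactory factory) {
        super.init(contextName, factory);
        // Honour an optional "<contextName>.period" attribute; otherwise the
        // default from MetricsContext.DEFAULT_PERIOD is kept.
        parseAndSetPeriod("period");
      }

      @Override
      protected void emitRecord(String contextName, String recordName,
                                OutputRecord outRec) throws IOException {
        // Called once per buffered row, every period, from the timer thread.
        System.out.println(contextName + "." + recordName + ": " + outRec);
      }
    }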
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics/spi/MetricsRecordImpl.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics/spi/MetricsRecordImpl.java
index 85cf00e0918..0c379b6a329 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics/spi/MetricsRecordImpl.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics/spi/MetricsRecordImpl.java
@@ -1,281 +1,281 @@
-/*
- * MetricsRecordImpl.java
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.metrics.spi;
-
-import java.util.LinkedHashMap;
-import java.util.Map;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.hadoop.metrics.MetricsException;
-import org.apache.hadoop.metrics.MetricsRecord;
-import org.apache.hadoop.metrics.spi.AbstractMetricsContext.TagMap;
-
-/**
- * An implementation of MetricsRecord. Keeps a back-pointer to the context
- * from which it was created, and delegates back to it on update
- * and remove().
- */
-@InterfaceAudience.Public
-@InterfaceStability.Evolving
-public class MetricsRecordImpl implements MetricsRecord {
-
- private TagMap tagTable = new TagMap();
- private Map<String,MetricValue> metricTable = new LinkedHashMap<String,MetricValue>();
-
- private String recordName;
- private AbstractMetricsContext context;
-
-
- /** Creates a new instance of MetricsRecordImpl */
- protected MetricsRecordImpl(String recordName, AbstractMetricsContext context)
- {
- this.recordName = recordName;
- this.context = context;
- }
-
- /**
- * Returns the record name.
- *
- * @return the record name
- */
- public String getRecordName() {
- return recordName;
- }
-
- /**
- * Sets the named tag to the specified value.
- *
- * @param tagName name of the tag
- * @param tagValue new value of the tag
- * @throws MetricsException if the tagName conflicts with the configuration
- */
- public void setTag(String tagName, String tagValue) {
- if (tagValue == null) {
- tagValue = "";
- }
- tagTable.put(tagName, tagValue);
- }
-
- /**
- * Sets the named tag to the specified value.
- *
- * @param tagName name of the tag
- * @param tagValue new value of the tag
- * @throws MetricsException if the tagName conflicts with the configuration
- */
- public void setTag(String tagName, int tagValue) {
- tagTable.put(tagName, Integer.valueOf(tagValue));
- }
-
- /**
- * Sets the named tag to the specified value.
- *
- * @param tagName name of the tag
- * @param tagValue new value of the tag
- * @throws MetricsException if the tagName conflicts with the configuration
- */
- public void setTag(String tagName, long tagValue) {
- tagTable.put(tagName, Long.valueOf(tagValue));
- }
-
- /**
- * Sets the named tag to the specified value.
- *
- * @param tagName name of the tag
- * @param tagValue new value of the tag
- * @throws MetricsException if the tagName conflicts with the configuration
- */
- public void setTag(String tagName, short tagValue) {
- tagTable.put(tagName, Short.valueOf(tagValue));
- }
-
- /**
- * Sets the named tag to the specified value.
- *
- * @param tagName name of the tag
- * @param tagValue new value of the tag
- * @throws MetricsException if the tagName conflicts with the configuration
- */
- public void setTag(String tagName, byte tagValue) {
- tagTable.put(tagName, Byte.valueOf(tagValue));
- }
-
- /**
- * Removes any tag of the specified name.
- */
- public void removeTag(String tagName) {
- tagTable.remove(tagName);
- }
-
- /**
- * Sets the named metric to the specified value.
- *
- * @param metricName name of the metric
- * @param metricValue new value of the metric
- * @throws MetricsException if the metricName or the type of the metricValue
- * conflicts with the configuration
- */
- public void setMetric(String metricName, int metricValue) {
- setAbsolute(metricName, Integer.valueOf(metricValue));
- }
-
- /**
- * Sets the named metric to the specified value.
- *
- * @param metricName name of the metric
- * @param metricValue new value of the metric
- * @throws MetricsException if the metricName or the type of the metricValue
- * conflicts with the configuration
- */
- public void setMetric(String metricName, long metricValue) {
- setAbsolute(metricName, Long.valueOf(metricValue));
- }
-
- /**
- * Sets the named metric to the specified value.
- *
- * @param metricName name of the metric
- * @param metricValue new value of the metric
- * @throws MetricsException if the metricName or the type of the metricValue
- * conflicts with the configuration
- */
- public void setMetric(String metricName, short metricValue) {
- setAbsolute(metricName, Short.valueOf(metricValue));
- }
-
- /**
- * Sets the named metric to the specified value.
- *
- * @param metricName name of the metric
- * @param metricValue new value of the metric
- * @throws MetricsException if the metricName or the type of the metricValue
- * conflicts with the configuration
- */
- public void setMetric(String metricName, byte metricValue) {
- setAbsolute(metricName, Byte.valueOf(metricValue));
- }
-
- /**
- * Sets the named metric to the specified value.
- *
- * @param metricName name of the metric
- * @param metricValue new value of the metric
- * @throws MetricsException if the metricName or the type of the metricValue
- * conflicts with the configuration
- */
- public void setMetric(String metricName, float metricValue) {
- setAbsolute(metricName, new Float(metricValue));
- }
-
- /**
- * Increments the named metric by the specified value.
- *
- * @param metricName name of the metric
- * @param metricValue incremental value
- * @throws MetricsException if the metricName or the type of the metricValue
- * conflicts with the configuration
- */
- public void incrMetric(String metricName, int metricValue) {
- setIncrement(metricName, Integer.valueOf(metricValue));
- }
-
- /**
- * Increments the named metric by the specified value.
- *
- * @param metricName name of the metric
- * @param metricValue incremental value
- * @throws MetricsException if the metricName or the type of the metricValue
- * conflicts with the configuration
- */
- public void incrMetric(String metricName, long metricValue) {
- setIncrement(metricName, Long.valueOf(metricValue));
- }
-
- /**
- * Increments the named metric by the specified value.
- *
- * @param metricName name of the metric
- * @param metricValue incremental value
- * @throws MetricsException if the metricName or the type of the metricValue
- * conflicts with the configuration
- */
- public void incrMetric(String metricName, short metricValue) {
- setIncrement(metricName, Short.valueOf(metricValue));
- }
-
- /**
- * Increments the named metric by the specified value.
- *
- * @param metricName name of the metric
- * @param metricValue incremental value
- * @throws MetricsException if the metricName or the type of the metricValue
- * conflicts with the configuration
- */
- public void incrMetric(String metricName, byte metricValue) {
- setIncrement(metricName, Byte.valueOf(metricValue));
- }
-
- /**
- * Increments the named metric by the specified value.
- *
- * @param metricName name of the metric
- * @param metricValue incremental value
- * @throws MetricsException if the metricName or the type of the metricValue
- * conflicts with the configuration
- */
- public void incrMetric(String metricName, float metricValue) {
- setIncrement(metricName, new Float(metricValue));
- }
-
- private void setAbsolute(String metricName, Number metricValue) {
- metricTable.put(metricName, new MetricValue(metricValue, MetricValue.ABSOLUTE));
- }
-
- private void setIncrement(String metricName, Number metricValue) {
- metricTable.put(metricName, new MetricValue(metricValue, MetricValue.INCREMENT));
- }
-
- /**
- * Updates the table of buffered data which is to be sent periodically.
- * If the tag values match an existing row, that row is updated;
- * otherwise, a new row is added.
- */
- public void update() {
- context.update(this);
- }
-
- /**
- * Removes the row, if it exists, in the buffered data table having tags
- * that equal the tags that have been set on this record.
- */
- public void remove() {
- context.remove(this);
- }
-
- TagMap getTagTable() {
- return tagTable;
- }
-
- Map<String,MetricValue> getMetricTable() {
- return metricTable;
- }
-}
+/*
+ * MetricsRecordImpl.java
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.metrics.spi;
+
+import java.util.LinkedHashMap;
+import java.util.Map;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.metrics.MetricsException;
+import org.apache.hadoop.metrics.MetricsRecord;
+import org.apache.hadoop.metrics.spi.AbstractMetricsContext.TagMap;
+
+/**
+ * An implementation of MetricsRecord. Keeps a back-pointer to the context
+ * from which it was created, and delegates back to it on update
+ * and remove().
+ */
+@InterfaceAudience.Public
+@InterfaceStability.Evolving
+public class MetricsRecordImpl implements MetricsRecord {
+
+ private TagMap tagTable = new TagMap();
+ private Map<String,MetricValue> metricTable = new LinkedHashMap<String,MetricValue>();
+
+ private String recordName;
+ private AbstractMetricsContext context;
+
+
+ /** Creates a new instance of MetricsRecordImpl */
+ protected MetricsRecordImpl(String recordName, AbstractMetricsContext context)
+ {
+ this.recordName = recordName;
+ this.context = context;
+ }
+
+ /**
+ * Returns the record name.
+ *
+ * @return the record name
+ */
+ public String getRecordName() {
+ return recordName;
+ }
+
+ /**
+ * Sets the named tag to the specified value.
+ *
+ * @param tagName name of the tag
+ * @param tagValue new value of the tag
+ * @throws MetricsException if the tagName conflicts with the configuration
+ */
+ public void setTag(String tagName, String tagValue) {
+ if (tagValue == null) {
+ tagValue = "";
+ }
+ tagTable.put(tagName, tagValue);
+ }
+
+ /**
+ * Sets the named tag to the specified value.
+ *
+ * @param tagName name of the tag
+ * @param tagValue new value of the tag
+ * @throws MetricsException if the tagName conflicts with the configuration
+ */
+ public void setTag(String tagName, int tagValue) {
+ tagTable.put(tagName, Integer.valueOf(tagValue));
+ }
+
+ /**
+ * Sets the named tag to the specified value.
+ *
+ * @param tagName name of the tag
+ * @param tagValue new value of the tag
+ * @throws MetricsException if the tagName conflicts with the configuration
+ */
+ public void setTag(String tagName, long tagValue) {
+ tagTable.put(tagName, Long.valueOf(tagValue));
+ }
+
+ /**
+ * Sets the named tag to the specified value.
+ *
+ * @param tagName name of the tag
+ * @param tagValue new value of the tag
+ * @throws MetricsException if the tagName conflicts with the configuration
+ */
+ public void setTag(String tagName, short tagValue) {
+ tagTable.put(tagName, Short.valueOf(tagValue));
+ }
+
+ /**
+ * Sets the named tag to the specified value.
+ *
+ * @param tagName name of the tag
+ * @param tagValue new value of the tag
+ * @throws MetricsException if the tagName conflicts with the configuration
+ */
+ public void setTag(String tagName, byte tagValue) {
+ tagTable.put(tagName, Byte.valueOf(tagValue));
+ }
+
+ /**
+ * Removes any tag of the specified name.
+ */
+ public void removeTag(String tagName) {
+ tagTable.remove(tagName);
+ }
+
+ /**
+ * Sets the named metric to the specified value.
+ *
+ * @param metricName name of the metric
+ * @param metricValue new value of the metric
+ * @throws MetricsException if the metricName or the type of the metricValue
+ * conflicts with the configuration
+ */
+ public void setMetric(String metricName, int metricValue) {
+ setAbsolute(metricName, Integer.valueOf(metricValue));
+ }
+
+ /**
+ * Sets the named metric to the specified value.
+ *
+ * @param metricName name of the metric
+ * @param metricValue new value of the metric
+ * @throws MetricsException if the metricName or the type of the metricValue
+ * conflicts with the configuration
+ */
+ public void setMetric(String metricName, long metricValue) {
+ setAbsolute(metricName, Long.valueOf(metricValue));
+ }
+
+ /**
+ * Sets the named metric to the specified value.
+ *
+ * @param metricName name of the metric
+ * @param metricValue new value of the metric
+ * @throws MetricsException if the metricName or the type of the metricValue
+ * conflicts with the configuration
+ */
+ public void setMetric(String metricName, short metricValue) {
+ setAbsolute(metricName, Short.valueOf(metricValue));
+ }
+
+ /**
+ * Sets the named metric to the specified value.
+ *
+ * @param metricName name of the metric
+ * @param metricValue new value of the metric
+ * @throws MetricsException if the metricName or the type of the metricValue
+ * conflicts with the configuration
+ */
+ public void setMetric(String metricName, byte metricValue) {
+ setAbsolute(metricName, Byte.valueOf(metricValue));
+ }
+
+ /**
+ * Sets the named metric to the specified value.
+ *
+ * @param metricName name of the metric
+ * @param metricValue new value of the metric
+ * @throws MetricsException if the metricName or the type of the metricValue
+ * conflicts with the configuration
+ */
+ public void setMetric(String metricName, float metricValue) {
+ setAbsolute(metricName, new Float(metricValue));
+ }
+
+ /**
+ * Increments the named metric by the specified value.
+ *
+ * @param metricName name of the metric
+ * @param metricValue incremental value
+ * @throws MetricsException if the metricName or the type of the metricValue
+ * conflicts with the configuration
+ */
+ public void incrMetric(String metricName, int metricValue) {
+ setIncrement(metricName, Integer.valueOf(metricValue));
+ }
+
+ /**
+ * Increments the named metric by the specified value.
+ *
+ * @param metricName name of the metric
+ * @param metricValue incremental value
+ * @throws MetricsException if the metricName or the type of the metricValue
+ * conflicts with the configuration
+ */
+ public void incrMetric(String metricName, long metricValue) {
+ setIncrement(metricName, Long.valueOf(metricValue));
+ }
+
+ /**
+ * Increments the named metric by the specified value.
+ *
+ * @param metricName name of the metric
+ * @param metricValue incremental value
+ * @throws MetricsException if the metricName or the type of the metricValue
+ * conflicts with the configuration
+ */
+ public void incrMetric(String metricName, short metricValue) {
+ setIncrement(metricName, Short.valueOf(metricValue));
+ }
+
+ /**
+ * Increments the named metric by the specified value.
+ *
+ * @param metricName name of the metric
+ * @param metricValue incremental value
+ * @throws MetricsException if the metricName or the type of the metricValue
+ * conflicts with the configuration
+ */
+ public void incrMetric(String metricName, byte metricValue) {
+ setIncrement(metricName, Byte.valueOf(metricValue));
+ }
+
+ /**
+ * Increments the named metric by the specified value.
+ *
+ * @param metricName name of the metric
+ * @param metricValue incremental value
+ * @throws MetricsException if the metricName or the type of the metricValue
+ * conflicts with the configuration
+ */
+ public void incrMetric(String metricName, float metricValue) {
+ setIncrement(metricName, new Float(metricValue));
+ }
+
+ private void setAbsolute(String metricName, Number metricValue) {
+ metricTable.put(metricName, new MetricValue(metricValue, MetricValue.ABSOLUTE));
+ }
+
+ private void setIncrement(String metricName, Number metricValue) {
+ metricTable.put(metricName, new MetricValue(metricValue, MetricValue.INCREMENT));
+ }
+
+ /**
+ * Updates the table of buffered data which is to be sent periodically.
+ * If the tag values match an existing row, that row is updated;
+ * otherwise, a new row is added.
+ */
+ public void update() {
+ context.update(this);
+ }
+
+ /**
+ * Removes the row, if it exists, in the buffered data table having tags
+ * that equal the tags that have been set on this record.
+ */
+ public void remove() {
+ context.remove(this);
+ }
+
+ TagMap getTagTable() {
+ return tagTable;
+ }
+
+ Map<String,MetricValue> getMetricTable() {
+ return metricTable;
+ }
+}
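For orientation, a short usage sketch of the producer side of this API (illustrative only; the context name, record name, tag and metric names are made up, and it assumes the usual org.apache.hadoop.metrics.MetricsUtil entry point): a component creates a record once, tags it, registers an Updater, and from doUpdates() increments metrics and calls update() so the context merges them into its buffered row.

    import org.apache.hadoop.metrics.MetricsContext;
    import org.apache.hadoop.metrics.MetricsRecord;
    import org.apache.hadoop.metrics.MetricsUtil;
    import org.apache.hadoop.metrics.Updater;

    public class RequestMetrics implements Updater {
      private final MetricsRecord record;
      private int requestsSinceLastUpdate = 0;

      public RequestMetrics(String hostName) {
        MetricsContext context = MetricsUtil.getContext("example");
        record = MetricsUtil.createRecord(context, "requests");
        record.setTag("hostName", hostName);   // tags form the key of the buffered row
        context.registerUpdater(this);         // doUpdates() is called once per period
      }

      public synchronized void incrRequests() {
        requestsSinceLastUpdate++;
      }

      @Override
      public synchronized void doUpdates(MetricsContext unused) {
        // INCREMENT semantics: the context adds this delta to the buffered value.
        record.incrMetric("request_count", requestsSinceLastUpdate);
        requestsSinceLastUpdate = 0;
        record.update();                       // merge into the row, emitted next period
      }
    }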
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/DataChecksum.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/DataChecksum.java
index 7d321e8a297..4a3424bad32 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/DataChecksum.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/DataChecksum.java
@@ -1,460 +1,460 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.util;
-
-import java.io.DataInputStream;
-import java.io.DataOutputStream;
-import java.io.IOException;
-import java.nio.ByteBuffer;
-import java.util.zip.Checksum;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.hadoop.fs.ChecksumException;
-
-/**
- * This class provides an interface and utilities for processing checksums for
- * DFS data transfers.
- */
-@InterfaceAudience.LimitedPrivate({"HDFS", "MapReduce"})
-@InterfaceStability.Evolving
-public class DataChecksum implements Checksum {
-
- // Misc constants
- public static final int HEADER_LEN = 5; /// 1 byte type and 4 byte len
-
- // checksum types
- public static final int CHECKSUM_NULL = 0;
- public static final int CHECKSUM_CRC32 = 1;
- public static final int CHECKSUM_CRC32C = 2;
- public static final int CHECKSUM_DEFAULT = 3;
- public static final int CHECKSUM_MIXED = 4;
-
- /** The checksum types */
- public static enum Type {
- NULL (CHECKSUM_NULL, 0),
- CRC32 (CHECKSUM_CRC32, 4),
- CRC32C(CHECKSUM_CRC32C, 4),
- DEFAULT(CHECKSUM_DEFAULT, 0), // This cannot be used to create DataChecksum
- MIXED (CHECKSUM_MIXED, 0); // This cannot be used to create DataChecksum
-
- public final int id;
- public final int size;
-
- private Type(int id, int size) {
- this.id = id;
- this.size = size;
- }
-
- /** @return the type corresponding to the id. */
- public static Type valueOf(int id) {
- if (id < 0 || id >= values().length) {
- throw new IllegalArgumentException("id=" + id
- + " out of range [0, " + values().length + ")");
- }
- return values()[id];
- }
- }
-
-
- public static DataChecksum newDataChecksum(Type type, int bytesPerChecksum ) {
- if ( bytesPerChecksum <= 0 ) {
- return null;
- }
-
- switch ( type ) {
- case NULL :
- return new DataChecksum(type, new ChecksumNull(), bytesPerChecksum );
- case CRC32 :
- return new DataChecksum(type, new PureJavaCrc32(), bytesPerChecksum );
- case CRC32C:
- return new DataChecksum(type, new PureJavaCrc32C(), bytesPerChecksum);
- default:
- return null;
- }
- }
-
- /**
- * Creates a DataChecksum from HEADER_LEN bytes from arr[offset].
- * @return DataChecksum of the type in the array or null in case of an error.
- */
- public static DataChecksum newDataChecksum( byte bytes[], int offset ) {
- if ( offset < 0 || bytes.length < offset + HEADER_LEN ) {
- return null;
- }
-
- // like readInt():
- int bytesPerChecksum = ( (bytes[offset+1] & 0xff) << 24 ) |
- ( (bytes[offset+2] & 0xff) << 16 ) |
- ( (bytes[offset+3] & 0xff) << 8 ) |
- ( (bytes[offset+4] & 0xff) );
- return newDataChecksum( Type.valueOf(bytes[0]), bytesPerChecksum );
- }
-
- /**
- * This constructs a DataChecksum by reading HEADER_LEN bytes from
- * the input stream in.
- */
- public static DataChecksum newDataChecksum( DataInputStream in )
- throws IOException {
- int type = in.readByte();
- int bpc = in.readInt();
- DataChecksum summer = newDataChecksum(Type.valueOf(type), bpc );
- if ( summer == null ) {
- throw new IOException( "Could not create DataChecksum of type " +
- type + " with bytesPerChecksum " + bpc );
- }
- return summer;
- }
-
- /**
- * Writes the checksum header to the output stream out.
- */
- public void writeHeader( DataOutputStream out )
- throws IOException {
- out.writeByte( type.id );
- out.writeInt( bytesPerChecksum );
- }
-
- public byte[] getHeader() {
- byte[] header = new byte[DataChecksum.HEADER_LEN];
- header[0] = (byte) (type.id & 0xff);
- // Writing in buffer just like DataOutput.WriteInt()
- header[1+0] = (byte) ((bytesPerChecksum >>> 24) & 0xff);
- header[1+1] = (byte) ((bytesPerChecksum >>> 16) & 0xff);
- header[1+2] = (byte) ((bytesPerChecksum >>> 8) & 0xff);
- header[1+3] = (byte) (bytesPerChecksum & 0xff);
- return header;
- }
-
- /**
- * Writes the current checksum to the stream.
- * If reset is true, then resets the checksum.
- * @return number of bytes written. Will be equal to getChecksumSize();
- */
- public int writeValue( DataOutputStream out, boolean reset )
- throws IOException {
- if ( type.size <= 0 ) {
- return 0;
- }
-
- if ( type.size == 4 ) {
- out.writeInt( (int) summer.getValue() );
- } else {
- throw new IOException( "Unknown Checksum " + type );
- }
-
- if ( reset ) {
- reset();
- }
-
- return type.size;
- }
-
- /**
- * Writes the current checksum to a buffer.
- * If reset is true, then resets the checksum.
- * @return number of bytes written. Will be equal to getChecksumSize();
- */
- public int writeValue( byte[] buf, int offset, boolean reset )
- throws IOException {
- if ( type.size <= 0 ) {
- return 0;
- }
-
- if ( type.size == 4 ) {
- int checksum = (int) summer.getValue();
- buf[offset+0] = (byte) ((checksum >>> 24) & 0xff);
- buf[offset+1] = (byte) ((checksum >>> 16) & 0xff);
- buf[offset+2] = (byte) ((checksum >>> 8) & 0xff);
- buf[offset+3] = (byte) (checksum & 0xff);
- } else {
- throw new IOException( "Unknown Checksum " + type );
- }
-
- if ( reset ) {
- reset();
- }
-
- return type.size;
- }
-
- /**
- * Compares the checksum located at buf[offset] with the current checksum.
- * @return true if the checksum matches and false otherwise.
- */
- public boolean compare( byte buf[], int offset ) {
- if ( type.size == 4 ) {
- int checksum = ( (buf[offset+0] & 0xff) << 24 ) |
- ( (buf[offset+1] & 0xff) << 16 ) |
- ( (buf[offset+2] & 0xff) << 8 ) |
- ( (buf[offset+3] & 0xff) );
- return checksum == (int) summer.getValue();
- }
- return type.size == 0;
- }
-
- private final Type type;
- private final Checksum summer;
- private final int bytesPerChecksum;
- private int inSum = 0;
-
- private DataChecksum( Type type, Checksum checksum, int chunkSize ) {
- this.type = type;
- summer = checksum;
- bytesPerChecksum = chunkSize;
- }
-
- // Accessors
- public Type getChecksumType() {
- return type;
- }
- public int getChecksumSize() {
- return type.size;
- }
- public int getBytesPerChecksum() {
- return bytesPerChecksum;
- }
- public int getNumBytesInSum() {
- return inSum;
- }
-
- public static final int SIZE_OF_INTEGER = Integer.SIZE / Byte.SIZE;
- static public int getChecksumHeaderSize() {
- return 1 + SIZE_OF_INTEGER; // type byte, bytesPerChecksum int
- }
- //Checksum Interface. Just a wrapper around member summer.
- @Override
- public long getValue() {
- return summer.getValue();
- }
- @Override
- public void reset() {
- summer.reset();
- inSum = 0;
- }
- @Override
- public void update( byte[] b, int off, int len ) {
- if ( len > 0 ) {
- summer.update( b, off, len );
- inSum += len;
- }
- }
- @Override
- public void update( int b ) {
- summer.update( b );
- inSum += 1;
- }
-
- /**
- * Verify that the given checksums match the given data.
- *
- * The 'mark' of the ByteBuffer parameters may be modified by this function,
- * but the position is maintained.
- *
- * @param data the DirectByteBuffer pointing to the data to verify.
- * @param checksums the DirectByteBuffer pointing to a series of stored
- * checksums
- * @param fileName the name of the file being read, for error-reporting
- * @param basePos the file position to which the start of 'data' corresponds
- * @throws ChecksumException if the checksums do not match
- */
- public void verifyChunkedSums(ByteBuffer data, ByteBuffer checksums,
- String fileName, long basePos)
- throws ChecksumException {
- if (type.size == 0) return;
-
- if (data.hasArray() && checksums.hasArray()) {
- verifyChunkedSums(
- data.array(), data.arrayOffset() + data.position(), data.remaining(),
- checksums.array(), checksums.arrayOffset() + checksums.position(),
- fileName, basePos);
- return;
- }
- if (NativeCrc32.isAvailable()) {
- NativeCrc32.verifyChunkedSums(bytesPerChecksum, type.id, checksums, data,
- fileName, basePos);
- return;
- }
-
- int startDataPos = data.position();
- data.mark();
- checksums.mark();
- try {
- byte[] buf = new byte[bytesPerChecksum];
- byte[] sum = new byte[type.size];
- while (data.remaining() > 0) {
- int n = Math.min(data.remaining(), bytesPerChecksum);
- checksums.get(sum);
- data.get(buf, 0, n);
- summer.reset();
- summer.update(buf, 0, n);
- int calculated = (int)summer.getValue();
- int stored = (sum[0] << 24 & 0xff000000) |
- (sum[1] << 16 & 0xff0000) |
- (sum[2] << 8 & 0xff00) |
- sum[3] & 0xff;
- if (calculated != stored) {
- long errPos = basePos + data.position() - startDataPos - n;
- throw new ChecksumException(
- "Checksum error: "+ fileName + " at "+ errPos +
- " exp: " + stored + " got: " + calculated, errPos);
- }
- }
- } finally {
- data.reset();
- checksums.reset();
- }
- }
-
- /**
- * Implementation of chunked verification specifically on byte arrays. This
- * is to avoid the copy when dealing with ByteBuffers that have array backing.
- */
- private void verifyChunkedSums(
- byte[] data, int dataOff, int dataLen,
- byte[] checksums, int checksumsOff, String fileName,
- long basePos) throws ChecksumException {
-
- int remaining = dataLen;
- int dataPos = 0;
- while (remaining > 0) {
- int n = Math.min(remaining, bytesPerChecksum);
-
- summer.reset();
- summer.update(data, dataOff + dataPos, n);
- dataPos += n;
- remaining -= n;
-
- int calculated = (int)summer.getValue();
- int stored = (checksums[checksumsOff] << 24 & 0xff000000) |
- (checksums[checksumsOff + 1] << 16 & 0xff0000) |
- (checksums[checksumsOff + 2] << 8 & 0xff00) |
- checksums[checksumsOff + 3] & 0xff;
- checksumsOff += 4;
- if (calculated != stored) {
- long errPos = basePos + dataPos - n;
- throw new ChecksumException(
- "Checksum error: "+ fileName + " at "+ errPos +
- " exp: " + stored + " got: " + calculated, errPos);
- }
- }
- }
-
- /**
- * Calculate checksums for the given data.
- *
- * The 'mark' of the ByteBuffer parameters may be modified by this function,
- * but the position is maintained.
- *
- * @param data the DirectByteBuffer pointing to the data to checksum.
- * @param checksums the DirectByteBuffer into which checksums will be
- * stored. Enough space must be available in this
- * buffer to put the checksums.
- */
- public void calculateChunkedSums(ByteBuffer data, ByteBuffer checksums) {
- if (type.size == 0) return;
-
- if (data.hasArray() && checksums.hasArray()) {
- calculateChunkedSums(data.array(), data.arrayOffset() + data.position(), data.remaining(),
- checksums.array(), checksums.arrayOffset() + checksums.position());
- return;
- }
-
- data.mark();
- checksums.mark();
- try {
- byte[] buf = new byte[bytesPerChecksum];
- while (data.remaining() > 0) {
- int n = Math.min(data.remaining(), bytesPerChecksum);
- data.get(buf, 0, n);
- summer.reset();
- summer.update(buf, 0, n);
- checksums.putInt((int)summer.getValue());
- }
- } finally {
- data.reset();
- checksums.reset();
- }
- }
-
- /**
- * Implementation of chunked calculation specifically on byte arrays. This
- * is to avoid the copy when dealing with ByteBuffers that have array backing.
- */
- private void calculateChunkedSums(
- byte[] data, int dataOffset, int dataLength,
- byte[] sums, int sumsOffset) {
-
- int remaining = dataLength;
- while (remaining > 0) {
- int n = Math.min(remaining, bytesPerChecksum);
- summer.reset();
- summer.update(data, dataOffset, n);
- dataOffset += n;
- remaining -= n;
- long calculated = summer.getValue();
- sums[sumsOffset++] = (byte) (calculated >> 24);
- sums[sumsOffset++] = (byte) (calculated >> 16);
- sums[sumsOffset++] = (byte) (calculated >> 8);
- sums[sumsOffset++] = (byte) (calculated);
- }
- }
-
- @Override
- public boolean equals(Object other) {
- if (!(other instanceof DataChecksum)) {
- return false;
- }
- DataChecksum o = (DataChecksum)other;
- return o.bytesPerChecksum == this.bytesPerChecksum &&
- o.type == this.type;
- }
-
- @Override
- public int hashCode() {
- return (this.type.id + 31) * this.bytesPerChecksum;
- }
-
- @Override
- public String toString() {
- return "DataChecksum(type=" + type +
- ", chunkSize=" + bytesPerChecksum + ")";
- }
-
- /**
- * This just provides a dummy implementation of the Checksum interface.
- * It is used when there is no checksum available or required for
- * data.
- */
- static class ChecksumNull implements Checksum {
-
- public ChecksumNull() {}
-
- //Dummy interface
- @Override
- public long getValue() { return 0; }
- @Override
- public void reset() {}
- @Override
- public void update(byte[] b, int off, int len) {}
- @Override
- public void update(int b) {}
- };
-}
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.util;
+
+import java.io.DataInputStream;
+import java.io.DataOutputStream;
+import java.io.IOException;
+import java.nio.ByteBuffer;
+import java.util.zip.Checksum;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.fs.ChecksumException;
+
+/**
+ * This class provides an interface and utilities for processing checksums for
+ * DFS data transfers.
+ */
+@InterfaceAudience.LimitedPrivate({"HDFS", "MapReduce"})
+@InterfaceStability.Evolving
+public class DataChecksum implements Checksum {
+
+ // Misc constants
+ public static final int HEADER_LEN = 5; /// 1 byte type and 4 byte len
+
+ // checksum types
+ public static final int CHECKSUM_NULL = 0;
+ public static final int CHECKSUM_CRC32 = 1;
+ public static final int CHECKSUM_CRC32C = 2;
+ public static final int CHECKSUM_DEFAULT = 3;
+ public static final int CHECKSUM_MIXED = 4;
+
+ /** The checksum types */
+ public static enum Type {
+ NULL (CHECKSUM_NULL, 0),
+ CRC32 (CHECKSUM_CRC32, 4),
+ CRC32C(CHECKSUM_CRC32C, 4),
+ DEFAULT(CHECKSUM_DEFAULT, 0), // This cannot be used to create DataChecksum
+ MIXED (CHECKSUM_MIXED, 0); // This cannot be used to create DataChecksum
+
+ public final int id;
+ public final int size;
+
+ private Type(int id, int size) {
+ this.id = id;
+ this.size = size;
+ }
+
+ /** @return the type corresponding to the id. */
+ public static Type valueOf(int id) {
+ if (id < 0 || id >= values().length) {
+ throw new IllegalArgumentException("id=" + id
+ + " out of range [0, " + values().length + ")");
+ }
+ return values()[id];
+ }
+ }
+
+
+ public static DataChecksum newDataChecksum(Type type, int bytesPerChecksum ) {
+ if ( bytesPerChecksum <= 0 ) {
+ return null;
+ }
+
+ switch ( type ) {
+ case NULL :
+ return new DataChecksum(type, new ChecksumNull(), bytesPerChecksum );
+ case CRC32 :
+ return new DataChecksum(type, new PureJavaCrc32(), bytesPerChecksum );
+ case CRC32C:
+ return new DataChecksum(type, new PureJavaCrc32C(), bytesPerChecksum);
+ default:
+ return null;
+ }
+ }
+
+ /**
+ * Creates a DataChecksum from HEADER_LEN bytes from arr[offset].
+ * @return DataChecksum of the type in the array or null in case of an error.
+ */
+ public static DataChecksum newDataChecksum( byte bytes[], int offset ) {
+ if ( offset < 0 || bytes.length < offset + HEADER_LEN ) {
+ return null;
+ }
+
+ // like readInt():
+ int bytesPerChecksum = ( (bytes[offset+1] & 0xff) << 24 ) |
+ ( (bytes[offset+2] & 0xff) << 16 ) |
+ ( (bytes[offset+3] & 0xff) << 8 ) |
+ ( (bytes[offset+4] & 0xff) );
+ return newDataChecksum( Type.valueOf(bytes[0]), bytesPerChecksum );
+ }
+
+ /**
+ * This constructs a DataChecksum by reading HEADER_LEN bytes from
+ * the input stream in.
+ */
+ public static DataChecksum newDataChecksum( DataInputStream in )
+ throws IOException {
+ int type = in.readByte();
+ int bpc = in.readInt();
+ DataChecksum summer = newDataChecksum(Type.valueOf(type), bpc );
+ if ( summer == null ) {
+ throw new IOException( "Could not create DataChecksum of type " +
+ type + " with bytesPerChecksum " + bpc );
+ }
+ return summer;
+ }
+
+ /**
+ * Writes the checksum header to the output stream out.
+ */
+ public void writeHeader( DataOutputStream out )
+ throws IOException {
+ out.writeByte( type.id );
+ out.writeInt( bytesPerChecksum );
+ }
+
+ public byte[] getHeader() {
+ byte[] header = new byte[DataChecksum.HEADER_LEN];
+ header[0] = (byte) (type.id & 0xff);
+ // Writing in buffer just like DataOutput.WriteInt()
+ header[1+0] = (byte) ((bytesPerChecksum >>> 24) & 0xff);
+ header[1+1] = (byte) ((bytesPerChecksum >>> 16) & 0xff);
+ header[1+2] = (byte) ((bytesPerChecksum >>> 8) & 0xff);
+ header[1+3] = (byte) (bytesPerChecksum & 0xff);
+ return header;
+ }
+
+ /**
+ * Writes the current checksum to the stream.
+ * If reset is true, then resets the checksum.
+ * @return number of bytes written. Will be equal to getChecksumSize();
+ */
+ public int writeValue( DataOutputStream out, boolean reset )
+ throws IOException {
+ if ( type.size <= 0 ) {
+ return 0;
+ }
+
+ if ( type.size == 4 ) {
+ out.writeInt( (int) summer.getValue() );
+ } else {
+ throw new IOException( "Unknown Checksum " + type );
+ }
+
+ if ( reset ) {
+ reset();
+ }
+
+ return type.size;
+ }
+
+ /**
+ * Writes the current checksum to a buffer.
+ * If reset is true, then resets the checksum.
+ * @return number of bytes written. Will be equal to getChecksumSize();
+ */
+ public int writeValue( byte[] buf, int offset, boolean reset )
+ throws IOException {
+ if ( type.size <= 0 ) {
+ return 0;
+ }
+
+ if ( type.size == 4 ) {
+ int checksum = (int) summer.getValue();
+ buf[offset+0] = (byte) ((checksum >>> 24) & 0xff);
+ buf[offset+1] = (byte) ((checksum >>> 16) & 0xff);
+ buf[offset+2] = (byte) ((checksum >>> 8) & 0xff);
+ buf[offset+3] = (byte) (checksum & 0xff);
+ } else {
+ throw new IOException( "Unknown Checksum " + type );
+ }
+
+ if ( reset ) {
+ reset();
+ }
+
+ return type.size;
+ }
+
+ /**
+ * Compares the checksum located at buf[offset] with the current checksum.
+ * @return true if the checksum matches and false otherwise.
+ */
+ public boolean compare( byte buf[], int offset ) {
+ if ( type.size == 4 ) {
+ int checksum = ( (buf[offset+0] & 0xff) << 24 ) |
+ ( (buf[offset+1] & 0xff) << 16 ) |
+ ( (buf[offset+2] & 0xff) << 8 ) |
+ ( (buf[offset+3] & 0xff) );
+ return checksum == (int) summer.getValue();
+ }
+ return type.size == 0;
+ }
+
+ private final Type type;
+ private final Checksum summer;
+ private final int bytesPerChecksum;
+ private int inSum = 0;
+
+ private DataChecksum( Type type, Checksum checksum, int chunkSize ) {
+ this.type = type;
+ summer = checksum;
+ bytesPerChecksum = chunkSize;
+ }
+
+ // Accessors
+ public Type getChecksumType() {
+ return type;
+ }
+ public int getChecksumSize() {
+ return type.size;
+ }
+ public int getBytesPerChecksum() {
+ return bytesPerChecksum;
+ }
+ public int getNumBytesInSum() {
+ return inSum;
+ }
+
+ public static final int SIZE_OF_INTEGER = Integer.SIZE / Byte.SIZE;
+ static public int getChecksumHeaderSize() {
+ return 1 + SIZE_OF_INTEGER; // type byte, bytesPerChecksum int
+ }
+ //Checksum Interface. Just a wrapper around member summer.
+ @Override
+ public long getValue() {
+ return summer.getValue();
+ }
+ @Override
+ public void reset() {
+ summer.reset();
+ inSum = 0;
+ }
+ @Override
+ public void update( byte[] b, int off, int len ) {
+ if ( len > 0 ) {
+ summer.update( b, off, len );
+ inSum += len;
+ }
+ }
+ @Override
+ public void update( int b ) {
+ summer.update( b );
+ inSum += 1;
+ }
+
+ /**
+ * Verify that the given checksums match the given data.
+ *
+ * The 'mark' of the ByteBuffer parameters may be modified by this function,
+ * but the position is maintained.
+ *
+ * @param data the DirectByteBuffer pointing to the data to verify.
+ * @param checksums the DirectByteBuffer pointing to a series of stored
+ * checksums
+ * @param fileName the name of the file being read, for error-reporting
+ * @param basePos the file position to which the start of 'data' corresponds
+ * @throws ChecksumException if the checksums do not match
+ */
+ public void verifyChunkedSums(ByteBuffer data, ByteBuffer checksums,
+ String fileName, long basePos)
+ throws ChecksumException {
+ if (type.size == 0) return;
+
+ if (data.hasArray() && checksums.hasArray()) {
+ verifyChunkedSums(
+ data.array(), data.arrayOffset() + data.position(), data.remaining(),
+ checksums.array(), checksums.arrayOffset() + checksums.position(),
+ fileName, basePos);
+ return;
+ }
+ if (NativeCrc32.isAvailable()) {
+ NativeCrc32.verifyChunkedSums(bytesPerChecksum, type.id, checksums, data,
+ fileName, basePos);
+ return;
+ }
+
+ int startDataPos = data.position();
+ data.mark();
+ checksums.mark();
+ try {
+ byte[] buf = new byte[bytesPerChecksum];
+ byte[] sum = new byte[type.size];
+ while (data.remaining() > 0) {
+ int n = Math.min(data.remaining(), bytesPerChecksum);
+ checksums.get(sum);
+ data.get(buf, 0, n);
+ summer.reset();
+ summer.update(buf, 0, n);
+ int calculated = (int)summer.getValue();
+ int stored = (sum[0] << 24 & 0xff000000) |
+ (sum[1] << 16 & 0xff0000) |
+ (sum[2] << 8 & 0xff00) |
+ sum[3] & 0xff;
+ if (calculated != stored) {
+ long errPos = basePos + data.position() - startDataPos - n;
+ throw new ChecksumException(
+ "Checksum error: "+ fileName + " at "+ errPos +
+ " exp: " + stored + " got: " + calculated, errPos);
+ }
+ }
+ } finally {
+ data.reset();
+ checksums.reset();
+ }
+ }
+
+ /**
+ * Implementation of chunked verification specifically on byte arrays. This
+ * is to avoid the copy when dealing with ByteBuffers that have array backing.
+ */
+ private void verifyChunkedSums(
+ byte[] data, int dataOff, int dataLen,
+ byte[] checksums, int checksumsOff, String fileName,
+ long basePos) throws ChecksumException {
+
+ int remaining = dataLen;
+ int dataPos = 0;
+ while (remaining > 0) {
+ int n = Math.min(remaining, bytesPerChecksum);
+
+ summer.reset();
+ summer.update(data, dataOff + dataPos, n);
+ dataPos += n;
+ remaining -= n;
+
+ int calculated = (int)summer.getValue();
+ int stored = (checksums[checksumsOff] << 24 & 0xff000000) |
+ (checksums[checksumsOff + 1] << 16 & 0xff0000) |
+ (checksums[checksumsOff + 2] << 8 & 0xff00) |
+ checksums[checksumsOff + 3] & 0xff;
+ checksumsOff += 4;
+ if (calculated != stored) {
+ long errPos = basePos + dataPos - n;
+ throw new ChecksumException(
+ "Checksum error: "+ fileName + " at "+ errPos +
+ " exp: " + stored + " got: " + calculated, errPos);
+ }
+ }
+ }
+
+ /**
+ * Calculate checksums for the given data.
+ *
+ * The 'mark' of the ByteBuffer parameters may be modified by this function,
+ * but the position is maintained.
+ *
+ * @param data the DirectByteBuffer pointing to the data to checksum.
+ * @param checksums the DirectByteBuffer into which checksums will be
+ * stored. Enough space must be available in this
+ * buffer to put the checksums.
+ */
+ public void calculateChunkedSums(ByteBuffer data, ByteBuffer checksums) {
+ if (type.size == 0) return;
+
+ if (data.hasArray() && checksums.hasArray()) {
+ calculateChunkedSums(data.array(), data.arrayOffset() + data.position(), data.remaining(),
+ checksums.array(), checksums.arrayOffset() + checksums.position());
+ return;
+ }
+
+ data.mark();
+ checksums.mark();
+ try {
+ byte[] buf = new byte[bytesPerChecksum];
+ while (data.remaining() > 0) {
+ int n = Math.min(data.remaining(), bytesPerChecksum);
+ data.get(buf, 0, n);
+ summer.reset();
+ summer.update(buf, 0, n);
+ checksums.putInt((int)summer.getValue());
+ }
+ } finally {
+ data.reset();
+ checksums.reset();
+ }
+ }
+
+ /**
+ * Implementation of chunked calculation specifically on byte arrays. This
+ * is to avoid the copy when dealing with ByteBuffers that have array backing.
+ */
+ private void calculateChunkedSums(
+ byte[] data, int dataOffset, int dataLength,
+ byte[] sums, int sumsOffset) {
+
+ int remaining = dataLength;
+ while (remaining > 0) {
+ int n = Math.min(remaining, bytesPerChecksum);
+ summer.reset();
+ summer.update(data, dataOffset, n);
+ dataOffset += n;
+ remaining -= n;
+ long calculated = summer.getValue();
+ sums[sumsOffset++] = (byte) (calculated >> 24);
+ sums[sumsOffset++] = (byte) (calculated >> 16);
+ sums[sumsOffset++] = (byte) (calculated >> 8);
+ sums[sumsOffset++] = (byte) (calculated);
+ }
+ }
+
+ @Override
+ public boolean equals(Object other) {
+ if (!(other instanceof DataChecksum)) {
+ return false;
+ }
+ DataChecksum o = (DataChecksum)other;
+ return o.bytesPerChecksum == this.bytesPerChecksum &&
+ o.type == this.type;
+ }
+
+ @Override
+ public int hashCode() {
+ return (this.type.id + 31) * this.bytesPerChecksum;
+ }
+
+ @Override
+ public String toString() {
+ return "DataChecksum(type=" + type +
+ ", chunkSize=" + bytesPerChecksum + ")";
+ }
+
+ /**
+ * This just provides a dummy implementation of the Checksum interface.
+ * It is used when no checksum is available or required for
+ * data.
+ */
+ static class ChecksumNull implements Checksum {
+
+ public ChecksumNull() {}
+
+ //Dummy interface
+ @Override
+ public long getValue() { return 0; }
+ @Override
+ public void reset() {}
+ @Override
+ public void update(byte[] b, int off, int len) {}
+ @Override
+ public void update(int b) {}
+ };
+}
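A brief usage sketch of the DataChecksum class added above may be helpful here. It is illustrative only and not part of this patch: it assumes the public factory newDataChecksum(Type, int) and the Type.CRC32C constant defined earlier in the same file (outside this hunk), and the demo class name is hypothetical. Checksums are computed per bytesPerChecksum-sized chunk into a parallel buffer and later re-verified against it.

import java.io.ByteArrayOutputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import java.nio.ByteBuffer;

import org.apache.hadoop.util.DataChecksum;

public class DataChecksumDemo {
  public static void main(String[] args) throws IOException {
    // CRC32C checksums (4 bytes each), one per 512-byte chunk of data.
    DataChecksum sum =
        DataChecksum.newDataChecksum(DataChecksum.Type.CRC32C, 512);

    byte[] data = new byte[1300];              // 3 chunks: 512 + 512 + 276
    for (int i = 0; i < data.length; i++) {
      data[i] = (byte) i;
    }

    int chunks = (data.length + 511) / 512;
    ByteBuffer dataBuf = ByteBuffer.wrap(data);
    ByteBuffer sums = ByteBuffer.allocate(chunks * sum.getChecksumSize());

    // Compute one stored checksum per chunk into 'sums'.
    sum.calculateChunkedSums(dataBuf, sums);

    // Re-verify; throws ChecksumException (an IOException) on mismatch.
    sum.verifyChunkedSums(dataBuf, sums, "demo-data", 0);

    // The 5-byte header (type id + bytesPerChecksum) round-trips via writeHeader().
    ByteArrayOutputStream out = new ByteArrayOutputStream();
    sum.writeHeader(new DataOutputStream(out));
    System.out.println("header length = " + out.size());   // prints 5
  }
}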
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 5e642124f6c..dbf4c9d425c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -48,6 +48,9 @@ Release 2.0.3-alpha - Unreleased
HDFS-4041. Hadoop HDFS Maven protoc calls must not depend on external
sh script. (Chris Nauroth via suresh)
+ HADOOP-8911. CRLF characters in source and text files.
+ (Raja Aluri via suresh)
+
OPTIMIZATIONS
BUG FIXES
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/docs/src/documentation/content/xdocs/libhdfs.xml b/hadoop-hdfs-project/hadoop-hdfs/src/main/docs/src/documentation/content/xdocs/libhdfs.xml
index 44ab6c9c2b1..2d4091b98b6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/docs/src/documentation/content/xdocs/libhdfs.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/docs/src/documentation/content/xdocs/libhdfs.xml
@@ -1,110 +1,110 @@
-
-
-
-
-
-
-
-C API libhdfs
-Content-Type
-text/html;
-utf-8
-
-
-
-Overview
-
-
-libhdfs is a JNI based C API for Hadoop's Distributed File System (HDFS).
-It provides C APIs to a subset of the HDFS APIs to manipulate HDFS files and
-the filesystem. libhdfs is part of the Hadoop distribution and comes
-pre-compiled in ${HADOOP_PREFIX}/libhdfs/libhdfs.so .
-
-The header file for libhdfs describes each API in detail and is available in ${HADOOP_PREFIX}/src/c++/libhdfs/hdfs.h
-
-
-
-A Sample Program
-
-
-
-
-
-How To Link With The Library
-
-See the Makefile for hdfs_test.c in the libhdfs source directory (${HADOOP_PREFIX}/src/c++/libhdfs/Makefile) or something like:
-gcc above_sample.c -I${HADOOP_PREFIX}/src/c++/libhdfs -L${HADOOP_PREFIX}/libhdfs -lhdfs -o above_sample
-
-
-
-Common Problems
-
-The most common problem is the CLASSPATH is not set properly when calling a program that uses libhdfs.
-Make sure you set it to all the Hadoop jars needed to run Hadoop itself. Currently, there is no way to
-programmatically generate the classpath, but a good bet is to include all the jar files in ${HADOOP_PREFIX}
-and ${HADOOP_PREFIX}/lib as well as the right configuration directory containing hdfs-site.xml
-
-
-
-Thread Safe
-
libhdfs is thread safe.
-
-
Concurrency and Hadoop FS "handles"
- The Hadoop FS implementation includes a FS handle cache which caches based on the URI of the
-namenode along with the user connecting. So, all calls to hdfsConnect will return the same handle but
-calls to hdfsConnectAsUser with different users will return different handles. But, since HDFS client
-handles are completely thread safe, this has no bearing on concurrency.
-
-
Concurrency and libhdfs/JNI
- The libhdfs calls to JNI should always be creating thread local storage, so (in theory), libhdfs
-should be as thread safe as the underlying calls to the Hadoop FS.
-
+libhdfs is a JNI based C API for Hadoop's Distributed File System (HDFS).
+It provides C APIs to a subset of the HDFS APIs to manipulate HDFS files and
+the filesystem. libhdfs is part of the Hadoop distribution and comes
+pre-compiled in ${HADOOP_PREFIX}/libhdfs/libhdfs.so .
+
+The header file for libhdfs describes each API in detail and is available in ${HADOOP_PREFIX}/src/c++/libhdfs/hdfs.h
+
+
+
+A Sample Program
+
+
+
+
+
+How To Link With The Library
+
+See the Makefile for hdfs_test.c in the libhdfs source directory (${HADOOP_PREFIX}/src/c++/libhdfs/Makefile) or something like:
+gcc above_sample.c -I${HADOOP_PREFIX}/src/c++/libhdfs -L${HADOOP_PREFIX}/libhdfs -lhdfs -o above_sample
+
+
+
+Common Problems
+
+The most common problem is that the CLASSPATH is not set properly when calling a program that uses libhdfs.
+Make sure you set it to all the Hadoop jars needed to run Hadoop itself. Currently, there is no way to
+programmatically generate the classpath, but a good bet is to include all the jar files in ${HADOOP_PREFIX}
+and ${HADOOP_PREFIX}/lib as well as the right configuration directory containing hdfs-site.xml
+
+
+
+Thread Safe
+
libhdfs is thread safe.
+
+
Concurrency and Hadoop FS "handles"
+ The Hadoop FS implementation includes a FS handle cache which caches based on the URI of the
+namenode along with the user connecting. So, all calls to hdfsConnect will return the same handle but
+calls to hdfsConnectAsUser with different users will return different handles. But, since HDFS client
+handles are completely thread safe, this has no bearing on concurrency.
+
+
Concurrency and libhdfs/JNI
+ The libhdfs calls to JNI should always be creating thread local storage, so (in theory), libhdfs
+should be as thread safe as the underlying calls to the Hadoop FS.
+
+
+
+
+
diff --git a/hadoop-mapreduce-project/CHANGES.txt b/hadoop-mapreduce-project/CHANGES.txt
index 9f520ff3a45..34cabae644a 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -17,6 +17,9 @@ Release 2.0.3-alpha - Unreleased
MAPREDUCE-4616. Improve javadoc for MultipleOutputs. (Tony Burton via
acmurthy)
+ HADOOP-8911. CRLF characters in source and text files.
+ (Raja Aluri via suresh)
+
OPTIMIZATIONS
BUG FIXES
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/TestClientProtocolProviderImpls.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/TestClientProtocolProviderImpls.java
index f718e1f4998..74148aea710 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/TestClientProtocolProviderImpls.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/TestClientProtocolProviderImpls.java
@@ -1,120 +1,120 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.mapreduce;
-
-import java.io.IOException;
-
-import junit.framework.TestCase;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.mapred.LocalJobRunner;
-import org.apache.hadoop.mapreduce.server.jobtracker.JTConfig;
-import org.junit.Test;
-
-public class TestClientProtocolProviderImpls extends TestCase {
-
- @Test
- public void testClusterWithLocalClientProvider() throws Exception {
-
- Configuration conf = new Configuration();
-
- try {
- conf.set(MRConfig.FRAMEWORK_NAME, "incorrect");
- new Cluster(conf);
- fail("Cluster should not be initialized with incorrect framework name");
- } catch (IOException e) {
-
- }
-
- try {
- conf.set(MRConfig.FRAMEWORK_NAME, MRConfig.LOCAL_FRAMEWORK_NAME);
- conf.set(JTConfig.JT_IPC_ADDRESS, "127.0.0.1:0");
-
- new Cluster(conf);
- fail("Cluster with Local Framework name should use local JT address");
- } catch (IOException e) {
-
- }
-
- try {
- conf.set(JTConfig.JT_IPC_ADDRESS, "local");
- Cluster cluster = new Cluster(conf);
- assertTrue(cluster.getClient() instanceof LocalJobRunner);
- cluster.close();
- } catch (IOException e) {
-
- }
- }
-
- @Test
- public void testClusterWithJTClientProvider() throws Exception {
-
- Configuration conf = new Configuration();
- try {
- conf.set(MRConfig.FRAMEWORK_NAME, "incorrect");
- new Cluster(conf);
- fail("Cluster should not be initialized with incorrect framework name");
-
- } catch (IOException e) {
-
- }
-
- try {
- conf.set(MRConfig.FRAMEWORK_NAME, "classic");
- conf.set(JTConfig.JT_IPC_ADDRESS, "local");
- new Cluster(conf);
- fail("Cluster with classic Framework name shouldnot use local JT address");
-
- } catch (IOException e) {
-
- }
-
- try {
- conf = new Configuration();
- conf.set(MRConfig.FRAMEWORK_NAME, "classic");
- conf.set(JTConfig.JT_IPC_ADDRESS, "127.0.0.1:0");
- Cluster cluster = new Cluster(conf);
- cluster.close();
- } catch (IOException e) {
-
- }
- }
-
- @Test
- public void testClusterException() {
-
- Configuration conf = new Configuration();
- conf.set(MRConfig.FRAMEWORK_NAME, MRConfig.CLASSIC_FRAMEWORK_NAME);
- conf.set(JTConfig.JT_IPC_ADDRESS, "local");
-
- // initializing a cluster with this conf should throw an error.
- // However the exception thrown should not be specific to either
- // the job tracker client provider or the local provider
- boolean errorThrown = false;
- try {
- Cluster cluster = new Cluster(conf);
- cluster.close();
- fail("Not expected - cluster init should have failed");
- } catch (IOException e) {
- errorThrown = true;
- assert(e.getMessage().contains("Cannot initialize Cluster. Please check"));
- }
- assert(errorThrown);
- }
-}
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.mapreduce;
+
+import java.io.IOException;
+
+import junit.framework.TestCase;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.mapred.LocalJobRunner;
+import org.apache.hadoop.mapreduce.server.jobtracker.JTConfig;
+import org.junit.Test;
+
+public class TestClientProtocolProviderImpls extends TestCase {
+
+ @Test
+ public void testClusterWithLocalClientProvider() throws Exception {
+
+ Configuration conf = new Configuration();
+
+ try {
+ conf.set(MRConfig.FRAMEWORK_NAME, "incorrect");
+ new Cluster(conf);
+ fail("Cluster should not be initialized with incorrect framework name");
+ } catch (IOException e) {
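+ // expected: an invalid framework name must fail Cluster initialization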
+
+ }
+
+ try {
+ conf.set(MRConfig.FRAMEWORK_NAME, MRConfig.LOCAL_FRAMEWORK_NAME);
+ conf.set(JTConfig.JT_IPC_ADDRESS, "127.0.0.1:0");
+
+ new Cluster(conf);
+ fail("Cluster with Local Framework name should use local JT address");
+ } catch (IOException e) {
+
+ }
+
+ try {
+ conf.set(JTConfig.JT_IPC_ADDRESS, "local");
+ Cluster cluster = new Cluster(conf);
+ assertTrue(cluster.getClient() instanceof LocalJobRunner);
+ cluster.close();
+ } catch (IOException e) {
+
+ }
+ }
+
+ @Test
+ public void testClusterWithJTClientProvider() throws Exception {
+
+ Configuration conf = new Configuration();
+ try {
+ conf.set(MRConfig.FRAMEWORK_NAME, "incorrect");
+ new Cluster(conf);
+ fail("Cluster should not be initialized with incorrect framework name");
+
+ } catch (IOException e) {
+
+ }
+
+ try {
+ conf.set(MRConfig.FRAMEWORK_NAME, "classic");
+ conf.set(JTConfig.JT_IPC_ADDRESS, "local");
+ new Cluster(conf);
+ fail("Cluster with classic Framework name shouldnot use local JT address");
+
+ } catch (IOException e) {
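+ // expected: the classic framework must not accept a local JT address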
+
+ }
+
+ try {
+ conf = new Configuration();
+ conf.set(MRConfig.FRAMEWORK_NAME, "classic");
+ conf.set(JTConfig.JT_IPC_ADDRESS, "127.0.0.1:0");
+ Cluster cluster = new Cluster(conf);
+ cluster.close();
+ } catch (IOException e) {
+
+ }
+ }
+
+ @Test
+ public void testClusterException() {
+
+ Configuration conf = new Configuration();
+ conf.set(MRConfig.FRAMEWORK_NAME, MRConfig.CLASSIC_FRAMEWORK_NAME);
+ conf.set(JTConfig.JT_IPC_ADDRESS, "local");
+
+ // initializing a cluster with this conf should throw an error.
+ // However the exception thrown should not be specific to either
+ // the job tracker client provider or the local provider
+ boolean errorThrown = false;
+ try {
+ Cluster cluster = new Cluster(conf);
+ cluster.close();
+ fail("Not expected - cluster init should have failed");
+ } catch (IOException e) {
+ errorThrown = true;
+ assert(e.getMessage().contains("Cannot initialize Cluster. Please check"));
+ }
+ assert(errorThrown);
+ }
+}
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/TestYarnClientProtocolProvider.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/TestYarnClientProtocolProvider.java
index 1bbffb8fde1..49c5dc88a64 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/TestYarnClientProtocolProvider.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/TestYarnClientProtocolProvider.java
@@ -1,129 +1,129 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.mapreduce;
-
-import static org.mockito.Matchers.any;
-import static org.mockito.Mockito.mock;
-import static org.mockito.Mockito.when;
-
-import java.io.IOException;
-import java.nio.ByteBuffer;
-
-import junit.framework.TestCase;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.io.Text;
-import org.apache.hadoop.mapred.LocalJobRunner;
-import org.apache.hadoop.mapred.ResourceMgrDelegate;
-import org.apache.hadoop.mapred.YARNRunner;
-import org.apache.hadoop.mapreduce.protocol.ClientProtocol;
-import org.apache.hadoop.security.token.Token;
-import org.apache.hadoop.yarn.api.ClientRMProtocol;
-import org.apache.hadoop.yarn.api.protocolrecords.GetDelegationTokenRequest;
-import org.apache.hadoop.yarn.api.protocolrecords.GetDelegationTokenResponse;
-import org.apache.hadoop.yarn.api.records.DelegationToken;
-import org.apache.hadoop.yarn.conf.YarnConfiguration;
-import org.apache.hadoop.yarn.factories.RecordFactory;
-import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
-import org.junit.Test;
-
-public class TestYarnClientProtocolProvider extends TestCase {
-
- private static final RecordFactory recordFactory = RecordFactoryProvider.
- getRecordFactory(null);
-
- @Test
- public void testClusterWithYarnClientProtocolProvider() throws Exception {
-
- Configuration conf = new Configuration(false);
- Cluster cluster = null;
-
- try {
- cluster = new Cluster(conf);
- } catch (Exception e) {
- throw new Exception(
- "Failed to initialize a local runner w/o a cluster framework key", e);
- }
-
- try {
- assertTrue("client is not a LocalJobRunner",
- cluster.getClient() instanceof LocalJobRunner);
- } finally {
- if (cluster != null) {
- cluster.close();
- }
- }
-
- try {
- conf = new Configuration();
- conf.set(MRConfig.FRAMEWORK_NAME, MRConfig.YARN_FRAMEWORK_NAME);
- cluster = new Cluster(conf);
- ClientProtocol client = cluster.getClient();
- assertTrue("client is a YARNRunner", client instanceof YARNRunner);
- } catch (IOException e) {
-
- } finally {
- if (cluster != null) {
- cluster.close();
- }
- }
- }
-
-
- @Test
- public void testClusterGetDelegationToken() throws Exception {
-
- Configuration conf = new Configuration(false);
- Cluster cluster = null;
- try {
- conf = new Configuration();
- conf.set(MRConfig.FRAMEWORK_NAME, MRConfig.YARN_FRAMEWORK_NAME);
- cluster = new Cluster(conf);
- YARNRunner yrunner = (YARNRunner) cluster.getClient();
- GetDelegationTokenResponse getDTResponse =
- recordFactory.newRecordInstance(GetDelegationTokenResponse.class);
- DelegationToken rmDTToken = recordFactory.newRecordInstance(
- DelegationToken.class);
- rmDTToken.setIdentifier(ByteBuffer.wrap(new byte[2]));
- rmDTToken.setKind("Testclusterkind");
- rmDTToken.setPassword(ByteBuffer.wrap("testcluster".getBytes()));
- rmDTToken.setService("0.0.0.0:8032");
- getDTResponse.setRMDelegationToken(rmDTToken);
- final ClientRMProtocol cRMProtocol = mock(ClientRMProtocol.class);
- when(cRMProtocol.getDelegationToken(any(
- GetDelegationTokenRequest.class))).thenReturn(getDTResponse);
- ResourceMgrDelegate rmgrDelegate = new ResourceMgrDelegate(
- new YarnConfiguration(conf)) {
- @Override
- public synchronized void start() {
- this.rmClient = cRMProtocol;
- }
- };
- yrunner.setResourceMgrDelegate(rmgrDelegate);
- Token t = cluster.getDelegationToken(new Text(" "));
- assertTrue("Token kind is instead " + t.getKind().toString(),
- "Testclusterkind".equals(t.getKind().toString()));
- } finally {
- if (cluster != null) {
- cluster.close();
- }
- }
- }
-
-}
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.mapreduce;
+
+import static org.mockito.Matchers.any;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.when;
+
+import java.io.IOException;
+import java.nio.ByteBuffer;
+
+import junit.framework.TestCase;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.io.Text;
+import org.apache.hadoop.mapred.LocalJobRunner;
+import org.apache.hadoop.mapred.ResourceMgrDelegate;
+import org.apache.hadoop.mapred.YARNRunner;
+import org.apache.hadoop.mapreduce.protocol.ClientProtocol;
+import org.apache.hadoop.security.token.Token;
+import org.apache.hadoop.yarn.api.ClientRMProtocol;
+import org.apache.hadoop.yarn.api.protocolrecords.GetDelegationTokenRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.GetDelegationTokenResponse;
+import org.apache.hadoop.yarn.api.records.DelegationToken;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hadoop.yarn.factories.RecordFactory;
+import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
+import org.junit.Test;
+
+public class TestYarnClientProtocolProvider extends TestCase {
+
+ private static final RecordFactory recordFactory = RecordFactoryProvider.
+ getRecordFactory(null);
+
+ @Test
+ public void testClusterWithYarnClientProtocolProvider() throws Exception {
+
+ Configuration conf = new Configuration(false);
+ Cluster cluster = null;
+
+ try {
+ cluster = new Cluster(conf);
+ } catch (Exception e) {
+ throw new Exception(
+ "Failed to initialize a local runner w/o a cluster framework key", e);
+ }
+
+ try {
+ assertTrue("client is not a LocalJobRunner",
+ cluster.getClient() instanceof LocalJobRunner);
+ } finally {
+ if (cluster != null) {
+ cluster.close();
+ }
+ }
+
+ try {
+ conf = new Configuration();
+ conf.set(MRConfig.FRAMEWORK_NAME, MRConfig.YARN_FRAMEWORK_NAME);
+ cluster = new Cluster(conf);
+ ClientProtocol client = cluster.getClient();
+ assertTrue("client is a YARNRunner", client instanceof YARNRunner);
+ } catch (IOException e) {
+
+ } finally {
+ if (cluster != null) {
+ cluster.close();
+ }
+ }
+ }
+
+
+ @Test
+ public void testClusterGetDelegationToken() throws Exception {
+
+ Configuration conf = new Configuration(false);
+ Cluster cluster = null;
+ try {
+ conf = new Configuration();
+ conf.set(MRConfig.FRAMEWORK_NAME, MRConfig.YARN_FRAMEWORK_NAME);
+ cluster = new Cluster(conf);
+ YARNRunner yrunner = (YARNRunner) cluster.getClient();
+ GetDelegationTokenResponse getDTResponse =
+ recordFactory.newRecordInstance(GetDelegationTokenResponse.class);
+ DelegationToken rmDTToken = recordFactory.newRecordInstance(
+ DelegationToken.class);
+ rmDTToken.setIdentifier(ByteBuffer.wrap(new byte[2]));
+ rmDTToken.setKind("Testclusterkind");
+ rmDTToken.setPassword(ByteBuffer.wrap("testcluster".getBytes()));
+ rmDTToken.setService("0.0.0.0:8032");
+ getDTResponse.setRMDelegationToken(rmDTToken);
+ final ClientRMProtocol cRMProtocol = mock(ClientRMProtocol.class);
+ when(cRMProtocol.getDelegationToken(any(
+ GetDelegationTokenRequest.class))).thenReturn(getDTResponse);
+ ResourceMgrDelegate rmgrDelegate = new ResourceMgrDelegate(
+ new YarnConfiguration(conf)) {
+ @Override
+ public synchronized void start() {
+ this.rmClient = cRMProtocol;
+ }
+ };
+ yrunner.setResourceMgrDelegate(rmgrDelegate);
+ Token t = cluster.getDelegationToken(new Text(" "));
+ assertTrue("Token kind is instead " + t.getKind().toString(),
+ "Testclusterkind".equals(t.getKind().toString()));
+ } finally {
+ if (cluster != null) {
+ cluster.close();
+ }
+ }
+ }
+
+}
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/WordMean.java b/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/WordMean.java
index bc2d658b231..b1f7a67a53c 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/WordMean.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/WordMean.java
@@ -1,196 +1,196 @@
-package org.apache.hadoop.examples;
-
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-import java.io.BufferedReader;
-import java.io.IOException;
-import java.io.InputStreamReader;
-import java.util.StringTokenizer;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.conf.Configured;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.io.LongWritable;
-import org.apache.hadoop.io.Text;
-import org.apache.hadoop.mapreduce.Job;
-import org.apache.hadoop.mapreduce.Mapper;
-import org.apache.hadoop.mapreduce.Reducer;
-import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
-import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
-import org.apache.hadoop.util.Tool;
-import org.apache.hadoop.util.ToolRunner;
-
-public class WordMean extends Configured implements Tool {
-
- private double mean = 0;
-
- private final static Text COUNT = new Text("count");
- private final static Text LENGTH = new Text("length");
- private final static LongWritable ONE = new LongWritable(1);
-
- /**
- * Maps words from line of text into 2 key-value pairs; one key-value pair for
- * counting the word, another for counting its length.
- */
- public static class WordMeanMapper extends
- Mapper