From ba5b19fb5d6a925231a123616d171e3f6062bd27 Mon Sep 17 00:00:00 2001 From: Siddharth Seth Date: Wed, 9 Jan 2013 19:33:06 +0000 Subject: [PATCH 01/31] YARN-320. RM should always be able to renew its own tokens. Contributed by Daryn Sharp git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1431020 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-yarn-project/CHANGES.txt | 3 + .../resourcemanager/ClientRMService.java | 14 ++- .../resourcemanager/TestClientRMService.java | 94 +++++++++++++++++++ 3 files changed, 109 insertions(+), 2 deletions(-) diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt index b31d62c3842..ac2e7300577 100644 --- a/hadoop-yarn-project/CHANGES.txt +++ b/hadoop-yarn-project/CHANGES.txt @@ -287,6 +287,9 @@ Release 0.23.6 - UNRELEASED YARN-50. Implement renewal / cancellation of Delegation Tokens (Siddharth Seth via tgraves) + YARN-320. RM should always be able to renew its own tokens. + (Daryn Sharp via sseth) + Release 0.23.5 - UNRELEASED INCOMPATIBLE CHANGES diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ClientRMService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ClientRMService.java index 6003d4a7810..a464b3ae000 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ClientRMService.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ClientRMService.java @@ -519,7 +519,7 @@ public class ClientRMService extends AbstractService implements protoToken.getIdentifier().array(), protoToken.getPassword().array(), new Text(protoToken.getKind()), new Text(protoToken.getService())); - String user = UserGroupInformation.getCurrentUser().getShortUserName(); + String user = getRenewerForToken(token); long nextExpTime = rmDTSecretManager.renewToken(token, user); RenewDelegationTokenResponse renewResponse = Records .newRecord(RenewDelegationTokenResponse.class); @@ -543,7 +543,7 @@ public class ClientRMService extends AbstractService implements protoToken.getIdentifier().array(), protoToken.getPassword().array(), new Text(protoToken.getKind()), new Text(protoToken.getService())); - String user = UserGroupInformation.getCurrentUser().getShortUserName(); + String user = getRenewerForToken(token); rmDTSecretManager.cancelToken(token, user); return Records.newRecord(CancelDelegationTokenResponse.class); } catch (IOException e) { @@ -551,6 +551,16 @@ public class ClientRMService extends AbstractService implements } } + private String getRenewerForToken(Token token) + throws IOException { + UserGroupInformation user = UserGroupInformation.getCurrentUser(); + UserGroupInformation loginUser = UserGroupInformation.getLoginUser(); + // we can always renew our own tokens + return loginUser.getUserName().equals(user.getUserName()) + ? 
token.decodeIdentifier().getRenewer().toString() + : user.getShortUserName(); + } + void refreshServiceAcls(Configuration configuration, PolicyProvider policyProvider) { this.server.refreshServiceAcl(configuration, policyProvider); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestClientRMService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestClientRMService.java index f7f2a0d220e..2157c0b1cfd 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestClientRMService.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestClientRMService.java @@ -25,6 +25,7 @@ import static org.mockito.Matchers.anyString; import java.io.IOException; import java.net.InetSocketAddress; +import java.security.PrivilegedExceptionAction; import java.util.List; import java.util.concurrent.ConcurrentHashMap; @@ -33,14 +34,19 @@ import junit.framework.Assert; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.io.Text; +import org.apache.hadoop.security.UserGroupInformation; +import org.apache.hadoop.security.token.Token; import org.apache.hadoop.yarn.api.ClientRMProtocol; import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationReportRequest; import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationReportResponse; import org.apache.hadoop.yarn.api.protocolrecords.GetClusterNodesRequest; import org.apache.hadoop.yarn.api.protocolrecords.GetQueueInfoRequest; import org.apache.hadoop.yarn.api.protocolrecords.GetQueueInfoResponse; +import org.apache.hadoop.yarn.api.protocolrecords.RenewDelegationTokenRequest; import org.apache.hadoop.yarn.api.records.ApplicationId; import org.apache.hadoop.yarn.api.records.ApplicationReport; +import org.apache.hadoop.yarn.api.records.DelegationToken; import org.apache.hadoop.yarn.api.records.NodeReport; import org.apache.hadoop.yarn.api.records.QueueInfo; import org.apache.hadoop.yarn.conf.YarnConfiguration; @@ -49,11 +55,16 @@ import org.apache.hadoop.yarn.exceptions.YarnRemoteException; import org.apache.hadoop.yarn.factories.RecordFactory; import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider; import org.apache.hadoop.yarn.ipc.YarnRPC; +import org.apache.hadoop.yarn.security.client.RMDelegationTokenIdentifier; +import org.apache.hadoop.yarn.server.RMDelegationTokenSecretManager; import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp; import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppImpl; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.YarnScheduler; +import org.apache.hadoop.yarn.util.BuilderUtils; import org.apache.hadoop.yarn.util.Records; import org.junit.Test; +import org.junit.AfterClass; +import org.junit.BeforeClass; public class TestClientRMService { @@ -63,6 +74,21 @@ public class TestClientRMService { private RecordFactory recordFactory = RecordFactoryProvider .getRecordFactory(null); + private static RMDelegationTokenSecretManager dtsm; + + @BeforeClass + public static void setupSecretManager() throws IOException { + dtsm = new RMDelegationTokenSecretManager(60000, 60000, 
60000, 60000); + dtsm.startThreads(); + } + + @AfterClass + public static void teardownSecretManager() { + if (dtsm != null) { + dtsm.stopThreads(); + } + } + @Test public void testGetClusterNodes() throws Exception { MockRM rm = new MockRM() { @@ -141,6 +167,74 @@ public class TestClientRMService { Assert.assertEquals(2, applications.size()); } + private static final UserGroupInformation owner = + UserGroupInformation.createRemoteUser("owner"); + private static final UserGroupInformation other = + UserGroupInformation.createRemoteUser("other"); + + @Test + public void testTokenRenewalByOwner() throws Exception { + owner.doAs(new PrivilegedExceptionAction() { + @Override + public Void run() throws Exception { + checkTokenRenewal(owner, owner); + return null; + } + }); + } + + @Test + public void testTokenRenewalWrongUser() throws Exception { + try { + owner.doAs(new PrivilegedExceptionAction() { + @Override + public Void run() throws Exception { + checkTokenRenewal(owner, other); + return null; + } + }); + } catch (YarnRemoteException e) { + Assert.assertEquals(e.getMessage(), + "Client " + owner.getUserName() + + " tries to renew a token with renewer specified as " + + other.getUserName()); + return; + } + Assert.fail("renew should have failed"); + } + + @Test + public void testTokenRenewalByLoginUser() throws Exception { + UserGroupInformation.getLoginUser().doAs(new PrivilegedExceptionAction() { + @Override + public Void run() throws Exception { + checkTokenRenewal(owner, owner); + checkTokenRenewal(owner, other); + return null; + } + }); + } + + private void checkTokenRenewal(UserGroupInformation owner, + UserGroupInformation renewer) throws IOException { + RMDelegationTokenIdentifier tokenIdentifier = + new RMDelegationTokenIdentifier( + new Text(owner.getUserName()), new Text(renewer.getUserName()), null); + Token token = + new Token(tokenIdentifier, dtsm); + DelegationToken dToken = BuilderUtils.newDelegationToken( + token.getIdentifier(), token.getKind().toString(), + token.getPassword(), token.getService().toString()); + RenewDelegationTokenRequest request = + Records.newRecord(RenewDelegationTokenRequest.class); + request.setDelegationToken(dToken); + + RMContext rmContext = mock(RMContext.class); + ClientRMService rmService = new ClientRMService( + rmContext, null, null, null, dtsm); + rmService.renewDelegationToken(request); + } + private void mockRMContext(YarnScheduler yarnScheduler, RMContext rmContext) throws IOException { Dispatcher dispatcher = mock(Dispatcher.class); From 106e2e27ffb81f816ae627fa1712f5db5fb36002 Mon Sep 17 00:00:00 2001 From: Thomas Graves Date: Wed, 9 Jan 2013 21:00:47 +0000 Subject: [PATCH 02/31] YARN-325. RM CapacityScheduler can deadlock when getQueueInfo() is called and a container is completing (Arun C Murthy via tgraves) git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1431070 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-yarn-project/CHANGES.txt | 3 +++ .../scheduler/capacity/CSAssignment.java | 22 +++++++++++++++++++ .../scheduler/capacity/CapacityScheduler.java | 15 ++++++++++++- .../scheduler/capacity/LeafQueue.java | 21 +++++------------- .../scheduler/capacity/TestLeafQueue.java | 8 ++++--- 5 files changed, 50 insertions(+), 19 deletions(-) diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt index ac2e7300577..2f071b8e7ee 100644 --- a/hadoop-yarn-project/CHANGES.txt +++ b/hadoop-yarn-project/CHANGES.txt @@ -290,6 +290,9 @@ Release 0.23.6 - UNRELEASED YARN-320. 
RM should always be able to renew its own tokens. (Daryn Sharp via sseth) + YARN-325. RM CapacityScheduler can deadlock when getQueueInfo() is + called and a container is completing (Arun C Murthy via tgraves) + Release 0.23.5 - UNRELEASED INCOMPATIBLE CHANGES diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CSAssignment.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CSAssignment.java index f994c6d7122..1f1250a2b64 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CSAssignment.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CSAssignment.java @@ -20,18 +20,32 @@ package org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity; import org.apache.hadoop.classification.InterfaceAudience.Private; import org.apache.hadoop.classification.InterfaceStability.Unstable; import org.apache.hadoop.yarn.api.records.Resource; +import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainer; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.NodeType; +import org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerApp; @Private @Unstable public class CSAssignment { final private Resource resource; private NodeType type; + private final RMContainer excessReservation; + private final FiCaSchedulerApp application; public CSAssignment(Resource resource, NodeType type) { this.resource = resource; this.type = type; + this.application = null; + this.excessReservation = null; } + + public CSAssignment(FiCaSchedulerApp application, RMContainer excessReservation) { + this.resource = excessReservation.getContainer().getResource(); + this.type = NodeType.NODE_LOCAL; + this.application = application; + this.excessReservation = excessReservation; + } + public Resource getResource() { return resource; @@ -45,6 +59,14 @@ public class CSAssignment { this.type = type; } + public FiCaSchedulerApp getApplication() { + return application; + } + + public RMContainer getExcessReservation() { + return excessReservation; + } + @Override public String toString() { return resource.getMemory() + ":" + type; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java index c6683fe6f13..2ce3a464a86 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java @@ -604,7 +604,20 @@ implements ResourceScheduler, CapacitySchedulerContext, Configurable { reservedApplication.getApplicationId() + " on node: " + nm); LeafQueue queue = 
((LeafQueue)reservedApplication.getQueue()); - queue.assignContainers(clusterResource, node); + CSAssignment assignment = queue.assignContainers(clusterResource, node); + + RMContainer excessReservation = assignment.getExcessReservation(); + if (excessReservation != null) { + Container container = excessReservation.getContainer(); + queue.completedContainer( + clusterResource, assignment.getApplication(), node, + excessReservation, + SchedulerUtils.createAbnormalContainerStatus( + container.getId(), + SchedulerUtils.UNRESERVED_CONTAINER), + RMContainerEventType.RELEASED); + } + } // Try to schedule more if there are no reservations to fulfill diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java index b02dda1cb2f..7656ace5b0f 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java @@ -62,7 +62,6 @@ import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ActiveUsersManager; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.NodeType; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.QueueMetrics; -import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerUtils; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerApp; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerNode; import org.apache.hadoop.yarn.server.resourcemanager.security.RMContainerTokenSecretManager; @@ -781,11 +780,9 @@ public class LeafQueue implements CSQueue { if (reservedContainer != null) { FiCaSchedulerApp application = getApplication(reservedContainer.getApplicationAttemptId()); - return new CSAssignment( + return assignReservedContainer(application, node, reservedContainer, - clusterResource), - NodeType.NODE_LOCAL); // Don't care about locality constraints - // for reserved containers + clusterResource); } // Try to assign containers to applications in order @@ -873,20 +870,14 @@ public class LeafQueue implements CSQueue { } - private synchronized Resource assignReservedContainer(FiCaSchedulerApp application, + private synchronized CSAssignment + assignReservedContainer(FiCaSchedulerApp application, FiCaSchedulerNode node, RMContainer rmContainer, Resource clusterResource) { // Do we still need this reservation? 
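// (If it is no longer needed, the release no longer happens here: calling
//  completedContainer() from inside the queue is what YARN-325 identified as
//  the deadlock with getQueueInfo(). The excess reservation is instead handed
//  back in the CSAssignment, and CapacityScheduler releases it after
//  assignContainers() returns.)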
Priority priority = rmContainer.getReservedPriority(); if (application.getTotalRequiredResources(priority) == 0) { // Release - Container container = rmContainer.getContainer(); - completedContainer(clusterResource, application, node, - rmContainer, - SchedulerUtils.createAbnormalContainerStatus( - container.getId(), - SchedulerUtils.UNRESERVED_CONTAINER), - RMContainerEventType.RELEASED); - return container.getResource(); // Ugh, return resource to force re-sort + return new CSAssignment(application, rmContainer); } // Try to assign if we have sufficient resources @@ -895,7 +886,7 @@ public class LeafQueue implements CSQueue { // Doesn't matter... since it's already charged for at time of reservation // "re-reservation" is *free* - return Resources.none(); + return new CSAssignment(Resources.none(), NodeType.NODE_LOCAL); } private synchronized boolean assignToQueue(Resource clusterResource, diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestLeafQueue.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestLeafQueue.java index d0d23af6ba9..ccf2a47c128 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestLeafQueue.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestLeafQueue.java @@ -1181,12 +1181,14 @@ public class TestLeafQueue { // Now finish another container from app_0 and see the reservation cancelled a.completedContainer(clusterResource, app_0, node_0, app_0.getLiveContainers().iterator().next(), null, RMContainerEventType.KILL); - a.assignContainers(clusterResource, node_0); - assertEquals(4*GB, a.getUsedResources().getMemory()); + CSAssignment assignment = a.assignContainers(clusterResource, node_0); + assertEquals(8*GB, a.getUsedResources().getMemory()); assertEquals(0*GB, app_0.getCurrentConsumption().getMemory()); assertEquals(4*GB, app_1.getCurrentConsumption().getMemory()); - assertEquals(0*GB, app_1.getCurrentReservation().getMemory()); + assertEquals(4*GB, app_1.getCurrentReservation().getMemory()); assertEquals(0*GB, node_0.getUsedResource().getMemory()); + assertEquals(4*GB, + assignment.getExcessReservation().getContainer().getResource().getMemory()); } From 3cd17b614e9436d06cd9b4ccc5f9cf59fbe1cf21 Mon Sep 17 00:00:00 2001 From: Suresh Srinivas Date: Wed, 9 Jan 2013 21:20:38 +0000 Subject: [PATCH 03/31] HDFS-4363. Combine PBHelper and HdfsProtoUtil and remove redundant methods. Contributed by Suresh Srinivas. 
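As an illustration of the consolidated API, a minimal sketch of the call pattern callers end up with (the class and method names here are placeholders; only the PBHelper calls and proto types come from this change):

import java.io.DataInputStream;
import java.io.IOException;

import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto;
import org.apache.hadoop.hdfs.protocolPB.PBHelper;

/** Sketch only: shows the calls that replace the removed HdfsProtoUtil. */
class PBHelperUsageSketch {

  // The vint-length-prefixed stream framing helper moves from HdfsProtoUtil
  // to PBHelper, so reading a delimited reply looks like this.
  static BlockOpResponseProto readReply(DataInputStream in) throws IOException {
    return BlockOpResponseProto.parseFrom(PBHelper.vintPrefixed(in));
  }

  // The toProto()/fromProto() pairs are replaced by the overloaded
  // PBHelper.convert() family.
  static ExtendedBlockProto toProto(ExtendedBlock blk) {
    return PBHelper.convert(blk);
  }
}
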
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1431088 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 3 + .../org/apache/hadoop/hdfs/DFSClient.java | 21 +- .../apache/hadoop/hdfs/DFSOutputStream.java | 6 +- .../apache/hadoop/hdfs/RemoteBlockReader.java | 5 +- .../hadoop/hdfs/RemoteBlockReader2.java | 5 +- .../hadoop/hdfs/protocol/HdfsProtoUtil.java | 180 ------------------ .../datatransfer/DataTransferEncryptor.java | 2 +- .../datatransfer/DataTransferProtoUtil.java | 17 +- .../protocol/datatransfer/PipelineAck.java | 2 +- .../hdfs/protocol/datatransfer/Receiver.java | 37 ++-- .../hdfs/protocol/datatransfer/Sender.java | 11 +- ...amenodeProtocolServerSideTranslatorPB.java | 6 +- .../ClientNamenodeProtocolTranslatorPB.java | 6 +- .../hadoop/hdfs/protocolPB/PBHelper.java | 65 +++++-- .../hadoop/hdfs/server/balancer/Balancer.java | 3 +- .../hadoop/hdfs/server/datanode/DataNode.java | 4 +- .../hdfs/server/datanode/DataXceiver.java | 12 +- .../hdfs/protocol/TestHdfsProtoUtil.java | 42 ---- .../hadoop/hdfs/protocolPB/TestPBHelper.java | 18 ++ 19 files changed, 138 insertions(+), 307 deletions(-) delete mode 100644 hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsProtoUtil.java delete mode 100644 hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocol/TestHdfsProtoUtil.java diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt index 1400ae1881c..24707fdcc75 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt @@ -466,6 +466,9 @@ Release 2.0.3-alpha - Unreleased HDFS-4035. LightWeightGSet and LightWeightHashSet increment a volatile without synchronization. (eli) + + HDFS-4363. Combine PBHelper and HdfsProtoUtil and remove redundant + methods. 
(suresh) OPTIMIZATIONS diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java index 14bb1d22b64..b0f80c780c3 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java @@ -79,7 +79,6 @@ import javax.net.SocketFactory; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.classification.InterfaceAudience.LimitedPrivate; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.BlockLocation; import org.apache.hadoop.fs.BlockStorageLocation; @@ -115,7 +114,6 @@ import org.apache.hadoop.hdfs.protocol.HdfsConstants; import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType; import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction; import org.apache.hadoop.hdfs.protocol.HdfsFileStatus; -import org.apache.hadoop.hdfs.protocol.HdfsProtoUtil; import org.apache.hadoop.hdfs.protocol.LocatedBlock; import org.apache.hadoop.hdfs.protocol.LocatedBlocks; import org.apache.hadoop.hdfs.protocol.NSQuotaExceededException; @@ -128,6 +126,7 @@ import org.apache.hadoop.hdfs.protocol.datatransfer.Sender; import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto; import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto; import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status; +import org.apache.hadoop.hdfs.protocolPB.PBHelper; import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier; import org.apache.hadoop.hdfs.security.token.block.DataEncryptionKey; import org.apache.hadoop.hdfs.security.token.block.InvalidBlockTokenException; @@ -363,7 +362,7 @@ public class DFSClient implements java.io.Closeable { /** * Same as this(nameNodeUri, conf, null); - * @see #DFSClient(InetSocketAddress, Configuration, org.apache.hadoop.fs.FileSystem.Statistics) + * @see #DFSClient(URI, Configuration, FileSystem.Statistics) */ public DFSClient(URI nameNodeUri, Configuration conf ) throws IOException { @@ -372,7 +371,7 @@ public class DFSClient implements java.io.Closeable { /** * Same as this(nameNodeUri, null, conf, stats); - * @see #DFSClient(InetSocketAddress, ClientProtocol, Configuration, org.apache.hadoop.fs.FileSystem.Statistics) + * @see #DFSClient(URI, ClientProtocol, Configuration, FileSystem.Statistics) */ public DFSClient(URI nameNodeUri, Configuration conf, FileSystem.Statistics stats) @@ -1157,7 +1156,7 @@ public class DFSClient implements java.io.Closeable { /** * Call {@link #create(String, FsPermission, EnumSet, short, long, - * Progressable, int)} with default permission + * Progressable, int, ChecksumOpt)} with default permission * {@link FsPermission#getDefault()}. * * @param src File name @@ -1268,7 +1267,7 @@ public class DFSClient implements java.io.Closeable { /** * Same as {{@link #create(String, FsPermission, EnumSet, short, long, - * Progressable, int)} except that the permission + * Progressable, int, ChecksumOpt)} except that the permission * is absolute (ie has already been masked with umask. */ public DFSOutputStream primitiveCreate(String src, @@ -1453,7 +1452,7 @@ public class DFSClient implements java.io.Closeable { } /** * Delete file or directory. - * See {@link ClientProtocol#delete(String)}. 
+ * See {@link ClientProtocol#delete(String, boolean)}. */ @Deprecated public boolean delete(String src) throws IOException { @@ -1678,7 +1677,7 @@ public class DFSClient implements java.io.Closeable { new Sender(out).blockChecksum(block, lb.getBlockToken()); final BlockOpResponseProto reply = - BlockOpResponseProto.parseFrom(HdfsProtoUtil.vintPrefixed(in)); + BlockOpResponseProto.parseFrom(PBHelper.vintPrefixed(in)); if (reply.getStatus() != Status.SUCCESS) { if (reply.getStatus() == Status.ERROR_ACCESS_TOKEN @@ -1725,8 +1724,8 @@ public class DFSClient implements java.io.Closeable { md5.write(md5out); // read crc-type - final DataChecksum.Type ct = HdfsProtoUtil. - fromProto(checksumData.getCrcType()); + final DataChecksum.Type ct = PBHelper.convert(checksumData + .getCrcType()); if (i == 0) { // first block crcType = ct; } else if (crcType != DataChecksum.Type.MIXED @@ -1888,7 +1887,7 @@ public class DFSClient implements java.io.Closeable { * @param isChecked * If true, then check only active namenode's safemode status, else * check first namenode's status. - * @see ClientProtocol#setSafeMode(HdfsConstants.SafeModeActio,boolean) + * @see ClientProtocol#setSafeMode(HdfsConstants.SafeModeAction, boolean) */ public boolean setSafeMode(SafeModeAction action, boolean isChecked) throws IOException{ return namenode.setSafeMode(action, isChecked); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java index ec8d118bff0..7245f8ac909 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java @@ -52,7 +52,6 @@ import org.apache.hadoop.hdfs.protocol.DatanodeInfo; import org.apache.hadoop.hdfs.protocol.ExtendedBlock; import org.apache.hadoop.hdfs.protocol.HdfsConstants; import org.apache.hadoop.hdfs.protocol.HdfsFileStatus; -import org.apache.hadoop.hdfs.protocol.HdfsProtoUtil; import org.apache.hadoop.hdfs.protocol.LocatedBlock; import org.apache.hadoop.hdfs.protocol.NSQuotaExceededException; import org.apache.hadoop.hdfs.protocol.UnresolvedPathException; @@ -66,6 +65,7 @@ import org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck; import org.apache.hadoop.hdfs.protocol.datatransfer.Sender; import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto; import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status; +import org.apache.hadoop.hdfs.protocolPB.PBHelper; import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier; import org.apache.hadoop.hdfs.security.token.block.InvalidBlockTokenException; import org.apache.hadoop.hdfs.server.namenode.NotReplicatedYetException; @@ -883,7 +883,7 @@ public class DFSOutputStream extends FSOutputSummer implements Syncable { //ack BlockOpResponseProto response = - BlockOpResponseProto.parseFrom(HdfsProtoUtil.vintPrefixed(in)); + BlockOpResponseProto.parseFrom(PBHelper.vintPrefixed(in)); if (SUCCESS != response.getStatus()) { throw new IOException("Failed to add a datanode"); } @@ -1073,7 +1073,7 @@ public class DFSOutputStream extends FSOutputSummer implements Syncable { // receive ack for connect BlockOpResponseProto resp = BlockOpResponseProto.parseFrom( - HdfsProtoUtil.vintPrefixed(blockReplyStream)); + PBHelper.vintPrefixed(blockReplyStream)); pipelineStatus = resp.getStatus(); firstBadLink = resp.getFirstBadLink(); diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/RemoteBlockReader.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/RemoteBlockReader.java index 2bcd96e7644..dc449ee2f24 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/RemoteBlockReader.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/RemoteBlockReader.java @@ -17,8 +17,6 @@ */ package org.apache.hadoop.hdfs; -import static org.apache.hadoop.hdfs.protocol.HdfsProtoUtil.vintPrefixed; - import java.io.BufferedInputStream; import java.io.BufferedOutputStream; import java.io.DataInputStream; @@ -39,6 +37,7 @@ import org.apache.hadoop.hdfs.protocol.datatransfer.Sender; import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto; import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto; import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status; +import org.apache.hadoop.hdfs.protocolPB.PBHelper; import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier; import org.apache.hadoop.hdfs.server.common.HdfsServerConstants; import org.apache.hadoop.io.IOUtils; @@ -392,7 +391,7 @@ public class RemoteBlockReader extends FSInputChecker implements BlockReader { bufferSize)); BlockOpResponseProto status = BlockOpResponseProto.parseFrom( - vintPrefixed(in)); + PBHelper.vintPrefixed(in)); RemoteBlockReader2.checkSuccess(status, sock, block, file); ReadOpChecksumInfoProto checksumInfo = status.getReadOpChecksumInfo(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/RemoteBlockReader2.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/RemoteBlockReader2.java index b9a5c76ec31..3450cd1524d 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/RemoteBlockReader2.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/RemoteBlockReader2.java @@ -17,8 +17,6 @@ */ package org.apache.hadoop.hdfs; -import static org.apache.hadoop.hdfs.protocol.HdfsProtoUtil.vintPrefixed; - import java.io.BufferedOutputStream; import java.io.DataInputStream; import java.io.DataOutputStream; @@ -43,6 +41,7 @@ import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientReadStatus import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto; import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status; import org.apache.hadoop.hdfs.security.token.block.DataEncryptionKey; +import org.apache.hadoop.hdfs.protocolPB.PBHelper; import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier; import org.apache.hadoop.hdfs.security.token.block.InvalidBlockTokenException; import org.apache.hadoop.net.SocketInputWrapper; @@ -401,7 +400,7 @@ public class RemoteBlockReader2 implements BlockReader { DataInputStream in = new DataInputStream(ioStreams.in); BlockOpResponseProto status = BlockOpResponseProto.parseFrom( - vintPrefixed(in)); + PBHelper.vintPrefixed(in)); checkSuccess(status, sock, block, file); ReadOpChecksumInfoProto checksumInfo = status.getReadOpChecksumInfo(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsProtoUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsProtoUtil.java deleted file mode 100644 index ab8b95534b9..00000000000 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsProtoUtil.java +++ 
/dev/null @@ -1,180 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hdfs.protocol; - -import java.io.EOFException; -import java.io.IOException; -import java.io.InputStream; -import java.util.ArrayList; -import java.util.List; - -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.classification.InterfaceStability; -import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos; -import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier; -import org.apache.hadoop.hdfs.util.ExactSizeInputStream; -import org.apache.hadoop.io.Text; -import org.apache.hadoop.util.DataChecksum; -import org.apache.hadoop.security.proto.SecurityProtos.TokenProto; -import org.apache.hadoop.security.token.Token; - -import com.google.common.collect.Lists; -import com.google.protobuf.ByteString; -import com.google.protobuf.CodedInputStream; - -/** - * Utilities for converting to and from protocol buffers used in the - * HDFS wire protocol, as well as some generic utilities useful - * for dealing with protocol buffers. 
- */ -@InterfaceAudience.Private -@InterfaceStability.Evolving -public abstract class HdfsProtoUtil { - - //// Block Token //// - - public static TokenProto toProto(Token blockToken) { - return TokenProto.newBuilder() - .setIdentifier(ByteString.copyFrom(blockToken.getIdentifier())) - .setPassword(ByteString.copyFrom(blockToken.getPassword())) - .setKind(blockToken.getKind().toString()) - .setService(blockToken.getService().toString()) - .build(); - } - - public static Token fromProto(TokenProto proto) { - return new Token(proto.getIdentifier().toByteArray(), - proto.getPassword().toByteArray(), - new Text(proto.getKind()), - new Text(proto.getService())); - } - - //// Extended Block //// - - public static HdfsProtos.ExtendedBlockProto toProto(ExtendedBlock block) { - return HdfsProtos.ExtendedBlockProto.newBuilder() - .setBlockId(block.getBlockId()) - .setPoolId(block.getBlockPoolId()) - .setNumBytes(block.getNumBytes()) - .setGenerationStamp(block.getGenerationStamp()) - .build(); - } - - public static ExtendedBlock fromProto(HdfsProtos.ExtendedBlockProto proto) { - return new ExtendedBlock( - proto.getPoolId(), proto.getBlockId(), - proto.getNumBytes(), proto.getGenerationStamp()); - } - - //// DatanodeID //// - - private static HdfsProtos.DatanodeIDProto toProto( - DatanodeID dni) { - return HdfsProtos.DatanodeIDProto.newBuilder() - .setIpAddr(dni.getIpAddr()) - .setHostName(dni.getHostName()) - .setStorageID(dni.getStorageID()) - .setXferPort(dni.getXferPort()) - .setInfoPort(dni.getInfoPort()) - .setIpcPort(dni.getIpcPort()) - .build(); - } - - private static DatanodeID fromProto(HdfsProtos.DatanodeIDProto idProto) { - return new DatanodeID( - idProto.getIpAddr(), - idProto.getHostName(), - idProto.getStorageID(), - idProto.getXferPort(), - idProto.getInfoPort(), - idProto.getIpcPort()); - } - - //// DatanodeInfo //// - - public static HdfsProtos.DatanodeInfoProto toProto(DatanodeInfo dni) { - return HdfsProtos.DatanodeInfoProto.newBuilder() - .setId(toProto((DatanodeID)dni)) - .setCapacity(dni.getCapacity()) - .setDfsUsed(dni.getDfsUsed()) - .setRemaining(dni.getRemaining()) - .setBlockPoolUsed(dni.getBlockPoolUsed()) - .setLastUpdate(dni.getLastUpdate()) - .setXceiverCount(dni.getXceiverCount()) - .setLocation(dni.getNetworkLocation()) - .setAdminState(HdfsProtos.DatanodeInfoProto.AdminState.valueOf( - dni.getAdminState().name())) - .build(); - } - - public static DatanodeInfo fromProto(HdfsProtos.DatanodeInfoProto dniProto) { - DatanodeInfo dniObj = new DatanodeInfo(fromProto(dniProto.getId()), - dniProto.getLocation()); - - dniObj.setCapacity(dniProto.getCapacity()); - dniObj.setDfsUsed(dniProto.getDfsUsed()); - dniObj.setRemaining(dniProto.getRemaining()); - dniObj.setBlockPoolUsed(dniProto.getBlockPoolUsed()); - dniObj.setLastUpdate(dniProto.getLastUpdate()); - dniObj.setXceiverCount(dniProto.getXceiverCount()); - dniObj.setAdminState(DatanodeInfo.AdminStates.valueOf( - dniProto.getAdminState().name())); - return dniObj; - } - - public static ArrayList toProtos( - DatanodeInfo[] dnInfos, int startIdx) { - ArrayList protos = - Lists.newArrayListWithCapacity(dnInfos.length); - for (int i = startIdx; i < dnInfos.length; i++) { - protos.add(toProto(dnInfos[i])); - } - return protos; - } - - public static DatanodeInfo[] fromProtos( - List targetsList) { - DatanodeInfo[] ret = new DatanodeInfo[targetsList.size()]; - int i = 0; - for (HdfsProtos.DatanodeInfoProto proto : targetsList) { - ret[i++] = fromProto(proto); - } - return ret; - } - - public static DataChecksum.Type 
fromProto(HdfsProtos.ChecksumTypeProto type) { - return DataChecksum.Type.valueOf(type.getNumber()); - } - - public static HdfsProtos.ChecksumTypeProto toProto(DataChecksum.Type type) { - return HdfsProtos.ChecksumTypeProto.valueOf(type.id); - } - - public static InputStream vintPrefixed(final InputStream input) - throws IOException { - final int firstByte = input.read(); - if (firstByte == -1) { - throw new EOFException("Premature EOF: no length prefix available"); - } - - int size = CodedInputStream.readRawVarint32(firstByte, input); - assert size >= 0; - - return new ExactSizeInputStream(input, size); - } -} diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/DataTransferEncryptor.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/DataTransferEncryptor.java index ce81135a40c..229480b927b 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/DataTransferEncryptor.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/DataTransferEncryptor.java @@ -17,7 +17,7 @@ */ package org.apache.hadoop.hdfs.protocol.datatransfer; -import static org.apache.hadoop.hdfs.protocol.HdfsProtoUtil.vintPrefixed; +import static org.apache.hadoop.hdfs.protocolPB.PBHelper.vintPrefixed; import java.io.DataInputStream; import java.io.DataOutputStream; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/DataTransferProtoUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/DataTransferProtoUtil.java index bdd4df06843..6be3810c918 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/DataTransferProtoUtil.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/DataTransferProtoUtil.java @@ -21,12 +21,12 @@ package org.apache.hadoop.hdfs.protocol.datatransfer; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.hdfs.protocol.ExtendedBlock; -import org.apache.hadoop.hdfs.protocol.HdfsProtoUtil; import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto; import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto; import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto; import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto; import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ChecksumTypeProto; +import org.apache.hadoop.hdfs.protocolPB.PBHelper; import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier; import org.apache.hadoop.security.token.Token; import org.apache.hadoop.util.DataChecksum; @@ -41,18 +41,16 @@ import org.apache.hadoop.util.DataChecksum; public abstract class DataTransferProtoUtil { static BlockConstructionStage fromProto( OpWriteBlockProto.BlockConstructionStage stage) { - return BlockConstructionStage.valueOf(BlockConstructionStage.class, - stage.name()); + return BlockConstructionStage.valueOf(stage.name()); } static OpWriteBlockProto.BlockConstructionStage toProto( BlockConstructionStage stage) { - return OpWriteBlockProto.BlockConstructionStage.valueOf( - stage.name()); + return OpWriteBlockProto.BlockConstructionStage.valueOf(stage.name()); } public static ChecksumProto toProto(DataChecksum checksum) { 
- ChecksumTypeProto type = HdfsProtoUtil.toProto(checksum.getChecksumType()); + ChecksumTypeProto type = PBHelper.convert(checksum.getChecksumType()); // ChecksumType#valueOf never returns null return ChecksumProto.newBuilder() .setBytesPerChecksum(checksum.getBytesPerChecksum()) @@ -64,8 +62,7 @@ public abstract class DataTransferProtoUtil { if (proto == null) return null; int bytesPerChecksum = proto.getBytesPerChecksum(); - DataChecksum.Type type = HdfsProtoUtil.fromProto(proto.getType()); - + DataChecksum.Type type = PBHelper.convert(proto.getType()); return DataChecksum.newDataChecksum(type, bytesPerChecksum); } @@ -82,8 +79,8 @@ public abstract class DataTransferProtoUtil { static BaseHeaderProto buildBaseHeader(ExtendedBlock blk, Token blockToken) { return BaseHeaderProto.newBuilder() - .setBlock(HdfsProtoUtil.toProto(blk)) - .setToken(HdfsProtoUtil.toProto(blockToken)) + .setBlock(PBHelper.convert(blk)) + .setToken(PBHelper.convert(blockToken)) .build(); } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/PipelineAck.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/PipelineAck.java index 94d9724b8c6..b743e29f217 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/PipelineAck.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/PipelineAck.java @@ -17,7 +17,7 @@ */ package org.apache.hadoop.hdfs.protocol.datatransfer; -import static org.apache.hadoop.hdfs.protocol.HdfsProtoUtil.vintPrefixed; +import static org.apache.hadoop.hdfs.protocolPB.PBHelper.vintPrefixed; import java.io.IOException; import java.io.InputStream; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/Receiver.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/Receiver.java index ff7a81babd7..b1edc20e3a9 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/Receiver.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/Receiver.java @@ -17,9 +17,7 @@ */ package org.apache.hadoop.hdfs.protocol.datatransfer; -import static org.apache.hadoop.hdfs.protocol.HdfsProtoUtil.fromProto; -import static org.apache.hadoop.hdfs.protocol.HdfsProtoUtil.fromProtos; -import static org.apache.hadoop.hdfs.protocol.HdfsProtoUtil.vintPrefixed; +import static org.apache.hadoop.hdfs.protocolPB.PBHelper.vintPrefixed; import static org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.fromProto; import java.io.DataInputStream; @@ -33,6 +31,7 @@ import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReadBlockProto import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReplaceBlockProto; import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpTransferBlockProto; import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto; +import org.apache.hadoop.hdfs.protocolPB.PBHelper; /** Receiver */ @InterfaceAudience.Private @@ -85,8 +84,8 @@ public abstract class Receiver implements DataTransferProtocol { /** Receive OP_READ_BLOCK */ private void opReadBlock() throws IOException { OpReadBlockProto proto = OpReadBlockProto.parseFrom(vintPrefixed(in)); - readBlock(fromProto(proto.getHeader().getBaseHeader().getBlock()), - fromProto(proto.getHeader().getBaseHeader().getToken()), + 
readBlock(PBHelper.convert(proto.getHeader().getBaseHeader().getBlock()), + PBHelper.convert(proto.getHeader().getBaseHeader().getToken()), proto.getHeader().getClientName(), proto.getOffset(), proto.getLen()); @@ -95,11 +94,11 @@ public abstract class Receiver implements DataTransferProtocol { /** Receive OP_WRITE_BLOCK */ private void opWriteBlock(DataInputStream in) throws IOException { final OpWriteBlockProto proto = OpWriteBlockProto.parseFrom(vintPrefixed(in)); - writeBlock(fromProto(proto.getHeader().getBaseHeader().getBlock()), - fromProto(proto.getHeader().getBaseHeader().getToken()), + writeBlock(PBHelper.convert(proto.getHeader().getBaseHeader().getBlock()), + PBHelper.convert(proto.getHeader().getBaseHeader().getToken()), proto.getHeader().getClientName(), - fromProtos(proto.getTargetsList()), - fromProto(proto.getSource()), + PBHelper.convert(proto.getTargetsList()), + PBHelper.convert(proto.getSource()), fromProto(proto.getStage()), proto.getPipelineSize(), proto.getMinBytesRcvd(), proto.getMaxBytesRcvd(), @@ -111,33 +110,33 @@ public abstract class Receiver implements DataTransferProtocol { private void opTransferBlock(DataInputStream in) throws IOException { final OpTransferBlockProto proto = OpTransferBlockProto.parseFrom(vintPrefixed(in)); - transferBlock(fromProto(proto.getHeader().getBaseHeader().getBlock()), - fromProto(proto.getHeader().getBaseHeader().getToken()), + transferBlock(PBHelper.convert(proto.getHeader().getBaseHeader().getBlock()), + PBHelper.convert(proto.getHeader().getBaseHeader().getToken()), proto.getHeader().getClientName(), - fromProtos(proto.getTargetsList())); + PBHelper.convert(proto.getTargetsList())); } /** Receive OP_REPLACE_BLOCK */ private void opReplaceBlock(DataInputStream in) throws IOException { OpReplaceBlockProto proto = OpReplaceBlockProto.parseFrom(vintPrefixed(in)); - replaceBlock(fromProto(proto.getHeader().getBlock()), - fromProto(proto.getHeader().getToken()), + replaceBlock(PBHelper.convert(proto.getHeader().getBlock()), + PBHelper.convert(proto.getHeader().getToken()), proto.getDelHint(), - fromProto(proto.getSource())); + PBHelper.convert(proto.getSource())); } /** Receive OP_COPY_BLOCK */ private void opCopyBlock(DataInputStream in) throws IOException { OpCopyBlockProto proto = OpCopyBlockProto.parseFrom(vintPrefixed(in)); - copyBlock(fromProto(proto.getHeader().getBlock()), - fromProto(proto.getHeader().getToken())); + copyBlock(PBHelper.convert(proto.getHeader().getBlock()), + PBHelper.convert(proto.getHeader().getToken())); } /** Receive OP_BLOCK_CHECKSUM */ private void opBlockChecksum(DataInputStream in) throws IOException { OpBlockChecksumProto proto = OpBlockChecksumProto.parseFrom(vintPrefixed(in)); - blockChecksum(fromProto(proto.getHeader().getBlock()), - fromProto(proto.getHeader().getToken())); + blockChecksum(PBHelper.convert(proto.getHeader().getBlock()), + PBHelper.convert(proto.getHeader().getToken())); } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/Sender.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/Sender.java index 03e13080612..8184c500f8b 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/Sender.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/Sender.java @@ -17,8 +17,6 @@ */ package org.apache.hadoop.hdfs.protocol.datatransfer; -import static 
org.apache.hadoop.hdfs.protocol.HdfsProtoUtil.toProto; -import static org.apache.hadoop.hdfs.protocol.HdfsProtoUtil.toProtos; import static org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.toProto; import java.io.DataOutput; @@ -37,6 +35,7 @@ import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReadBlockProto import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReplaceBlockProto; import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpTransferBlockProto; import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto; +import org.apache.hadoop.hdfs.protocolPB.PBHelper; import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier; import org.apache.hadoop.security.token.Token; import org.apache.hadoop.util.DataChecksum; @@ -105,7 +104,7 @@ public class Sender implements DataTransferProtocol { OpWriteBlockProto.Builder proto = OpWriteBlockProto.newBuilder() .setHeader(header) - .addAllTargets(toProtos(targets, 1)) + .addAllTargets(PBHelper.convert(targets, 1)) .setStage(toProto(stage)) .setPipelineSize(pipelineSize) .setMinBytesRcvd(minBytesRcvd) @@ -114,7 +113,7 @@ public class Sender implements DataTransferProtocol { .setRequestedChecksum(checksumProto); if (source != null) { - proto.setSource(toProto(source)); + proto.setSource(PBHelper.convertDatanodeInfo(source)); } send(out, Op.WRITE_BLOCK, proto.build()); @@ -129,7 +128,7 @@ public class Sender implements DataTransferProtocol { OpTransferBlockProto proto = OpTransferBlockProto.newBuilder() .setHeader(DataTransferProtoUtil.buildClientHeader( blk, clientName, blockToken)) - .addAllTargets(toProtos(targets, 0)) + .addAllTargets(PBHelper.convert(targets)) .build(); send(out, Op.TRANSFER_BLOCK, proto); @@ -143,7 +142,7 @@ public class Sender implements DataTransferProtocol { OpReplaceBlockProto proto = OpReplaceBlockProto.newBuilder() .setHeader(DataTransferProtoUtil.buildBaseHeader(blk, blockToken)) .setDelHint(delHint) - .setSource(toProto(source)) + .setSource(PBHelper.convertDatanodeInfo(source)) .build(); send(out, Op.REPLACE_BLOCK, proto); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java index 86bf98b17a3..82eb8169cec 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java @@ -18,7 +18,6 @@ package org.apache.hadoop.hdfs.protocolPB; import java.io.IOException; -import java.util.Arrays; import java.util.List; import org.apache.hadoop.classification.InterfaceAudience; @@ -131,7 +130,6 @@ import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto; import org.apache.hadoop.hdfs.security.token.block.DataEncryptionKey; import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier; import org.apache.hadoop.io.Text; -import org.apache.hadoop.security.proto.SecurityProtos.TokenProto; import org.apache.hadoop.security.token.Token; import com.google.protobuf.RpcController; @@ -494,10 +492,10 @@ public class ClientNamenodeProtocolServerSideTranslatorPB implements RpcController controller, GetDatanodeReportRequestProto req) throws ServiceException { try { - DatanodeInfoProto[] result = 
PBHelper.convert(server + List result = PBHelper.convert(server .getDatanodeReport(PBHelper.convert(req.getType()))); return GetDatanodeReportResponseProto.newBuilder() - .addAllDi(Arrays.asList(result)).build(); + .addAllDi(result).build(); } catch (IOException e) { throw new ServiceException(e); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java index 3a89dd9bf54..74927ea4ad8 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java @@ -282,7 +282,7 @@ public class ClientNamenodeProtocolTranslatorPB implements if (previous != null) req.setPrevious(PBHelper.convert(previous)); if (excludeNodes != null) - req.addAllExcludeNodes(Arrays.asList(PBHelper.convert(excludeNodes))); + req.addAllExcludeNodes(PBHelper.convert(excludeNodes)); try { return PBHelper.convert(rpcProxy.addBlock(null, req.build()).getBlock()); } catch (ServiceException e) { @@ -300,8 +300,8 @@ public class ClientNamenodeProtocolTranslatorPB implements .newBuilder() .setSrc(src) .setBlk(PBHelper.convert(blk)) - .addAllExistings(Arrays.asList(PBHelper.convert(existings))) - .addAllExcludes(Arrays.asList(PBHelper.convert(excludes))) + .addAllExistings(PBHelper.convert(existings)) + .addAllExcludes(PBHelper.convert(excludes)) .setNumAdditionalNodes(numAdditionalNodes) .setClientName(clientName) .build(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java index 38710b98ddc..1ef79b52c37 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java @@ -17,6 +17,9 @@ */ package org.apache.hadoop.hdfs.protocolPB; +import java.io.EOFException; +import java.io.IOException; +import java.io.InputStream; import java.util.ArrayList; import java.util.Arrays; import java.util.EnumSet; @@ -40,10 +43,10 @@ import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType; import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction; import org.apache.hadoop.hdfs.protocol.HdfsFileStatus; import org.apache.hadoop.hdfs.protocol.HdfsLocatedFileStatus; -import org.apache.hadoop.hdfs.protocol.HdfsProtoUtil; import org.apache.hadoop.hdfs.protocol.LocatedBlock; import org.apache.hadoop.hdfs.protocol.LocatedBlocks; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos; +import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateFlagProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DatanodeReportTypeProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsStatsResponseProto; @@ -127,15 +130,20 @@ import org.apache.hadoop.hdfs.server.protocol.ReceivedDeletedBlockInfo.BlockStat import org.apache.hadoop.hdfs.server.protocol.RegisterCommand; import org.apache.hadoop.hdfs.server.protocol.RemoteEditLog; import org.apache.hadoop.hdfs.server.protocol.RemoteEditLogManifest; +import 
org.apache.hadoop.hdfs.util.ExactSizeInputStream; import org.apache.hadoop.io.EnumSetWritable; import org.apache.hadoop.io.Text; import org.apache.hadoop.security.proto.SecurityProtos.TokenProto; import org.apache.hadoop.security.token.Token; +import org.apache.hadoop.util.DataChecksum; +import com.google.common.collect.Lists; import com.google.protobuf.ByteString; +import com.google.protobuf.CodedInputStream; /** - * Utilities for converting protobuf classes to and from implementation classes. + * Utilities for converting protobuf classes to and from implementation classes + * and other helper utilities to help in dealing with protobuf. * * Note that when converting from an internal type to protobuf type, the * converter never return null for protobuf type. The check for internal type @@ -219,7 +227,8 @@ public class PBHelper { // Arrays of DatanodeId public static DatanodeIDProto[] convert(DatanodeID[] did) { - if (did == null) return null; + if (did == null) + return null; final int len = did.length; DatanodeIDProto[] result = new DatanodeIDProto[len]; for (int i = 0; i < len; ++i) { @@ -482,14 +491,26 @@ public class PBHelper { } return result; } + + public static List convert( + DatanodeInfo[] dnInfos) { + return convert(dnInfos, 0); + } - static public DatanodeInfoProto[] convert(DatanodeInfo[] di) { - if (di == null) return null; - DatanodeInfoProto[] result = new DatanodeInfoProto[di.length]; - for (int i = 0; i < di.length; i++) { - result[i] = PBHelper.convertDatanodeInfo(di[i]); + /** + * Copy from {@code dnInfos} to a target of list of same size starting at + * {@code startIdx}. + */ + public static List convert( + DatanodeInfo[] dnInfos, int startIdx) { + if (dnInfos == null) + return null; + ArrayList protos = Lists + .newArrayListWithCapacity(dnInfos.length); + for (int i = startIdx; i < dnInfos.length; i++) { + protos.add(convert(dnInfos[i])); } - return result; + return protos; } public static DatanodeInfo[] convert(List list) { @@ -694,7 +715,7 @@ public class PBHelper { DatanodeInfosProto[] ret = new DatanodeInfosProto[targets.length]; for (int i = 0; i < targets.length; i++) { ret[i] = DatanodeInfosProto.newBuilder() - .addAllDatanodes(Arrays.asList(PBHelper.convert(targets[i]))).build(); + .addAllDatanodes(PBHelper.convert(targets[i])).build(); } return Arrays.asList(ret); } @@ -963,7 +984,7 @@ public class PBHelper { fs.getFileBufferSize(), fs.getEncryptDataTransfer(), fs.getTrashInterval(), - HdfsProtoUtil.fromProto(fs.getChecksumType())); + PBHelper.convert(fs.getChecksumType())); } public static FsServerDefaultsProto convert(FsServerDefaults fs) { @@ -976,7 +997,7 @@ public class PBHelper { .setFileBufferSize(fs.getFileBufferSize()) .setEncryptDataTransfer(fs.getEncryptDataTransfer()) .setTrashInterval(fs.getTrashInterval()) - .setChecksumType(HdfsProtoUtil.toProto(fs.getChecksumType())) + .setChecksumType(PBHelper.convert(fs.getChecksumType())) .build(); } @@ -1314,4 +1335,24 @@ public class PBHelper { .setLayoutVersion(j.getLayoutVersion()) .setNamespaceID(j.getNamespaceId()).build(); } + + public static DataChecksum.Type convert(HdfsProtos.ChecksumTypeProto type) { + return DataChecksum.Type.valueOf(type.getNumber()); + } + + public static HdfsProtos.ChecksumTypeProto convert(DataChecksum.Type type) { + return HdfsProtos.ChecksumTypeProto.valueOf(type.id); + } + + public static InputStream vintPrefixed(final InputStream input) + throws IOException { + final int firstByte = input.read(); + if (firstByte == -1) { + throw new EOFException("Premature EOF: no length 
prefix available"); + } + + int size = CodedInputStream.readRawVarint32(firstByte, input); + assert size >= 0; + return new ExactSizeInputStream(input, size); + } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java index f5b76efc417..049e6691913 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java @@ -18,7 +18,8 @@ package org.apache.hadoop.hdfs.server.balancer; import static com.google.common.base.Preconditions.checkArgument; -import static org.apache.hadoop.hdfs.protocol.HdfsProtoUtil.vintPrefixed; + +import static org.apache.hadoop.hdfs.protocolPB.PBHelper.vintPrefixed; import java.io.BufferedInputStream; import java.io.BufferedOutputStream; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java index 467d44e80ef..58bdedc72ec 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java @@ -98,7 +98,6 @@ import org.apache.hadoop.hdfs.protocol.DatanodeInfo; import org.apache.hadoop.hdfs.protocol.ExtendedBlock; import org.apache.hadoop.hdfs.protocol.HdfsBlocksMetadata; import org.apache.hadoop.hdfs.protocol.HdfsConstants; -import org.apache.hadoop.hdfs.protocol.HdfsProtoUtil; import org.apache.hadoop.hdfs.protocol.RecoveryInProgressException; import org.apache.hadoop.hdfs.protocol.datatransfer.BlockConstructionStage; import org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferEncryptor; @@ -115,6 +114,7 @@ import org.apache.hadoop.hdfs.protocolPB.DatanodeProtocolClientSideTranslatorPB; import org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolPB; import org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolServerSideTranslatorPB; import org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB; +import org.apache.hadoop.hdfs.protocolPB.PBHelper; import org.apache.hadoop.hdfs.security.token.block.BlockPoolTokenSecretManager; import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier; import org.apache.hadoop.hdfs.security.token.block.BlockTokenSecretManager; @@ -1468,7 +1468,7 @@ public class DataNode extends Configured // read ack if (isClient) { DNTransferAckProto closeAck = DNTransferAckProto.parseFrom( - HdfsProtoUtil.vintPrefixed(in)); + PBHelper.vintPrefixed(in)); if (LOG.isDebugEnabled()) { LOG.debug(getClass().getSimpleName() + ": close-ack=" + closeAck); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java index 31b896caf93..255fd35ff35 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java @@ -42,7 +42,6 @@ import org.apache.commons.logging.Log; import org.apache.hadoop.hdfs.protocol.DatanodeInfo; import org.apache.hadoop.hdfs.protocol.ExtendedBlock; import 
org.apache.hadoop.hdfs.protocol.HdfsConstants; -import org.apache.hadoop.hdfs.protocol.HdfsProtoUtil; import org.apache.hadoop.hdfs.protocol.datatransfer.BlockConstructionStage; import org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferEncryptor.InvalidMagicNumberException; import org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil; @@ -56,6 +55,7 @@ import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientReadStatus import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto; import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto; import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status; +import org.apache.hadoop.hdfs.protocolPB.PBHelper; import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier; import org.apache.hadoop.hdfs.security.token.block.BlockTokenSecretManager; import org.apache.hadoop.hdfs.server.common.HdfsServerConstants; @@ -144,7 +144,7 @@ class DataXceiver extends Receiver implements Runnable { /** Return the datanode object. */ DataNode getDataNode() {return datanode;} - private OutputStream getOutputStream() throws IOException { + private OutputStream getOutputStream() { return socketOut; } @@ -284,7 +284,7 @@ class DataXceiver extends Receiver implements Runnable { // to respond with a Status enum. try { ClientReadStatusProto stat = ClientReadStatusProto.parseFrom( - HdfsProtoUtil.vintPrefixed(in)); + PBHelper.vintPrefixed(in)); if (!stat.hasStatus()) { LOG.warn("Client " + s.getInetAddress() + " did not send a valid status " + "code after reading. Will close connection."); @@ -445,7 +445,7 @@ class DataXceiver extends Receiver implements Runnable { // read connect ack (only for clients, not for replication req) if (isClient) { BlockOpResponseProto connectAck = - BlockOpResponseProto.parseFrom(HdfsProtoUtil.vintPrefixed(mirrorIn)); + BlockOpResponseProto.parseFrom(PBHelper.vintPrefixed(mirrorIn)); mirrorInStatus = connectAck.getStatus(); firstBadLink = connectAck.getFirstBadLink(); if (LOG.isDebugEnabled() || mirrorInStatus != SUCCESS) { @@ -606,7 +606,7 @@ class DataXceiver extends Receiver implements Runnable { .setBytesPerCrc(bytesPerCRC) .setCrcPerBlock(crcPerBlock) .setMd5(ByteString.copyFrom(md5.getDigest())) - .setCrcType(HdfsProtoUtil.toProto(checksum.getChecksumType())) + .setCrcType(PBHelper.convert(checksum.getChecksumType())) ) .build() .writeDelimitedTo(out); @@ -765,7 +765,7 @@ class DataXceiver extends Receiver implements Runnable { // receive the response from the proxy BlockOpResponseProto copyResponse = BlockOpResponseProto.parseFrom( - HdfsProtoUtil.vintPrefixed(proxyReply)); + PBHelper.vintPrefixed(proxyReply)); if (copyResponse.getStatus() != SUCCESS) { if (copyResponse.getStatus() == ERROR_ACCESS_TOKEN) { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocol/TestHdfsProtoUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocol/TestHdfsProtoUtil.java deleted file mode 100644 index 0a04e3c521f..00000000000 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocol/TestHdfsProtoUtil.java +++ /dev/null @@ -1,42 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hdfs.protocol; - -import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos; -import org.apache.hadoop.util.DataChecksum; -import org.junit.Test; - -import static org.junit.Assert.assertEquals; - -public class TestHdfsProtoUtil { - @Test - public void testChecksumTypeProto() { - assertEquals(DataChecksum.Type.NULL, - HdfsProtoUtil.fromProto(HdfsProtos.ChecksumTypeProto.CHECKSUM_NULL)); - assertEquals(DataChecksum.Type.CRC32, - HdfsProtoUtil.fromProto(HdfsProtos.ChecksumTypeProto.CHECKSUM_CRC32)); - assertEquals(DataChecksum.Type.CRC32C, - HdfsProtoUtil.fromProto(HdfsProtos.ChecksumTypeProto.CHECKSUM_CRC32C)); - assertEquals(HdfsProtoUtil.toProto(DataChecksum.Type.NULL), - HdfsProtos.ChecksumTypeProto.CHECKSUM_NULL); - assertEquals(HdfsProtoUtil.toProto(DataChecksum.Type.CRC32), - HdfsProtos.ChecksumTypeProto.CHECKSUM_CRC32); - assertEquals(HdfsProtoUtil.toProto(DataChecksum.Type.CRC32C), - HdfsProtos.ChecksumTypeProto.CHECKSUM_CRC32C); - } -} diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocolPB/TestPBHelper.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocolPB/TestPBHelper.java index 65a6ed0fe2d..23cd46e7323 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocolPB/TestPBHelper.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocolPB/TestPBHelper.java @@ -31,6 +31,7 @@ import org.apache.hadoop.hdfs.protocol.DatanodeInfo; import org.apache.hadoop.hdfs.protocol.DatanodeInfo.AdminStates; import org.apache.hadoop.hdfs.protocol.ExtendedBlock; import org.apache.hadoop.hdfs.protocol.LocatedBlock; +import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos; import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto; import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommandProto; import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto; @@ -70,6 +71,7 @@ import org.apache.hadoop.hdfs.server.protocol.RemoteEditLogManifest; import org.apache.hadoop.io.Text; import org.apache.hadoop.security.proto.SecurityProtos.TokenProto; import org.apache.hadoop.security.token.Token; +import org.apache.hadoop.util.DataChecksum; import org.junit.Test; import com.google.common.base.Joiner; @@ -471,4 +473,20 @@ public class TestPBHelper { } } } + + @Test + public void testChecksumTypeProto() { + assertEquals(DataChecksum.Type.NULL, + PBHelper.convert(HdfsProtos.ChecksumTypeProto.CHECKSUM_NULL)); + assertEquals(DataChecksum.Type.CRC32, + PBHelper.convert(HdfsProtos.ChecksumTypeProto.CHECKSUM_CRC32)); + assertEquals(DataChecksum.Type.CRC32C, + PBHelper.convert(HdfsProtos.ChecksumTypeProto.CHECKSUM_CRC32C)); + assertEquals(PBHelper.convert(DataChecksum.Type.NULL), + HdfsProtos.ChecksumTypeProto.CHECKSUM_NULL); + assertEquals(PBHelper.convert(DataChecksum.Type.CRC32), + 
HdfsProtos.ChecksumTypeProto.CHECKSUM_CRC32); + assertEquals(PBHelper.convert(DataChecksum.Type.CRC32C), + HdfsProtos.ChecksumTypeProto.CHECKSUM_CRC32C); + } } From 3555e7c574de5a6d163c5375a31de290776b2ab0 Mon Sep 17 00:00:00 2001 From: Aaron Myers Date: Wed, 9 Jan 2013 22:29:41 +0000 Subject: [PATCH 04/31] HDFS-4306. PBHelper.convertLocatedBlock miss convert BlockToken. Contributed by Binglin Chang. git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1431117 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 3 + .../hadoop/hdfs/protocolPB/PBHelper.java | 19 +---- .../hadoop/hdfs/protocolPB/TestPBHelper.java | 74 +++++++++++++++---- 3 files changed, 66 insertions(+), 30 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt index 24707fdcc75..8e381dd686a 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt @@ -679,6 +679,9 @@ Release 2.0.3-alpha - Unreleased HDFS-3970. Fix bug causing rollback of HDFS upgrade to result in bad VERSION file. (Vinay and Andrew Wang via atm) + HDFS-4306. PBHelper.convertLocatedBlock miss convert BlockToken. (Binglin + Chang via atm) + BREAKDOWN OF HDFS-3077 SUBTASKS HDFS-3077. Quorum-based protocol for reading and writing edit logs. diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java index 1ef79b52c37..eec66821653 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java @@ -884,25 +884,14 @@ public class PBHelper { // Located Block Arrays and Lists public static LocatedBlockProto[] convertLocatedBlock(LocatedBlock[] lb) { if (lb == null) return null; - final int len = lb.length; - LocatedBlockProto[] result = new LocatedBlockProto[len]; - for (int i = 0; i < len; ++i) { - result[i] = PBHelper.convert(lb[i]); - } - return result; + return convertLocatedBlock2(Arrays.asList(lb)).toArray( + new LocatedBlockProto[lb.length]); } public static LocatedBlock[] convertLocatedBlock(LocatedBlockProto[] lb) { if (lb == null) return null; - final int len = lb.length; - LocatedBlock[] result = new LocatedBlock[len]; - for (int i = 0; i < len; ++i) { - result[i] = new LocatedBlock( - PBHelper.convert(lb[i].getB()), - PBHelper.convert(lb[i].getLocsList()), - lb[i].getOffset(), lb[i].getCorrupt()); - } - return result; + return convertLocatedBlock(Arrays.asList(lb)).toArray( + new LocatedBlock[lb.length]); } public static List convertLocatedBlock( diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocolPB/TestPBHelper.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocolPB/TestPBHelper.java index 23cd46e7323..b6c2f6ec29e 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocolPB/TestPBHelper.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocolPB/TestPBHelper.java @@ -405,29 +405,73 @@ public class TestPBHelper { assertEquals(expected.getKind(), actual.getKind()); assertEquals(expected.getService(), actual.getService()); } - - @Test - public void testConvertLocatedBlock() { - DatanodeInfo [] dnInfos = { - DFSTestUtil.getLocalDatanodeInfo("127.0.0.1", "h1", 
AdminStates.DECOMMISSION_INPROGRESS), - DFSTestUtil.getLocalDatanodeInfo("127.0.0.1", "h2", AdminStates.DECOMMISSIONED), - DFSTestUtil.getLocalDatanodeInfo("127.0.0.1", "h3", AdminStates.NORMAL) + + private void compare(LocatedBlock expected, LocatedBlock actual) { + assertEquals(expected.getBlock(), actual.getBlock()); + compare(expected.getBlockToken(), actual.getBlockToken()); + assertEquals(expected.getStartOffset(), actual.getStartOffset()); + assertEquals(expected.isCorrupt(), actual.isCorrupt()); + DatanodeInfo [] ei = expected.getLocations(); + DatanodeInfo [] ai = actual.getLocations(); + assertEquals(ei.length, ai.length); + for (int i = 0; i < ei.length ; i++) { + compare(ei[i], ai[i]); + } + } + + private LocatedBlock createLocatedBlock() { + DatanodeInfo[] dnInfos = { + DFSTestUtil.getLocalDatanodeInfo("127.0.0.1", "h1", + AdminStates.DECOMMISSION_INPROGRESS), + DFSTestUtil.getLocalDatanodeInfo("127.0.0.1", "h2", + AdminStates.DECOMMISSIONED), + DFSTestUtil.getLocalDatanodeInfo("127.0.0.1", "h3", + AdminStates.NORMAL) }; LocatedBlock lb = new LocatedBlock( new ExtendedBlock("bp12", 12345, 10, 53), dnInfos, 5, false); + lb.setBlockToken(new Token( + "identifier".getBytes(), "password".getBytes(), new Text("kind"), + new Text("service"))); + return lb; + } + + @Test + public void testConvertLocatedBlock() { + LocatedBlock lb = createLocatedBlock(); LocatedBlockProto lbProto = PBHelper.convert(lb); LocatedBlock lb2 = PBHelper.convert(lbProto); - assertEquals(lb.getBlock(), lb2.getBlock()); - compare(lb.getBlockToken(), lb2.getBlockToken()); - assertEquals(lb.getStartOffset(), lb2.getStartOffset()); - assertEquals(lb.isCorrupt(), lb2.isCorrupt()); - DatanodeInfo [] dnInfos2 = lb2.getLocations(); - assertEquals(dnInfos.length, dnInfos2.length); - for (int i = 0; i < dnInfos.length ; i++) { - compare(dnInfos[i], dnInfos2[i]); + compare(lb,lb2); + } + + @Test + public void testConvertLocatedBlockList() { + ArrayList lbl = new ArrayList(); + for (int i=0;i<3;i++) { + lbl.add(createLocatedBlock()); + } + List lbpl = PBHelper.convertLocatedBlock2(lbl); + List lbl2 = PBHelper.convertLocatedBlock(lbpl); + assertEquals(lbl.size(), lbl2.size()); + for (int i=0;i Date: Wed, 9 Jan 2013 22:56:09 +0000 Subject: [PATCH 05/31] MAPREDUCE-4848. TaskAttemptContext cast error during AM recovery. Contributed by Jerry Chen git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1431131 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-mapreduce-project/CHANGES.txt | 3 + .../hadoop/mapreduce/v2/app/MRAppMaster.java | 2 +- .../v2/app/recover/RecoveryService.java | 18 ++- .../hadoop/mapreduce/v2/app/TestRecovery.java | 109 ++++++++++++++++++ 4 files changed, 128 insertions(+), 4 deletions(-) diff --git a/hadoop-mapreduce-project/CHANGES.txt b/hadoop-mapreduce-project/CHANGES.txt index eb7fbb04c72..65fbf1d6d63 100644 --- a/hadoop-mapreduce-project/CHANGES.txt +++ b/hadoop-mapreduce-project/CHANGES.txt @@ -685,6 +685,9 @@ Release 0.23.6 - UNRELEASED MAPREDUCE-4913. TestMRAppMaster#testMRAppMasterMissingStaging occasionally exits (Jason Lowe via tgraves) + MAPREDUCE-4848. 
TaskAttemptContext cast error during AM recovery (Jerry + Chen via jlowe) + Release 0.23.5 - UNRELEASED INCOMPATIBLE CHANGES diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/MRAppMaster.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/MRAppMaster.java index 405c4748032..b3b307a56fa 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/MRAppMaster.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/MRAppMaster.java @@ -579,7 +579,7 @@ public class MRAppMaster extends CompositeService { */ protected Recovery createRecoveryService(AppContext appContext) { return new RecoveryService(appContext.getApplicationAttemptId(), - appContext.getClock(), getCommitter()); + appContext.getClock(), getCommitter(), isNewApiCommitter()); } /** Create and initialize (but don't start) a single job. diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/recover/RecoveryService.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/recover/RecoveryService.java index 2760b58f3f3..4ab61c52351 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/recover/RecoveryService.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/recover/RecoveryService.java @@ -31,6 +31,7 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FSDataInputStream; import org.apache.hadoop.fs.FileContext; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.mapred.JobConf; import org.apache.hadoop.mapreduce.MRJobConfig; import org.apache.hadoop.mapreduce.OutputCommitter; import org.apache.hadoop.mapreduce.TaskAttemptContext; @@ -100,6 +101,7 @@ public class RecoveryService extends CompositeService implements Recovery { private final ApplicationAttemptId applicationAttemptId; private final OutputCommitter committer; + private final boolean newApiCommitter; private final Dispatcher dispatcher; private final ControlledClock clock; @@ -113,10 +115,11 @@ public class RecoveryService extends CompositeService implements Recovery { private volatile boolean recoveryMode = false; public RecoveryService(ApplicationAttemptId applicationAttemptId, - Clock clock, OutputCommitter committer) { + Clock clock, OutputCommitter committer, boolean newApiCommitter) { super("RecoveringDispatcher"); this.applicationAttemptId = applicationAttemptId; this.committer = committer; + this.newApiCommitter = newApiCommitter; this.dispatcher = createRecoveryDispatcher(); this.clock = new ControlledClock(clock); addService((Service) dispatcher); @@ -360,8 +363,17 @@ public class RecoveryService extends CompositeService implements Recovery { switch (state) { case SUCCEEDED: //recover the task output - TaskAttemptContext taskContext = new TaskAttemptContextImpl(getConfig(), - attInfo.getAttemptId()); + + // check the committer type and construct corresponding context + TaskAttemptContext taskContext = null; + if(newApiCommitter) { + taskContext = new TaskAttemptContextImpl(getConfig(), + 
attInfo.getAttemptId()); + } else { + taskContext = new org.apache.hadoop.mapred.TaskAttemptContextImpl(new JobConf(getConfig()), + TypeConverter.fromYarn(aId)); + } + try { TaskType type = taskContext.getTaskAttemptID().getTaskID().getTaskType(); int numReducers = taskContext.getConfiguration().getInt(MRJobConfig.NUM_REDUCES, 1); diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestRecovery.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestRecovery.java index 87fce7ece6b..8d6ca2b9b24 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestRecovery.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestRecovery.java @@ -626,6 +626,115 @@ public class TestRecovery { validateOutput(); } + @Test + public void testRecoveryWithOldCommiter() throws Exception { + int runCount = 0; + MRApp app = new MRAppWithHistory(1, 2, false, this.getClass().getName(), + true, ++runCount); + Configuration conf = new Configuration(); + conf.setBoolean("mapred.mapper.new-api", false); + conf.setBoolean("mapred.reducer.new-api", false); + conf.setBoolean(MRJobConfig.JOB_UBERTASK_ENABLE, false); + conf.set(FileOutputFormat.OUTDIR, outputDir.toString()); + Job job = app.submit(conf); + app.waitForState(job, JobState.RUNNING); + Assert.assertEquals("No of tasks not correct", + 3, job.getTasks().size()); + Iterator it = job.getTasks().values().iterator(); + Task mapTask1 = it.next(); + Task reduceTask1 = it.next(); + + // all maps must be running + app.waitForState(mapTask1, TaskState.RUNNING); + + TaskAttempt task1Attempt1 = mapTask1.getAttempts().values().iterator() + .next(); + + //before sending the TA_DONE, event make sure attempt has come to + //RUNNING state + app.waitForState(task1Attempt1, TaskAttemptState.RUNNING); + + //send the done signal to the map + app.getContext().getEventHandler().handle( + new TaskAttemptEvent( + task1Attempt1.getID(), + TaskAttemptEventType.TA_DONE)); + + //wait for map task to complete + app.waitForState(mapTask1, TaskState.SUCCEEDED); + + // Verify the shuffle-port + Assert.assertEquals(5467, task1Attempt1.getShufflePort()); + + app.waitForState(reduceTask1, TaskState.RUNNING); + TaskAttempt reduce1Attempt1 = reduceTask1.getAttempts().values().iterator().next(); + + // write output corresponding to reduce1 + writeOutput(reduce1Attempt1, conf); + + //send the done signal to the 1st reduce + app.getContext().getEventHandler().handle( + new TaskAttemptEvent( + reduce1Attempt1.getID(), + TaskAttemptEventType.TA_DONE)); + + //wait for first reduce task to complete + app.waitForState(reduceTask1, TaskState.SUCCEEDED); + + //stop the app before the job completes. 
+ app.stop(); + + //rerun + //in rerun the map will be recovered from previous run + app = new MRAppWithHistory(1, 2, false, this.getClass().getName(), false, + ++runCount); + conf = new Configuration(); + conf.setBoolean(MRJobConfig.MR_AM_JOB_RECOVERY_ENABLE, true); + conf.setBoolean("mapred.mapper.new-api", false); + conf.setBoolean("mapred.reducer.new-api", false); + conf.set(FileOutputFormat.OUTDIR, outputDir.toString()); + conf.setBoolean(MRJobConfig.JOB_UBERTASK_ENABLE, false); + job = app.submit(conf); + app.waitForState(job, JobState.RUNNING); + Assert.assertEquals("No of tasks not correct", + 3, job.getTasks().size()); + it = job.getTasks().values().iterator(); + mapTask1 = it.next(); + reduceTask1 = it.next(); + Task reduceTask2 = it.next(); + + // map will be recovered, no need to send done + app.waitForState(mapTask1, TaskState.SUCCEEDED); + + // Verify the shuffle-port after recovery + task1Attempt1 = mapTask1.getAttempts().values().iterator().next(); + Assert.assertEquals(5467, task1Attempt1.getShufflePort()); + + // first reduce will be recovered, no need to send done + app.waitForState(reduceTask1, TaskState.SUCCEEDED); + + app.waitForState(reduceTask2, TaskState.RUNNING); + + TaskAttempt reduce2Attempt = reduceTask2.getAttempts().values() + .iterator().next(); + //before sending the TA_DONE, event make sure attempt has come to + //RUNNING state + app.waitForState(reduce2Attempt, TaskAttemptState.RUNNING); + + //send the done signal to the 2nd reduce task + app.getContext().getEventHandler().handle( + new TaskAttemptEvent( + reduce2Attempt.getID(), + TaskAttemptEventType.TA_DONE)); + + //wait to get it completed + app.waitForState(reduceTask2, TaskState.SUCCEEDED); + + app.waitForState(job, JobState.SUCCEEDED); + app.verifyCompleted(); + validateOutput(); + } + private void writeBadOutput(TaskAttempt attempt, Configuration conf) throws Exception { TaskAttemptContext tContext = new TaskAttemptContextImpl(conf, From 7e599d9e3b852954a5a21b4738817c7aabfa1bc8 Mon Sep 17 00:00:00 2001 From: Aaron Myers Date: Wed, 9 Jan 2013 23:30:41 +0000 Subject: [PATCH 06/31] HADOOP-9155. FsPermission should have different default value, 777 for directory and 666 for file. Contributed by Binglin Chang. git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1431148 13f79535-47bb-0310-9956-ffa450edef68 --- .../hadoop-common/CHANGES.txt | 3 +++ .../org/apache/hadoop/fs/FileContext.java | 24 +++++++++++++++--- .../java/org/apache/hadoop/fs/FileStatus.java | 13 +++++++--- .../java/org/apache/hadoop/fs/FileSystem.java | 6 ++--- .../apache/hadoop/fs/ftp/FTPFileSystem.java | 4 +-- .../apache/hadoop/fs/local/RawLocalFs.java | 2 +- .../hadoop/fs/permission/FsPermission.java | 25 ++++++++++++++++++- .../hadoop/fs/FileContextPermissionBase.java | 2 +- .../org/apache/hadoop/fs/TestFileStatus.java | 4 +-- .../TestLocalFSFileContextMainOperations.java | 12 +++++++++ .../fs/TestLocalFileSystemPermission.java | 2 +- .../org/apache/hadoop/hdfs/DFSClient.java | 8 +++--- .../hadoop/hdfs/protocol/HdfsFileStatus.java | 5 +++- 13 files changed, 88 insertions(+), 22 deletions(-) diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt index e10bab69c3b..618933b6c47 100644 --- a/hadoop-common-project/hadoop-common/CHANGES.txt +++ b/hadoop-common-project/hadoop-common/CHANGES.txt @@ -525,6 +525,9 @@ Release 2.0.3-alpha - Unreleased HADOOP-9181. Set daemon flag for HttpServer's QueuedThreadPool. (Liang Xie via suresh) + HADOOP-9155. 
FsPermission should have different default value, 777 for + directory and 666 for file. (Binglin Chang via atm) + Release 2.0.2-alpha - 2012-09-07 INCOMPATIBLE CHANGES diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileContext.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileContext.java index 579c4e27498..978bb1ba0dd 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileContext.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileContext.java @@ -172,7 +172,25 @@ import org.apache.hadoop.util.ShutdownHookManager; public final class FileContext { public static final Log LOG = LogFactory.getLog(FileContext.class); + /** + * Default permission for directory and symlink + * In previous versions, this default permission was also used to + * create files, so files created end up with ugo+x permission. + * See HADOOP-9155 for detail. + * Two new constants are added to solve this, please use + * {@link FileContext#DIR_DEFAULT_PERM} for directory, and use + * {@link FileContext#FILE_DEFAULT_PERM} for file. + * This constant is kept for compatibility. + */ public static final FsPermission DEFAULT_PERM = FsPermission.getDefault(); + /** + * Default permission for directory + */ + public static final FsPermission DIR_DEFAULT_PERM = FsPermission.getDirDefault(); + /** + * Default permission for file + */ + public static final FsPermission FILE_DEFAULT_PERM = FsPermission.getFileDefault(); /** * Priority of the FileContext shutdown hook. @@ -656,7 +674,7 @@ public final class FileContext { CreateOpts.Perms permOpt = (CreateOpts.Perms) CreateOpts.getOpt(CreateOpts.Perms.class, opts); FsPermission permission = (permOpt != null) ? permOpt.getValue() : - FsPermission.getDefault(); + FILE_DEFAULT_PERM; permission = permission.applyUMask(umask); final CreateOpts[] updatedOpts = @@ -704,7 +722,7 @@ public final class FileContext { IOException { final Path absDir = fixRelativePart(dir); final FsPermission absFerms = (permission == null ? - FsPermission.getDefault() : permission).applyUMask(umask); + FsPermission.getDirDefault() : permission).applyUMask(umask); new FSLinkResolver() { @Override public Void next(final AbstractFileSystem fs, final Path p) @@ -2157,7 +2175,7 @@ public final class FileContext { FileStatus fs = FileContext.this.getFileStatus(qSrc); if (fs.isDirectory()) { checkDependencies(qSrc, qDst); - mkdir(qDst, FsPermission.getDefault(), true); + mkdir(qDst, FsPermission.getDirDefault(), true); FileStatus[] contents = listStatus(qSrc); for (FileStatus content : contents) { copy(makeQualified(content.getPath()), makeQualified(new Path(qDst, diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileStatus.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileStatus.java index ea2f1dc6169..c20f054d5e4 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileStatus.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileStatus.java @@ -79,8 +79,15 @@ public class FileStatus implements Writable, Comparable { this.blocksize = blocksize; this.modification_time = modification_time; this.access_time = access_time; - this.permission = (permission == null) ? 
- FsPermission.getDefault() : permission; + if (permission != null) { + this.permission = permission; + } else if (isdir) { + this.permission = FsPermission.getDirDefault(); + } else if (symlink!=null) { + this.permission = FsPermission.getDefault(); + } else { + this.permission = FsPermission.getFileDefault(); + } this.owner = (owner == null) ? "" : owner; this.group = (group == null) ? "" : group; this.symlink = symlink; @@ -217,7 +224,7 @@ public class FileStatus implements Writable, Comparable { */ protected void setPermission(FsPermission permission) { this.permission = (permission == null) ? - FsPermission.getDefault() : permission; + FsPermission.getFileDefault() : permission; } /** diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java index 9c6e7df345b..9c1f2aaf73b 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java @@ -850,7 +850,7 @@ public abstract class FileSystem extends Configured implements Closeable { long blockSize, Progressable progress ) throws IOException { - return this.create(f, FsPermission.getDefault().applyUMask( + return this.create(f, FsPermission.getFileDefault().applyUMask( FsPermission.getUMask(getConf())), overwrite, bufferSize, replication, blockSize, progress); } @@ -1030,7 +1030,7 @@ public abstract class FileSystem extends Configured implements Closeable { boolean overwrite, int bufferSize, short replication, long blockSize, Progressable progress) throws IOException { - return this.createNonRecursive(f, FsPermission.getDefault(), + return this.createNonRecursive(f, FsPermission.getFileDefault(), overwrite, bufferSize, replication, blockSize, progress); } @@ -1866,7 +1866,7 @@ public abstract class FileSystem extends Configured implements Closeable { * Call {@link #mkdirs(Path, FsPermission)} with default permission. */ public boolean mkdirs(Path f) throws IOException { - return mkdirs(f, FsPermission.getDefault()); + return mkdirs(f, FsPermission.getDirDefault()); } /** diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ftp/FTPFileSystem.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ftp/FTPFileSystem.java index d9b8ca63e1e..428bce3c9c6 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ftp/FTPFileSystem.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ftp/FTPFileSystem.java @@ -224,7 +224,7 @@ public class FTPFileSystem extends FileSystem { } Path parent = absolute.getParent(); - if (parent == null || !mkdirs(client, parent, FsPermission.getDefault())) { + if (parent == null || !mkdirs(client, parent, FsPermission.getDirDefault())) { parent = (parent == null) ? 
new Path("/") : parent; disconnect(client); throw new IOException("create(): Mkdirs failed to create: " + parent); @@ -484,7 +484,7 @@ public class FTPFileSystem extends FileSystem { if (!exists(client, absolute)) { Path parent = absolute.getParent(); created = (parent == null || mkdirs(client, parent, FsPermission - .getDefault())); + .getDirDefault())); if (created) { String parentDir = parent.toUri().getPath(); client.changeWorkingDirectory(parentDir); diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/local/RawLocalFs.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/local/RawLocalFs.java index 9ce0a97ab13..85178f42d00 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/local/RawLocalFs.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/local/RawLocalFs.java @@ -85,7 +85,7 @@ public class RawLocalFs extends DelegateToFileSystem { "system: "+target.toString()); } if (createParent) { - mkdir(link.getParent(), FsPermission.getDefault(), true); + mkdir(link.getParent(), FsPermission.getDirDefault(), true); } // NB: Use createSymbolicLink in java.nio.file.Path once available try { diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/permission/FsPermission.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/permission/FsPermission.java index a909edfd927..3db9acb2e22 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/permission/FsPermission.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/permission/FsPermission.java @@ -275,11 +275,34 @@ public class FsPermission implements Writable { conf.setInt(DEPRECATED_UMASK_LABEL, umask.toShort()); } - /** Get the default permission. */ + /** + * Get the default permission for directory and symlink. + * In previous versions, this default permission was also used to + * create files, so files created end up with ugo+x permission. + * See HADOOP-9155 for detail. + * Two new methods are added to solve this, please use + * {@link FsPermission#getDirDefault()} for directory, and use + * {@link FsPermission#getFileDefault()} for file. + * This method is kept for compatibility. + */ public static FsPermission getDefault() { return new FsPermission((short)00777); } + /** + * Get the default permission for directory. + */ + public static FsPermission getDirDefault() { + return new FsPermission((short)00777); + } + + /** + * Get the default permission for file. + */ + public static FsPermission getFileDefault() { + return new FsPermission((short)00666); + } + /** * Create a FsPermission from a Unix symbolic permission string * @param unixSymbolicPermission e.g. 
"-rw-rw-rw-" diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileContextPermissionBase.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileContextPermissionBase.java index b80764cebf0..2d7c687c6dd 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileContextPermissionBase.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileContextPermissionBase.java @@ -95,7 +95,7 @@ public abstract class FileContextPermissionBase { String filename = "foo"; Path f = getTestRootPath(fc, filename); createFile(fc, filename); - doFilePermissionCheck(FileContext.DEFAULT_PERM.applyUMask(fc.getUMask()), + doFilePermissionCheck(FileContext.FILE_DEFAULT_PERM.applyUMask(fc.getUMask()), fc.getFileStatus(f).getPermission()); } diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFileStatus.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFileStatus.java index e5380484f97..5614dd6e56b 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFileStatus.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFileStatus.java @@ -121,7 +121,7 @@ public class TestFileStatus { FileStatus fileStatus = new FileStatus(LENGTH, isdir, REPLICATION, BLKSIZE, MTIME, PATH); validateAccessors(fileStatus, LENGTH, isdir, REPLICATION, BLKSIZE, MTIME, - 0, FsPermission.getDefault(), "", "", null, PATH); + 0, FsPermission.getDirDefault(), "", "", null, PATH); } /** @@ -131,7 +131,7 @@ public class TestFileStatus { public void constructorBlank() throws IOException { FileStatus fileStatus = new FileStatus(); validateAccessors(fileStatus, 0, false, 0, 0, 0, - 0, FsPermission.getDefault(), "", "", null, null); + 0, FsPermission.getFileDefault(), "", "", null, null); } /** diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestLocalFSFileContextMainOperations.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestLocalFSFileContextMainOperations.java index d1c272cc859..6f9248902eb 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestLocalFSFileContextMainOperations.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestLocalFSFileContextMainOperations.java @@ -24,6 +24,8 @@ import org.apache.hadoop.conf.Configuration; import org.junit.Assert; import org.junit.Before; import org.junit.Test; +import org.apache.hadoop.fs.FileContextTestHelper; +import org.apache.hadoop.fs.permission.FsPermission; public class TestLocalFSFileContextMainOperations extends FileContextMainOperationsBaseTest { @@ -47,4 +49,14 @@ public class TestLocalFSFileContextMainOperations extends FileContextMainOperati FileContext fc1 = FileContext.getLocalFSFileContext(); Assert.assertTrue(fc1 != fc); } + + @Test + public void testDefaultFilePermission() throws IOException { + Path file = FileContextTestHelper.getTestRootPath(fc, + "testDefaultFilePermission"); + FileContextTestHelper.createFile(fc, file); + FsPermission expect = FileContext.FILE_DEFAULT_PERM.applyUMask(fc.getUMask()); + Assert.assertEquals(expect, fc.getFileStatus(file) + .getPermission()); + } } diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestLocalFileSystemPermission.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestLocalFileSystemPermission.java index 
4508e144a65..5e985737d3c 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestLocalFileSystemPermission.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestLocalFileSystemPermission.java @@ -73,7 +73,7 @@ public class TestLocalFileSystemPermission extends TestCase { try { FsPermission initialPermission = getPermission(localfs, f); System.out.println(filename + ": " + initialPermission); - assertEquals(FsPermission.getDefault().applyUMask(FsPermission.getUMask(conf)), initialPermission); + assertEquals(FsPermission.getFileDefault().applyUMask(FsPermission.getUMask(conf)), initialPermission); } catch(Exception e) { System.out.println(StringUtils.stringifyException(e)); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java index b0f80c780c3..9fa98fdd077 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java @@ -1157,7 +1157,7 @@ public class DFSClient implements java.io.Closeable { /** * Call {@link #create(String, FsPermission, EnumSet, short, long, * Progressable, int, ChecksumOpt)} with default permission - * {@link FsPermission#getDefault()}. + * {@link FsPermission#getFileDefault()}. * * @param src File name * @param overwrite overwrite an existing file if true @@ -1175,7 +1175,7 @@ public class DFSClient implements java.io.Closeable { Progressable progress, int buffersize) throws IOException { - return create(src, FsPermission.getDefault(), + return create(src, FsPermission.getFileDefault(), overwrite ? EnumSet.of(CreateFlag.CREATE, CreateFlag.OVERWRITE) : EnumSet.of(CreateFlag.CREATE), replication, blockSize, progress, buffersize, null); @@ -1206,7 +1206,7 @@ public class DFSClient implements java.io.Closeable { * * @param src File name * @param permission The permission of the directory being created. - * If null, use default permission {@link FsPermission#getDefault()} + * If null, use default permission {@link FsPermission#getFileDefault()} * @param flag indicates create a new file or create/overwrite an * existing file or append to an existing file * @param createParent create missing parent directory if true @@ -1232,7 +1232,7 @@ public class DFSClient implements java.io.Closeable { ChecksumOpt checksumOpt) throws IOException { checkOpen(); if (permission == null) { - permission = FsPermission.getDefault(); + permission = FsPermission.getFileDefault(); } FsPermission masked = permission.applyUMask(dfsClientConf.uMask); if(LOG.isDebugEnabled()) { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsFileStatus.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsFileStatus.java index e0678672966..26b6e984d53 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsFileStatus.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsFileStatus.java @@ -67,7 +67,10 @@ public class HdfsFileStatus { this.modification_time = modification_time; this.access_time = access_time; this.permission = (permission == null) ? - FsPermission.getDefault() : permission; + ((isdir || symlink!=null) ? + FsPermission.getDefault() : + FsPermission.getFileDefault()) : + permission; this.owner = (owner == null) ? 
"" : owner; this.group = (group == null) ? "" : group; this.symlink = symlink; From 59d9d8bca93bf714f8ec846a27009e5690f1c05d Mon Sep 17 00:00:00 2001 From: Alejandro Abdelnur Date: Thu, 10 Jan 2013 00:50:03 +0000 Subject: [PATCH 07/31] MAPREDUCE-4907. TrackerDistributedCacheManager issues too many getFileStatus calls. (sandyr via tucu) git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1431166 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-mapreduce-project/CHANGES.txt | 3 + .../apache/hadoop/mapreduce/JobSubmitter.java | 3 +- .../ClientDistributedCacheManager.java | 89 ++++++++++++------- 3 files changed, 62 insertions(+), 33 deletions(-) diff --git a/hadoop-mapreduce-project/CHANGES.txt b/hadoop-mapreduce-project/CHANGES.txt index 65fbf1d6d63..5f15f5e9db5 100644 --- a/hadoop-mapreduce-project/CHANGES.txt +++ b/hadoop-mapreduce-project/CHANGES.txt @@ -204,6 +204,9 @@ Release 2.0.3-alpha - Unreleased MAPREDUCE-4920. Use security token protobuf definition from hadoop common. (Suresh Srinivas via vinodkv) + MAPREDUCE-4907. TrackerDistributedCacheManager issues too many getFileStatus + calls. (sandyr via tucu) + OPTIMIZATIONS BUG FIXES diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/JobSubmitter.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/JobSubmitter.java index 62415f935f6..8fde50c68c8 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/JobSubmitter.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/JobSubmitter.java @@ -251,9 +251,8 @@ class JobSubmitter { } // set the timestamps of the archives and files - ClientDistributedCacheManager.determineTimestamps(conf); // set the public/private visibility of the archives and files - ClientDistributedCacheManager.determineCacheVisibilities(conf); + ClientDistributedCacheManager.determineTimestampsAndCacheVisibilities(conf); // get DelegationToken for each cached file ClientDistributedCacheManager.getDelegationTokens(conf, job .getCredentials()); diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/filecache/ClientDistributedCacheManager.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/filecache/ClientDistributedCacheManager.java index 46d89eda130..23f3cfcadf5 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/filecache/ClientDistributedCacheManager.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/filecache/ClientDistributedCacheManager.java @@ -19,6 +19,8 @@ package org.apache.hadoop.mapreduce.filecache; import java.io.IOException; import java.net.URI; +import java.util.HashMap; +import java.util.Map; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; @@ -37,6 +39,25 @@ import org.apache.hadoop.security.Credentials; @InterfaceAudience.Private public class ClientDistributedCacheManager { + /** + * Determines timestamps of files to be cached, and stores those + * in the configuration. 
Determines the visibilities of the distributed cache + * files and archives. The visibility of a cache path is "public" if the leaf + * component has READ permissions for others, and the parent subdirs have + * EXECUTE permissions for others. + * + * This is an internal method! + * + * @param job + * @throws IOException + */ + public static void determineTimestampsAndCacheVisibilities(Configuration job) + throws IOException { + Map statCache = new HashMap(); + determineTimestamps(job, statCache); + determineCacheVisibilities(job, statCache); + } + /** * Determines timestamps of files to be cached, and stores those * in the configuration. This is intended to be used internally by JobClient @@ -47,16 +68,17 @@ public class ClientDistributedCacheManager { * @param job Configuration of a job. * @throws IOException */ - public static void determineTimestamps(Configuration job) throws IOException { + public static void determineTimestamps(Configuration job, + Map statCache) throws IOException { URI[] tarchives = DistributedCache.getCacheArchives(job); if (tarchives != null) { - FileStatus status = getFileStatus(job, tarchives[0]); + FileStatus status = getFileStatus(job, tarchives[0], statCache); StringBuilder archiveFileSizes = new StringBuilder(String.valueOf(status.getLen())); StringBuilder archiveTimestamps = new StringBuilder(String.valueOf(status.getModificationTime())); for (int i = 1; i < tarchives.length; i++) { - status = getFileStatus(job, tarchives[i]); + status = getFileStatus(job, tarchives[i], statCache); archiveFileSizes.append(","); archiveFileSizes.append(String.valueOf(status.getLen())); archiveTimestamps.append(","); @@ -68,13 +90,13 @@ public class ClientDistributedCacheManager { URI[] tfiles = DistributedCache.getCacheFiles(job); if (tfiles != null) { - FileStatus status = getFileStatus(job, tfiles[0]); + FileStatus status = getFileStatus(job, tfiles[0], statCache); StringBuilder fileSizes = new StringBuilder(String.valueOf(status.getLen())); StringBuilder fileTimestamps = new StringBuilder(String.valueOf( status.getModificationTime())); for (int i = 1; i < tfiles.length; i++) { - status = getFileStatus(job, tfiles[i]); + status = getFileStatus(job, tfiles[i], statCache); fileSizes.append(","); fileSizes.append(String.valueOf(status.getLen())); fileTimestamps.append(","); @@ -123,25 +145,25 @@ public class ClientDistributedCacheManager { * @param job * @throws IOException */ - public static void determineCacheVisibilities(Configuration job) - throws IOException { + public static void determineCacheVisibilities(Configuration job, + Map statCache) throws IOException { URI[] tarchives = DistributedCache.getCacheArchives(job); if (tarchives != null) { StringBuilder archiveVisibilities = - new StringBuilder(String.valueOf(isPublic(job, tarchives[0]))); + new StringBuilder(String.valueOf(isPublic(job, tarchives[0], statCache))); for (int i = 1; i < tarchives.length; i++) { archiveVisibilities.append(","); - archiveVisibilities.append(String.valueOf(isPublic(job, tarchives[i]))); + archiveVisibilities.append(String.valueOf(isPublic(job, tarchives[i], statCache))); } setArchiveVisibilities(job, archiveVisibilities.toString()); } URI[] tfiles = DistributedCache.getCacheFiles(job); if (tfiles != null) { StringBuilder fileVisibilities = - new StringBuilder(String.valueOf(isPublic(job, tfiles[0]))); + new StringBuilder(String.valueOf(isPublic(job, tfiles[0], statCache))); for (int i = 1; i < tfiles.length; i++) { fileVisibilities.append(","); - 
fileVisibilities.append(String.valueOf(isPublic(job, tfiles[i]))); + fileVisibilities.append(String.valueOf(isPublic(job, tfiles[i], statCache))); } setFileVisibilities(job, fileVisibilities.toString()); } @@ -193,19 +215,13 @@ public class ClientDistributedCacheManager { } /** - * Returns {@link FileStatus} of a given cache file on hdfs. - * - * @param conf configuration - * @param cache cache file - * @return {@link FileStatus} of a given cache file on hdfs - * @throws IOException + * Gets the file status for the given URI. If the URI is in the cache, + * returns it. Otherwise, fetches it and adds it to the cache. */ - static FileStatus getFileStatus(Configuration conf, URI cache) - throws IOException { - FileSystem fileSystem = FileSystem.get(cache, conf); - Path filePath = new Path(cache.getPath()); - - return fileSystem.getFileStatus(filePath); + private static FileStatus getFileStatus(Configuration job, URI uri, + Map statCache) throws IOException { + FileSystem fileSystem = FileSystem.get(uri, job); + return getFileStatus(fileSystem, uri, statCache); } /** @@ -216,14 +232,15 @@ public class ClientDistributedCacheManager { * @return true if the path in the uri is visible to all, false otherwise * @throws IOException */ - static boolean isPublic(Configuration conf, URI uri) throws IOException { + static boolean isPublic(Configuration conf, URI uri, + Map statCache) throws IOException { FileSystem fs = FileSystem.get(uri, conf); Path current = new Path(uri.getPath()); //the leaf level file should be readable by others - if (!checkPermissionOfOther(fs, current, FsAction.READ)) { + if (!checkPermissionOfOther(fs, current, FsAction.READ, statCache)) { return false; } - return ancestorsHaveExecutePermissions(fs, current.getParent()); + return ancestorsHaveExecutePermissions(fs, current.getParent(), statCache); } /** @@ -231,12 +248,12 @@ public class ClientDistributedCacheManager { * permission set for all users (i.e. that other users can traverse * the directory heirarchy to the given path) */ - static boolean ancestorsHaveExecutePermissions(FileSystem fs, Path path) - throws IOException { + static boolean ancestorsHaveExecutePermissions(FileSystem fs, Path path, + Map statCache) throws IOException { Path current = path; while (current != null) { //the subdirs in the path should have execute permissions for others - if (!checkPermissionOfOther(fs, current, FsAction.EXECUTE)) { + if (!checkPermissionOfOther(fs, current, FsAction.EXECUTE, statCache)) { return false; } current = current.getParent(); @@ -254,8 +271,8 @@ public class ClientDistributedCacheManager { * @throws IOException */ private static boolean checkPermissionOfOther(FileSystem fs, Path path, - FsAction action) throws IOException { - FileStatus status = fs.getFileStatus(path); + FsAction action, Map statCache) throws IOException { + FileStatus status = getFileStatus(fs, path.toUri(), statCache); FsPermission perms = status.getPermission(); FsAction otherAction = perms.getOtherAction(); if (otherAction.implies(action)) { @@ -263,4 +280,14 @@ public class ClientDistributedCacheManager { } return false; } + + private static FileStatus getFileStatus(FileSystem fs, URI uri, + Map statCache) throws IOException { + FileStatus stat = statCache.get(uri); + if (stat == null) { + stat = fs.getFileStatus(new Path(uri)); + statCache.put(uri, stat); + } + return stat; + } } From 6449f524552f8c24d20b314ad21f6c579fa08e85 Mon Sep 17 00:00:00 2001 From: Eli Collins Date: Thu, 10 Jan 2013 02:30:05 +0000 Subject: [PATCH 08/31] HDFS-4032. 
Specify the charset explicitly rather than rely on the default. Contributed by Eli Collins git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1431179 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 3 +++ .../java/org/apache/hadoop/hdfs/DFSUtil.java | 25 ++++++------------- .../datatransfer/DataTransferEncryptor.java | 5 ++-- .../hadoop/hdfs/qjournal/server/Journal.java | 3 ++- .../hadoop/hdfs/server/common/JspHelper.java | 4 ++- .../hadoop/hdfs/server/common/Storage.java | 4 ++- .../fsdataset/impl/RollingLogsImpl.java | 19 +++++++++----- .../server/namenode/ClusterJspHelper.java | 4 ++- .../hdfs/server/namenode/FSNamesystem.java | 7 ++++-- .../namenode/RenewDelegationTokenServlet.java | 8 ++++-- .../web/resources/NamenodeWebHdfsMethods.java | 8 ++++-- .../hdfs/tools/DelegationTokenFetcher.java | 6 +++-- .../OfflineEditsXmlLoader.java | 10 +++++--- .../StatisticsEditsVisitor.java | 9 ++++--- .../TextWriterImageVisitor.java | 8 ++++-- .../apache/hadoop/hdfs/util/MD5FileUtils.java | 9 ++++--- .../hadoop/hdfs/util/PersistentLongFile.java | 10 ++++++-- .../hadoop/hdfs/web/WebHdfsFileSystem.java | 4 ++- .../server/namenode/TestPathComponents.java | 4 ++- 19 files changed, 98 insertions(+), 52 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt index 8e381dd686a..2aa917caeff 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt @@ -467,6 +467,9 @@ Release 2.0.3-alpha - Unreleased HDFS-4035. LightWeightGSet and LightWeightHashSet increment a volatile without synchronization. (eli) + HDFS-4032. Specify the charset explicitly rather than rely on the + default. (eli) + HDFS-4363. Combine PBHelper and HdfsProtoUtil and remove redundant methods. (suresh) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java index e94d4a8b843..69ef0095db6 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java @@ -80,6 +80,7 @@ import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.util.StringUtils; import org.apache.hadoop.util.ToolRunner; +import com.google.common.base.Charsets; import com.google.common.base.Joiner; import com.google.common.base.Preconditions; import com.google.common.collect.Lists; @@ -222,12 +223,7 @@ public class DFSUtil { * Converts a string to a byte array using UTF8 encoding. 
*/ public static byte[] string2Bytes(String str) { - try { - return str.getBytes("UTF8"); - } catch(UnsupportedEncodingException e) { - assert false : "UTF8 encoding is not supported "; - } - return null; + return str.getBytes(Charsets.UTF_8); } /** @@ -239,19 +235,14 @@ public class DFSUtil { if (pathComponents.length == 1 && pathComponents[0].length == 0) { return Path.SEPARATOR; } - try { - StringBuilder result = new StringBuilder(); - for (int i = 0; i < pathComponents.length; i++) { - result.append(new String(pathComponents[i], "UTF-8")); - if (i < pathComponents.length - 1) { - result.append(Path.SEPARATOR_CHAR); - } + StringBuilder result = new StringBuilder(); + for (int i = 0; i < pathComponents.length; i++) { + result.append(new String(pathComponents[i], Charsets.UTF_8)); + if (i < pathComponents.length - 1) { + result.append(Path.SEPARATOR_CHAR); } - return result.toString(); - } catch (UnsupportedEncodingException ex) { - assert false : "UTF8 encoding is not supported "; } - return null; + return result.toString(); } /** Convert an object representing a path to a string. */ diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/DataTransferEncryptor.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/DataTransferEncryptor.java index 229480b927b..f84bdf38ead 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/DataTransferEncryptor.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/DataTransferEncryptor.java @@ -51,6 +51,7 @@ import org.apache.hadoop.hdfs.security.token.block.DataEncryptionKey; import org.apache.hadoop.security.SaslInputStream; import org.apache.hadoop.security.SaslOutputStream; +import com.google.common.base.Charsets; import com.google.common.collect.Maps; import com.google.protobuf.ByteString; @@ -399,7 +400,7 @@ public class DataTransferEncryptor { DataEncryptionKey encryptionKey) { return encryptionKey.keyId + NAME_DELIMITER + encryptionKey.blockPoolId + NAME_DELIMITER + - new String(Base64.encodeBase64(encryptionKey.nonce, false)); + new String(Base64.encodeBase64(encryptionKey.nonce, false), Charsets.UTF_8); } /** @@ -427,7 +428,7 @@ public class DataTransferEncryptor { } private static char[] encryptionKeyToPassword(byte[] encryptionKey) { - return new String(Base64.encodeBase64(encryptionKey, false)).toCharArray(); + return new String(Base64.encodeBase64(encryptionKey, false), Charsets.UTF_8).toCharArray(); } /** diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/Journal.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/Journal.java index 9a15fc1b59d..38a58e89bcc 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/Journal.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/Journal.java @@ -58,6 +58,7 @@ import org.apache.hadoop.ipc.Server; import org.apache.hadoop.security.SecurityUtil; import com.google.common.annotations.VisibleForTesting; +import com.google.common.base.Charsets; import com.google.common.base.Preconditions; import com.google.common.base.Stopwatch; import com.google.common.collect.ImmutableList; @@ -934,7 +935,7 @@ class Journal implements Closeable { fos.write('\n'); // Write human-readable data after the protobuf. 
This is only // to assist in debugging -- it's not parsed at all. - OutputStreamWriter writer = new OutputStreamWriter(fos); + OutputStreamWriter writer = new OutputStreamWriter(fos, Charsets.UTF_8); writer.write(String.valueOf(newData)); writer.write('\n'); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/JspHelper.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/JspHelper.java index 1d9a8f0a1c1..b569ca686e6 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/JspHelper.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/JspHelper.java @@ -70,6 +70,8 @@ import org.apache.hadoop.security.authorize.ProxyUsers; import org.apache.hadoop.security.token.Token; import org.apache.hadoop.util.VersionInfo; +import com.google.common.base.Charsets; + import static org.apache.hadoop.fs.CommonConfigurationKeys.HADOOP_HTTP_STATIC_USER; import static org.apache.hadoop.fs.CommonConfigurationKeys.DEFAULT_HADOOP_HTTP_STATIC_USER; @@ -229,7 +231,7 @@ public class JspHelper { } blockReader = null; s.close(); - out.print(HtmlQuoting.quoteHtmlChars(new String(buf))); + out.print(HtmlQuoting.quoteHtmlChars(new String(buf, Charsets.UTF_8))); } public static void addTableHeader(JspWriter out) throws IOException { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Storage.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Storage.java index cef7d237c24..fc69978fb22 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Storage.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Storage.java @@ -44,6 +44,8 @@ import org.apache.hadoop.util.VersionInfo; import com.google.common.base.Preconditions; +import com.google.common.base.Charsets; + /** @@ -658,7 +660,7 @@ public abstract class Storage extends StorageInfo { FileLock res = null; try { res = file.getChannel().tryLock(); - file.write(jvmName.getBytes()); + file.write(jvmName.getBytes(Charsets.UTF_8)); LOG.info("Lock on " + lockF + " acquired by nodename " + jvmName); } catch(OverlappingFileLockException oe) { LOG.error("It appears that another namenode " + file.readLine() diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/RollingLogsImpl.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/RollingLogsImpl.java index 48a9829d626..94898ee38d5 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/RollingLogsImpl.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/RollingLogsImpl.java @@ -19,16 +19,20 @@ package org.apache.hadoop.hdfs.server.datanode.fsdataset.impl; import java.io.BufferedReader; import java.io.File; +import java.io.FileInputStream; import java.io.FileNotFoundException; import java.io.FileOutputStream; -import java.io.FileReader; import java.io.IOException; -import java.io.PrintStream; +import java.io.InputStreamReader; +import java.io.OutputStreamWriter; +import java.io.PrintWriter; import java.util.concurrent.atomic.AtomicInteger; import org.apache.hadoop.hdfs.server.datanode.DataBlockScanner; import org.apache.hadoop.hdfs.server.datanode.fsdataset.RollingLogs; +import 
com.google.common.base.Charsets; + class RollingLogsImpl implements RollingLogs { private static final String CURR_SUFFIX = ".curr"; private static final String PREV_SUFFIX = ".prev"; @@ -40,7 +44,7 @@ class RollingLogsImpl implements RollingLogs { private final File curr; private final File prev; - private PrintStream out; //require synchronized access + private PrintWriter out; //require synchronized access private Appender appender = new Appender() { @Override @@ -82,7 +86,8 @@ class RollingLogsImpl implements RollingLogs { RollingLogsImpl(String dir, String filePrefix) throws FileNotFoundException{ curr = new File(dir, filePrefix + CURR_SUFFIX); prev = new File(dir, filePrefix + PREV_SUFFIX); - out = new PrintStream(new FileOutputStream(curr, true)); + out = new PrintWriter(new OutputStreamWriter(new FileOutputStream( + curr, true), Charsets.UTF_8)); } @Override @@ -108,7 +113,8 @@ class RollingLogsImpl implements RollingLogs { synchronized(this) { appender.close(); final boolean renamed = curr.renameTo(prev); - out = new PrintStream(new FileOutputStream(curr, true)); + out = new PrintWriter(new OutputStreamWriter(new FileOutputStream( + curr, true), Charsets.UTF_8)); if (!renamed) { throw new IOException("Failed to rename " + curr + " to " + prev); } @@ -163,7 +169,8 @@ class RollingLogsImpl implements RollingLogs { reader = null; } - reader = new BufferedReader(new FileReader(file)); + reader = new BufferedReader(new InputStreamReader(new FileInputStream( + file), Charsets.UTF_8)); return true; } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ClusterJspHelper.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ClusterJspHelper.java index 78fa3d69ad9..1b3db818d15 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ClusterJspHelper.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ClusterJspHelper.java @@ -48,6 +48,8 @@ import org.codehaus.jackson.map.ObjectMapper; import org.codehaus.jackson.type.TypeReference; import org.znerd.xmlenc.XMLOutputter; +import com.google.common.base.Charsets; + /** * This class generates the data that is needed to be displayed on cluster web * console. 
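All of the HDFS-4032 hunks above and below apply one pattern: build readers and writers over the raw byte stream with an explicit UTF-8 charset (Guava's Charsets.UTF_8 in the patch) instead of FileReader, FileWriter, or PrintStream, which silently pick up the JVM's platform-default encoding. The following is a minimal sketch of that before/after shape, not code from the patch: it assumes a throwaway demo file name and uses the JDK's StandardCharsets.UTF_8 (equivalent on Java 7+ to the Guava constant) so it runs with no extra dependency.

import java.io.BufferedReader;
import java.io.File;
import java.io.FileInputStream;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.InputStreamReader;
import java.io.OutputStreamWriter;
import java.io.PrintWriter;
import java.nio.charset.StandardCharsets;

public class ExplicitCharsetSketch {
  public static void main(String[] args) throws IOException {
    // Hypothetical demo file, for illustration only.
    File f = new File("charset-demo.txt");

    // Old shape (being removed by the patch): new FileWriter(f) or new PrintStream(...)
    // encode with whatever the platform default charset happens to be.
    // New shape: name the charset explicitly so output is identical on every platform.
    PrintWriter out = new PrintWriter(new OutputStreamWriter(
        new FileOutputStream(f, true), StandardCharsets.UTF_8));
    out.println("hello, hdfs");
    out.close();

    // Reading follows the same shape: InputStreamReader with an explicit charset
    // instead of FileReader, which always uses the platform default.
    BufferedReader in = new BufferedReader(new InputStreamReader(
        new FileInputStream(f), StandardCharsets.UTF_8));
    System.out.println(in.readLine());
    in.close();
  }
}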
@@ -873,7 +875,7 @@ class ClusterJspHelper { URLConnection connection = url.openConnection(); BufferedReader in = new BufferedReader( new InputStreamReader( - connection.getInputStream())); + connection.getInputStream(), Charsets.UTF_8)); String inputLine; while ((inputLine = in.readLine()) != null) { out.append(inputLine); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java index 36e4770e6df..258ef9195a0 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java @@ -78,8 +78,10 @@ import java.io.DataInputStream; import java.io.DataOutputStream; import java.io.File; import java.io.FileNotFoundException; +import java.io.FileOutputStream; import java.io.FileWriter; import java.io.IOException; +import java.io.OutputStreamWriter; import java.io.PrintWriter; import java.io.StringWriter; import java.lang.management.ManagementFactory; @@ -204,6 +206,7 @@ import org.apache.hadoop.util.VersionInfo; import org.mortbay.util.ajax.JSON; import com.google.common.annotations.VisibleForTesting; +import com.google.common.base.Charsets; import com.google.common.base.Preconditions; import com.google.common.collect.Lists; @@ -1089,8 +1092,8 @@ public class FSNamesystem implements Namesystem, FSClusterStats, try { checkSuperuserPrivilege(); File file = new File(System.getProperty("hadoop.log.dir"), filename); - PrintWriter out = new PrintWriter(new BufferedWriter(new FileWriter(file, - true))); + PrintWriter out = new PrintWriter(new BufferedWriter( + new OutputStreamWriter(new FileOutputStream(file, true), Charsets.UTF_8))); metaSave(out); out.flush(); out.close(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/RenewDelegationTokenServlet.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/RenewDelegationTokenServlet.java index cefd63e66c8..caec7659c8c 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/RenewDelegationTokenServlet.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/RenewDelegationTokenServlet.java @@ -17,7 +17,8 @@ package org.apache.hadoop.hdfs.server.namenode; import java.io.IOException; -import java.io.PrintStream; +import java.io.OutputStreamWriter; +import java.io.PrintWriter; import java.security.PrivilegedExceptionAction; import javax.servlet.ServletContext; @@ -32,6 +33,8 @@ import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifie import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.token.Token; +import com.google.common.base.Charsets; + /** * Renew delegation tokens over http for use in hftp. 
*/ @@ -73,7 +76,8 @@ public class RenewDelegationTokenServlet extends DfsServlet { return nn.getRpcServer().renewDelegationToken(token); } }); - PrintStream os = new PrintStream(resp.getOutputStream()); + final PrintWriter os = new PrintWriter(new OutputStreamWriter( + resp.getOutputStream(), Charsets.UTF_8)); os.println(result); os.close(); } catch(Exception e) { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java index 912dee10370..e93f26093d7 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java @@ -20,7 +20,8 @@ package org.apache.hadoop.hdfs.server.namenode.web.resources; import java.io.FileNotFoundException; import java.io.IOException; import java.io.OutputStream; -import java.io.PrintStream; +import java.io.OutputStreamWriter; +import java.io.PrintWriter; import java.net.InetAddress; import java.net.URI; import java.net.URISyntaxException; @@ -102,6 +103,7 @@ import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.token.Token; import org.apache.hadoop.security.token.TokenIdentifier; +import com.google.common.base.Charsets; import com.sun.jersey.spi.container.ResourceFilters; /** Web-hdfs NameNode implementation. */ @@ -713,7 +715,8 @@ public class NamenodeWebHdfsMethods { return new StreamingOutput() { @Override public void write(final OutputStream outstream) throws IOException { - final PrintStream out = new PrintStream(outstream); + final PrintWriter out = new PrintWriter(new OutputStreamWriter( + outstream, Charsets.UTF_8)); out.println("{\"" + FileStatus.class.getSimpleName() + "es\":{\"" + FileStatus.class.getSimpleName() + "\":["); @@ -736,6 +739,7 @@ public class NamenodeWebHdfsMethods { out.println(); out.println("]}}"); + out.flush(); } }; } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DelegationTokenFetcher.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DelegationTokenFetcher.java index 4c6d888b1d3..cf6f9d503d0 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DelegationTokenFetcher.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DelegationTokenFetcher.java @@ -55,6 +55,8 @@ import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.token.Token; import org.apache.hadoop.util.GenericOptionsParser; +import com.google.common.base.Charsets; + /** * Fetch a DelegationToken from the current Namenode and store it in the * specified file. 
@@ -269,8 +271,8 @@ public class DelegationTokenFetcher { throw new IOException("Error renewing token: " + connection.getResponseMessage()); } - in = new BufferedReader(new InputStreamReader - (connection.getInputStream())); + in = new BufferedReader( + new InputStreamReader(connection.getInputStream(), Charsets.UTF_8)); long result = Long.parseLong(in.readLine()); in.close(); return result; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/OfflineEditsXmlLoader.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/OfflineEditsXmlLoader.java index 0f53415e95c..95cc3b89120 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/OfflineEditsXmlLoader.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/OfflineEditsXmlLoader.java @@ -18,9 +18,10 @@ package org.apache.hadoop.hdfs.tools.offlineEditsViewer; import java.io.File; +import java.io.FileInputStream; import java.io.FileNotFoundException; -import java.io.FileReader; import java.io.IOException; +import java.io.InputStreamReader; import java.util.Stack; import org.apache.hadoop.classification.InterfaceAudience; @@ -39,6 +40,8 @@ import org.xml.sax.XMLReader; import org.xml.sax.helpers.DefaultHandler; import org.xml.sax.helpers.XMLReaderFactory; +import com.google.common.base.Charsets; + /** * OfflineEditsXmlLoader walks an EditsVisitor over an OEV XML file */ @@ -48,7 +51,7 @@ class OfflineEditsXmlLoader extends DefaultHandler implements OfflineEditsLoader { private final boolean fixTxIds; private final OfflineEditsVisitor visitor; - private final FileReader fileReader; + private final InputStreamReader fileReader; private ParseState state; private Stanza stanza; private Stack stanzaStack; @@ -70,7 +73,8 @@ class OfflineEditsXmlLoader public OfflineEditsXmlLoader(OfflineEditsVisitor visitor, File inputFile, OfflineEditsViewer.Flags flags) throws FileNotFoundException { this.visitor = visitor; - this.fileReader = new FileReader(inputFile); + this.fileReader = + new InputStreamReader(new FileInputStream(inputFile), Charsets.UTF_8); this.fixTxIds = flags.getFixTxIds(); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/StatisticsEditsVisitor.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/StatisticsEditsVisitor.java index 3fd1dc26a0c..c173e170f0f 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/StatisticsEditsVisitor.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/StatisticsEditsVisitor.java @@ -19,7 +19,8 @@ package org.apache.hadoop.hdfs.tools.offlineEditsViewer; import java.io.IOException; import java.io.OutputStream; -import java.io.PrintStream; +import java.io.OutputStreamWriter; +import java.io.PrintWriter; import java.util.Map; import java.util.HashMap; @@ -29,6 +30,8 @@ import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp; import org.apache.hadoop.hdfs.server.namenode.FSEditLogOpCodes; +import com.google.common.base.Charsets; + /** * StatisticsEditsVisitor implements text version of EditsVisitor * that aggregates counts of op codes processed @@ -37,7 +40,7 @@ import org.apache.hadoop.hdfs.server.namenode.FSEditLogOpCodes; 
@InterfaceAudience.Private @InterfaceStability.Unstable public class StatisticsEditsVisitor implements OfflineEditsVisitor { - final private PrintStream out; + final private PrintWriter out; private int version = -1; private final Map opCodeCount = @@ -52,7 +55,7 @@ public class StatisticsEditsVisitor implements OfflineEditsVisitor { * @param printToScreen Mirror output to screen? */ public StatisticsEditsVisitor(OutputStream out) throws IOException { - this.out = new PrintStream(out); + this.out = new PrintWriter(new OutputStreamWriter(out, Charsets.UTF_8)); } /** Start the visitor */ diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TextWriterImageVisitor.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TextWriterImageVisitor.java index 88e66df62de..652d0100485 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TextWriterImageVisitor.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TextWriterImageVisitor.java @@ -17,8 +17,12 @@ */ package org.apache.hadoop.hdfs.tools.offlineImageViewer; +import java.io.FileOutputStream; import java.io.FileWriter; import java.io.IOException; +import java.io.OutputStreamWriter; + +import com.google.common.base.Charsets; /** * TextWriterImageProcessor mixes in the ability for ImageVisitor @@ -34,7 +38,7 @@ import java.io.IOException; abstract class TextWriterImageVisitor extends ImageVisitor { private boolean printToScreen = false; private boolean okToWrite = false; - final private FileWriter fw; + final private OutputStreamWriter fw; /** * Create a processor that writes to the file named. @@ -56,7 +60,7 @@ abstract class TextWriterImageVisitor extends ImageVisitor { throws IOException { super(); this.printToScreen = printToScreen; - fw = new FileWriter(filename); + fw = new OutputStreamWriter(new FileOutputStream(filename), Charsets.UTF_8); okToWrite = true; } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/MD5FileUtils.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/MD5FileUtils.java index c010e2730a6..0d05be073f3 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/MD5FileUtils.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/MD5FileUtils.java @@ -20,9 +20,9 @@ package org.apache.hadoop.hdfs.util; import java.io.BufferedReader; import java.io.File; import java.io.FileInputStream; -import java.io.FileReader; import java.io.IOException; import java.io.InputStream; +import java.io.InputStreamReader; import java.security.DigestInputStream; import java.security.MessageDigest; import java.util.regex.Matcher; @@ -34,6 +34,8 @@ import org.apache.hadoop.io.IOUtils; import org.apache.hadoop.io.MD5Hash; import org.apache.hadoop.util.StringUtils; +import com.google.common.base.Charsets; + /** * Static functions for dealing with files of the same format * that the Unix "md5sum" utility writes. 
@@ -78,7 +80,8 @@ public abstract class MD5FileUtils { } BufferedReader reader = - new BufferedReader(new FileReader(md5File)); + new BufferedReader(new InputStreamReader(new FileInputStream( + md5File), Charsets.UTF_8)); try { md5Line = reader.readLine(); if (md5Line == null) { md5Line = ""; } @@ -138,7 +141,7 @@ public abstract class MD5FileUtils { String md5Line = digestString + " *" + dataFile.getName() + "\n"; AtomicFileOutputStream afos = new AtomicFileOutputStream(md5File); - afos.write(md5Line.getBytes()); + afos.write(md5Line.getBytes(Charsets.UTF_8)); afos.close(); LOG.debug("Saved MD5 " + digest + " to " + md5File); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/PersistentLongFile.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/PersistentLongFile.java index 292d0dfe63e..6ef047dbe9e 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/PersistentLongFile.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/PersistentLongFile.java @@ -19,14 +19,18 @@ package org.apache.hadoop.hdfs.util; import java.io.BufferedReader; import java.io.File; +import java.io.FileInputStream; import java.io.FileReader; import java.io.IOException; +import java.io.InputStreamReader; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.io.IOUtils; +import com.google.common.base.Charsets; + /** * Class that represents a file on disk which persistently stores * a single long value. The file is updated atomically @@ -74,7 +78,7 @@ public class PersistentLongFile { public static void writeFile(File file, long val) throws IOException { AtomicFileOutputStream fos = new AtomicFileOutputStream(file); try { - fos.write(String.valueOf(val).getBytes()); + fos.write(String.valueOf(val).getBytes(Charsets.UTF_8)); fos.write('\n'); fos.close(); fos = null; @@ -88,7 +92,9 @@ public class PersistentLongFile { public static long readFile(File file, long defaultVal) throws IOException { long val = defaultVal; if (file.exists()) { - BufferedReader br = new BufferedReader(new FileReader(file)); + BufferedReader br = + new BufferedReader(new InputStreamReader(new FileInputStream( + file), Charsets.UTF_8)); try { val = Long.valueOf(br.readLine()); br.close(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java index 7304b1df966..6c8c29cd0fa 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java @@ -105,6 +105,8 @@ import org.apache.hadoop.security.token.delegation.AbstractDelegationTokenSelect import org.apache.hadoop.util.Progressable; import org.mortbay.util.ajax.JSON; +import com.google.common.base.Charsets; + /** A FileSystem for HDFS over the web. 
*/ public class WebHdfsFileSystem extends FileSystem implements DelegationTokenRenewer.Renewable { @@ -281,7 +283,7 @@ public class WebHdfsFileSystem extends FileSystem + "\" (parsed=\"" + parsed + "\")"); } } - return (Map)JSON.parse(new InputStreamReader(in)); + return (Map)JSON.parse(new InputStreamReader(in, Charsets.UTF_8)); } private static Map validateResponse(final HttpOpParam.Op op, diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestPathComponents.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestPathComponents.java index 9a712ef28b1..3daabb920c4 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestPathComponents.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestPathComponents.java @@ -25,6 +25,8 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.hdfs.DFSUtil; import org.junit.Test; +import com.google.common.base.Charsets; + /** * @@ -45,7 +47,7 @@ public class TestPathComponents { String pathString = str; byte[][] oldPathComponents = INode.getPathComponents(pathString); byte[][] newPathComponents = - DFSUtil.bytes2byteArray(pathString.getBytes("UTF-8"), + DFSUtil.bytes2byteArray(pathString.getBytes(Charsets.UTF_8), (byte) Path.SEPARATOR_CHAR); if (oldPathComponents[0] == null) { assertTrue(oldPathComponents[0] == newPathComponents[0]); From d863f7a1e452ecb26c3cb92bc6c90e552731304b Mon Sep 17 00:00:00 2001 From: Thomas White Date: Thu, 10 Jan 2013 10:05:53 +0000 Subject: [PATCH 09/31] HADOOP-9183. Potential deadlock in ActiveStandbyElector. git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1431251 13f79535-47bb-0310-9956-ffa450edef68 --- .../hadoop-common/CHANGES.txt | 2 ++ .../hadoop/ha/ActiveStandbyElector.java | 35 ++++++------------- 2 files changed, 12 insertions(+), 25 deletions(-) diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt index 618933b6c47..984f7acac7a 100644 --- a/hadoop-common-project/hadoop-common/CHANGES.txt +++ b/hadoop-common-project/hadoop-common/CHANGES.txt @@ -528,6 +528,8 @@ Release 2.0.3-alpha - Unreleased HADOOP-9155. FsPermission should have different default value, 777 for directory and 666 for file. (Binglin Chang via atm) + HADOOP-9183. Potential deadlock in ActiveStandbyElector. (tomwhite) + Release 2.0.2-alpha - 2012-09-07 INCOMPATIBLE CHANGES diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ActiveStandbyElector.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ActiveStandbyElector.java index bb4d4b5809c..6badc5e84cc 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ActiveStandbyElector.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ActiveStandbyElector.java @@ -613,7 +613,7 @@ public class ActiveStandbyElector implements StatCallback, StringCallback { // Unfortunately, the ZooKeeper constructor connects to ZooKeeper and // may trigger the Connected event immediately. So, if we register the // watcher after constructing ZooKeeper, we may miss that event. Instead, - // we construct the watcher first, and have it queue any events it receives + // we construct the watcher first, and have it block any events it receives // before we can set its ZooKeeper reference. 
WatcherWithClientRef watcher = new WatcherWithClientRef(); ZooKeeper zk = new ZooKeeper(zkHostPort, zkSessionTimeout, watcher); @@ -1002,19 +1002,17 @@ public class ActiveStandbyElector implements StatCallback, StringCallback { private CountDownLatch hasReceivedEvent = new CountDownLatch(1); /** - * If any events arrive before the reference to ZooKeeper is set, - * they get queued up and later forwarded when the reference is - * available. + * Latch used to wait until the reference to ZooKeeper is set. */ - private final List queuedEvents = Lists.newLinkedList(); + private CountDownLatch hasSetZooKeeper = new CountDownLatch(1); private WatcherWithClientRef() { } private WatcherWithClientRef(ZooKeeper zk) { - this.zk = zk; + setZooKeeperRef(zk); } - + /** * Waits for the next event from ZooKeeper to arrive. * @@ -1029,9 +1027,7 @@ public class ActiveStandbyElector implements StatCallback, StringCallback { if (!hasReceivedEvent.await(connectionTimeoutMs, TimeUnit.MILLISECONDS)) { LOG.error("Connection timed out: couldn't connect to ZooKeeper in " + connectionTimeoutMs + " milliseconds"); - synchronized (this) { - zk.close(); - } + zk.close(); throw KeeperException.create(Code.CONNECTIONLOSS); } } catch (InterruptedException e) { @@ -1041,29 +1037,18 @@ public class ActiveStandbyElector implements StatCallback, StringCallback { } } - private synchronized void setZooKeeperRef(ZooKeeper zk) { + private void setZooKeeperRef(ZooKeeper zk) { Preconditions.checkState(this.zk == null, "zk already set -- must be set exactly once"); this.zk = zk; - - for (WatchedEvent e : queuedEvents) { - forwardEvent(e); - } - queuedEvents.clear(); + hasSetZooKeeper.countDown(); } @Override - public synchronized void process(WatchedEvent event) { - if (zk != null) { - forwardEvent(event); - } else { - queuedEvents.add(event); - } - } - - private void forwardEvent(WatchedEvent event) { + public void process(WatchedEvent event) { hasReceivedEvent.countDown(); try { + hasSetZooKeeper.await(zkSessionTimeout, TimeUnit.MILLISECONDS); ActiveStandbyElector.this.processWatchEvent( zk, event); } catch (Throwable t) { From 2bd35d8de3b71489777684adb19beb1811b81538 Mon Sep 17 00:00:00 2001 From: Suresh Srinivas Date: Thu, 10 Jan 2013 16:03:37 +0000 Subject: [PATCH 10/31] HDFS-4367. GetDataEncryptionKeyResponseProto does not handle null response. Contributed by Suresh Srinivas. git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1431459 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 3 +++ .../ClientNamenodeProtocolServerSideTranslatorPB.java | 9 ++++++--- .../protocolPB/ClientNamenodeProtocolTranslatorPB.java | 8 +++++--- .../src/main/proto/ClientNamenodeProtocol.proto | 2 +- 4 files changed, 15 insertions(+), 7 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt index 2aa917caeff..6145c5dac39 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt @@ -307,6 +307,9 @@ Release 2.0.3-alpha - Unreleased HDFS-4362. GetDelegationTokenResponseProto does not handle null token. (suresh) + HDFS-4367. GetDataEncryptionKeyResponseProto does not handle null + response. (suresh) + NEW FEATURES HDFS-2656. Add libwebhdfs, a pure C client based on WebHDFS. 
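For the HADOOP-9183 change above: the patch drops the synchronized event queue in WatcherWithClientRef and instead has process() wait on a CountDownLatch until the ZooKeeper reference has been published, so neither path needs to hold a shared lock. Below is a stripped-down sketch of that latch hand-off; the Client and WatcherSketch names, the 5-second bound, and the main() driver are illustrative stand-ins, not Hadoop or ZooKeeper classes.

import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;

public class LatchHandoffSketch {

  /** Stand-in for the real client handle (ZooKeeper in the elector); name is hypothetical. */
  static class Client {
    void handle(String event) {
      System.out.println("handled: " + event);
    }
  }

  static class WatcherSketch {
    private volatile Client client;                        // published exactly once
    private final CountDownLatch clientSet = new CountDownLatch(1);

    void setClientRef(Client c) {                          // no lock held, so this never
      client = c;                                          // contends with process()
      clientSet.countDown();
    }

    void process(String event) throws InterruptedException {
      // An event that races ahead of setClientRef() just waits on the latch
      // (bounded, like the session timeout in the patch) instead of being
      // queued under a shared lock.
      if (clientSet.await(5, TimeUnit.SECONDS)) {
        client.handle(event);
      }
    }
  }

  public static void main(String[] args) throws Exception {
    final WatcherSketch watcher = new WatcherSketch();
    Thread eventThread = new Thread(new Runnable() {
      public void run() {
        try {
          watcher.process("Connected");   // event arrives before the client ref is set
        } catch (InterruptedException ignored) {
        }
      }
    });
    eventThread.start();
    Thread.sleep(100);                    // let the event get there first
    watcher.setClientRef(new Client());   // hand-off releases the waiting event
    eventThread.join();
  }
}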
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java index 82eb8169cec..31da5e82219 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java @@ -839,10 +839,13 @@ public class ClientNamenodeProtocolServerSideTranslatorPB implements RpcController controller, GetDataEncryptionKeyRequestProto request) throws ServiceException { try { + GetDataEncryptionKeyResponseProto.Builder builder = + GetDataEncryptionKeyResponseProto.newBuilder(); DataEncryptionKey encryptionKey = server.getDataEncryptionKey(); - return GetDataEncryptionKeyResponseProto.newBuilder() - .setDataEncryptionKey(PBHelper.convert(encryptionKey)) - .build(); + if (encryptionKey != null) { + builder.setDataEncryptionKey(PBHelper.convert(encryptionKey)); + } + return builder.build(); } catch (IOException e) { throw new ServiceException(e); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java index 74927ea4ad8..5ac29826b46 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java @@ -64,6 +64,7 @@ import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDat import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetBlockLocationsRequestProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetBlockLocationsResponseProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetContentSummaryRequestProto; +import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDataEncryptionKeyResponseProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDatanodeReportRequestProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDelegationTokenRequestProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDelegationTokenResponseProto; @@ -111,7 +112,6 @@ import org.apache.hadoop.ipc.ProtocolMetaInterface; import org.apache.hadoop.ipc.RPC; import org.apache.hadoop.ipc.RpcClientUtil; import org.apache.hadoop.security.AccessControlException; -import org.apache.hadoop.security.proto.SecurityProtos.TokenProto; import org.apache.hadoop.security.token.Token; import com.google.protobuf.ByteString; @@ -819,8 +819,10 @@ public class ClientNamenodeProtocolTranslatorPB implements GetDataEncryptionKeyRequestProto req = GetDataEncryptionKeyRequestProto .newBuilder().build(); try { - return PBHelper.convert(rpcProxy.getDataEncryptionKey(null, req) - .getDataEncryptionKey()); + GetDataEncryptionKeyResponseProto rsp = + rpcProxy.getDataEncryptionKey(null, req); + return rsp.hasDataEncryptionKey() ? 
+ PBHelper.convert(rsp.getDataEncryptionKey()) : null; } catch (ServiceException e) { throw ProtobufHelper.getRemoteException(e); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientNamenodeProtocol.proto b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientNamenodeProtocol.proto index 8af4760b800..6bf5af34919 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientNamenodeProtocol.proto +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientNamenodeProtocol.proto @@ -443,7 +443,7 @@ message GetDataEncryptionKeyRequestProto { // no parameters } message GetDataEncryptionKeyResponseProto { - required DataEncryptionKeyProto dataEncryptionKey = 1; + optional DataEncryptionKeyProto dataEncryptionKey = 1; } service ClientNamenodeProtocol { From d7c48409e339d9964ec7681b91a076fc5bfa2c87 Mon Sep 17 00:00:00 2001 From: Alejandro Abdelnur Date: Thu, 10 Jan 2013 17:27:58 +0000 Subject: [PATCH 11/31] MAPREDUCE-4907. Ammendment, forgot to svn add testcase in original commit git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1431510 13f79535-47bb-0310-9956-ffa450edef68 --- .../TestClientDistributedCacheManager.java | 114 ++++++++++++++++++ 1 file changed, 114 insertions(+) create mode 100644 hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/filecache/TestClientDistributedCacheManager.java diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/filecache/TestClientDistributedCacheManager.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/filecache/TestClientDistributedCacheManager.java new file mode 100644 index 00000000000..4824ba39e64 --- /dev/null +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/filecache/TestClientDistributedCacheManager.java @@ -0,0 +1,114 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.mapreduce.filecache; + +import java.io.File; +import java.io.IOException; +import java.net.URI; +import java.util.HashMap; +import java.util.Map; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FileStatus; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.io.SequenceFile; +import org.apache.hadoop.io.Text; +import org.apache.hadoop.io.SequenceFile.CompressionType; +import org.apache.hadoop.mapreduce.Job; +import org.apache.hadoop.mapreduce.MRJobConfig; +import org.junit.After; +import org.junit.Assert; +import org.junit.Before; +import org.junit.Test; + +public class TestClientDistributedCacheManager { + private static final Log LOG = LogFactory.getLog( + TestClientDistributedCacheManager.class); + + private static final String TEST_ROOT_DIR = + new File(System.getProperty("test.build.data", "/tmp")).toURI() + .toString().replace(' ', '+'); + + private FileSystem fs; + private Path firstCacheFile; + private Path secondCacheFile; + private Configuration conf; + + @Before + public void setup() throws IOException { + conf = new Configuration(); + fs = FileSystem.get(conf); + firstCacheFile = new Path(TEST_ROOT_DIR, "firstcachefile"); + secondCacheFile = new Path(TEST_ROOT_DIR, "secondcachefile"); + createTempFile(firstCacheFile, conf); + createTempFile(secondCacheFile, conf); + } + + @After + public void tearDown() throws IOException { + if (!fs.delete(firstCacheFile, false)) { + LOG.warn("Failed to delete firstcachefile"); + } + if (!fs.delete(secondCacheFile, false)) { + LOG.warn("Failed to delete secondcachefile"); + } + } + + @Test + public void testDetermineTimestamps() throws IOException { + Job job = Job.getInstance(conf); + job.addCacheFile(firstCacheFile.toUri()); + job.addCacheFile(secondCacheFile.toUri()); + Configuration jobConf = job.getConfiguration(); + + Map statCache = new HashMap(); + ClientDistributedCacheManager.determineTimestamps(jobConf, statCache); + + FileStatus firstStatus = statCache.get(firstCacheFile.toUri()); + FileStatus secondStatus = statCache.get(secondCacheFile.toUri()); + + Assert.assertNotNull(firstStatus); + Assert.assertNotNull(secondStatus); + Assert.assertEquals(2, statCache.size()); + String expected = firstStatus.getModificationTime() + "," + + secondStatus.getModificationTime(); + Assert.assertEquals(expected, jobConf.get(MRJobConfig.CACHE_FILE_TIMESTAMPS)); + } + + @SuppressWarnings("deprecation") + void createTempFile(Path p, Configuration conf) throws IOException { + SequenceFile.Writer writer = null; + try { + writer = SequenceFile.createWriter(fs, conf, p, + Text.class, Text.class, + CompressionType.NONE); + writer.append(new Text("text"), new Text("moretext")); + } catch(Exception e) { + throw new IOException(e.getLocalizedMessage()); + } finally { + if (writer != null) { + writer.close(); + } + writer = null; + } + LOG.info("created: " + p); + } +} From be5509c53743a0beddda3f5798e72b919e797bd0 Mon Sep 17 00:00:00 2001 From: Suresh Srinivas Date: Thu, 10 Jan 2013 23:20:46 +0000 Subject: [PATCH 12/31] HDFS-4382. Fix typo MAX_NOT_CHANGED_INTERATIONS. Contributed by Ted Yu. 
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1431726 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 2 ++ .../java/org/apache/hadoop/hdfs/server/balancer/Balancer.java | 4 ++-- .../apache/hadoop/hdfs/server/balancer/NameNodeConnector.java | 4 ++-- 3 files changed, 6 insertions(+), 4 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt index 6145c5dac39..cab4a426cc5 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt @@ -297,6 +297,8 @@ Trunk (Unreleased) HDFS-4261. Fix bugs in Balaner causing infinite loop and TestBalancerWithNodeGroup timeing out. (Junping Du via szetszwo) + HDFS-4382. Fix typo MAX_NOT_CHANGED_INTERATIONS. (Ted Yu via suresh) + Release 2.0.3-alpha - Unreleased INCOMPATIBLE CHANGES diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java index 049e6691913..b64844ba523 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java @@ -190,7 +190,7 @@ public class Balancer { * balancing purpose at a datanode */ public static final int MAX_NUM_CONCURRENT_MOVES = 5; - public static final int MAX_NO_PENDING_BLOCK_INTERATIONS = 5; + private static final int MAX_NO_PENDING_BLOCK_ITERATIONS = 5; private static final String USAGE = "Usage: java " + Balancer.class.getSimpleName() @@ -782,7 +782,7 @@ public class Balancer { noPendingBlockIteration++; // in case no blocks can be moved for source node's task, // jump out of while-loop after 5 iterations. - if (noPendingBlockIteration >= MAX_NO_PENDING_BLOCK_INTERATIONS) { + if (noPendingBlockIteration >= MAX_NO_PENDING_BLOCK_ITERATIONS) { scheduledSize = 0; } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/NameNodeConnector.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/NameNodeConnector.java index 530a3b7e781..afec92be611 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/NameNodeConnector.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/NameNodeConnector.java @@ -52,7 +52,7 @@ import org.apache.hadoop.util.Daemon; class NameNodeConnector { private static final Log LOG = Balancer.LOG; private static final Path BALANCER_ID_PATH = new Path("/system/balancer.id"); - private static final int MAX_NOT_CHANGED_INTERATIONS = 5; + private static final int MAX_NOT_CHANGED_ITERATIONS = 5; final URI nameNodeUri; final String blockpoolID; @@ -127,7 +127,7 @@ class NameNodeConnector { notChangedIterations = 0; } else { notChangedIterations++; - if (notChangedIterations >= MAX_NOT_CHANGED_INTERATIONS) { + if (notChangedIterations >= MAX_NOT_CHANGED_ITERATIONS) { System.out.println("No block has been moved for " + notChangedIterations + " iterations. Exiting..."); return false; From a8d60f4190a3a5f7a88c04f30bf61052c53f2b44 Mon Sep 17 00:00:00 2001 From: Eric Yang Date: Thu, 10 Jan 2013 23:58:11 +0000 Subject: [PATCH 13/31] HADOOP-8419. Fixed GzipCode NPE reset for IBM JDK. 
(Yu Li via eyang) git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1431739 13f79535-47bb-0310-9956-ffa450edef68 --- .../hadoop-common/CHANGES.txt | 2 + .../apache/hadoop/io/compress/GzipCodec.java | 64 ++++++++++++++++++- 2 files changed, 64 insertions(+), 2 deletions(-) diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt index 984f7acac7a..1d9febccf2e 100644 --- a/hadoop-common-project/hadoop-common/CHANGES.txt +++ b/hadoop-common-project/hadoop-common/CHANGES.txt @@ -148,6 +148,8 @@ Trunk (Unreleased) BUG FIXES + HADOOP-8419. Fixed GzipCode NPE reset for IBM JDK. (Yu Li via eyang) + HADOOP-9041. FsUrlStreamHandlerFactory could cause an infinite loop in FileSystem initialization. (Yanbo Liang and Radim Kolar via llu) diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/GzipCodec.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/GzipCodec.java index 520205e1660..6ac692c14e7 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/GzipCodec.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/GzipCodec.java @@ -40,14 +40,74 @@ public class GzipCodec extends DefaultCodec { protected static class GzipOutputStream extends CompressorStream { private static class ResetableGZIPOutputStream extends GZIPOutputStream { - + private static final int TRAILER_SIZE = 8; + public static final String JVMVendor= System.getProperty("java.vendor"); + public static final String JVMVersion= System.getProperty("java.version"); + private static final boolean HAS_BROKEN_FINISH = + (JVMVendor.contains("IBM") && JVMVersion.contains("1.6.0")); + public ResetableGZIPOutputStream(OutputStream out) throws IOException { super(out); } - + public void resetState() throws IOException { def.reset(); } + + /** + * Override this method for HADOOP-8419. + * Override because IBM implementation calls def.end() which + * causes problem when reseting the stream for reuse. 
+ * + */ + @Override + public void finish() throws IOException { + if (HAS_BROKEN_FINISH) { + if (!def.finished()) { + def.finish(); + while (!def.finished()) { + int i = def.deflate(this.buf, 0, this.buf.length); + if ((def.finished()) && (i <= this.buf.length - TRAILER_SIZE)) { + writeTrailer(this.buf, i); + i += TRAILER_SIZE; + out.write(this.buf, 0, i); + + return; + } + if (i > 0) { + out.write(this.buf, 0, i); + } + } + + byte[] arrayOfByte = new byte[TRAILER_SIZE]; + writeTrailer(arrayOfByte, 0); + out.write(arrayOfByte); + } + } else { + super.finish(); + } + } + + /** re-implement for HADOOP-8419 because the relative method in jdk is invisible */ + private void writeTrailer(byte[] paramArrayOfByte, int paramInt) + throws IOException { + writeInt((int)this.crc.getValue(), paramArrayOfByte, paramInt); + writeInt(this.def.getTotalIn(), paramArrayOfByte, paramInt + 4); + } + + /** re-implement for HADOOP-8419 because the relative method in jdk is invisible */ + private void writeInt(int paramInt1, byte[] paramArrayOfByte, int paramInt2) + throws IOException { + writeShort(paramInt1 & 0xFFFF, paramArrayOfByte, paramInt2); + writeShort(paramInt1 >> 16 & 0xFFFF, paramArrayOfByte, paramInt2 + 2); + } + + /** re-implement for HADOOP-8419 because the relative method in jdk is invisible */ + private void writeShort(int paramInt1, byte[] paramArrayOfByte, int paramInt2) + throws IOException { + paramArrayOfByte[paramInt2] = (byte)(paramInt1 & 0xFF); + paramArrayOfByte[(paramInt2 + 1)] = (byte)(paramInt1 >> 8 & 0xFF); + } } public GzipOutputStream(OutputStream out) throws IOException { From 80648492036d7f0aaa72082a875efc7cba500782 Mon Sep 17 00:00:00 2001 From: Eric Yang Date: Fri, 11 Jan 2013 00:00:13 +0000 Subject: [PATCH 14/31] HADOOP-8419. Fixed GzipCode NPE reset for IBM JDK. (Yu Li via eyang) git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1431740 13f79535-47bb-0310-9956-ffa450edef68 --- .../compress/TestCompressionStreamReuse.java | 161 ++++++++++++++++++ 1 file changed, 161 insertions(+) create mode 100644 hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/compress/TestCompressionStreamReuse.java diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/compress/TestCompressionStreamReuse.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/compress/TestCompressionStreamReuse.java new file mode 100644 index 00000000000..2c285944539 --- /dev/null +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/compress/TestCompressionStreamReuse.java @@ -0,0 +1,161 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.io.compress; + +import java.io.BufferedInputStream; +import java.io.BufferedOutputStream; +import java.io.DataInputStream; +import java.io.DataOutputStream; +import java.io.IOException; +import java.util.Random; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.io.DataInputBuffer; +import org.apache.hadoop.io.DataOutputBuffer; +import org.apache.hadoop.io.RandomDatum; +import org.apache.hadoop.io.compress.zlib.ZlibFactory; +import org.apache.hadoop.io.compress.zlib.ZlibCompressor.CompressionLevel; +import org.apache.hadoop.io.compress.zlib.ZlibCompressor.CompressionStrategy; +import org.apache.hadoop.util.ReflectionUtils; + +import junit.framework.TestCase; + +public class TestCompressionStreamReuse extends TestCase { + private static final Log LOG = LogFactory + .getLog(TestCompressionStreamReuse.class); + + private Configuration conf = new Configuration(); + private int count = 10000; + private int seed = new Random().nextInt(); + + public void testBZip2Codec() throws IOException { + resetStateTest(conf, seed, count, + "org.apache.hadoop.io.compress.BZip2Codec"); + } + + public void testGzipCompressStreamReuse() throws IOException { + resetStateTest(conf, seed, count, + "org.apache.hadoop.io.compress.GzipCodec"); + } + + public void testGzipCompressStreamReuseWithParam() throws IOException { + Configuration conf = new Configuration(this.conf); + ZlibFactory + .setCompressionLevel(conf, CompressionLevel.BEST_COMPRESSION); + ZlibFactory.setCompressionStrategy(conf, + CompressionStrategy.HUFFMAN_ONLY); + resetStateTest(conf, seed, count, + "org.apache.hadoop.io.compress.GzipCodec"); + } + + private static void resetStateTest(Configuration conf, int seed, int count, + String codecClass) throws IOException { + // Create the codec + CompressionCodec codec = null; + try { + codec = (CompressionCodec) ReflectionUtils.newInstance(conf + .getClassByName(codecClass), conf); + } catch (ClassNotFoundException cnfe) { + throw new IOException("Illegal codec!"); + } + LOG.info("Created a Codec object of type: " + codecClass); + + // Generate data + DataOutputBuffer data = new DataOutputBuffer(); + RandomDatum.Generator generator = new RandomDatum.Generator(seed); + for (int i = 0; i < count; ++i) { + generator.next(); + RandomDatum key = generator.getKey(); + RandomDatum value = generator.getValue(); + + key.write(data); + value.write(data); + } + LOG.info("Generated " + count + " records"); + + // Compress data + DataOutputBuffer compressedDataBuffer = new DataOutputBuffer(); + DataOutputStream deflateOut = new DataOutputStream( + new BufferedOutputStream(compressedDataBuffer)); + CompressionOutputStream deflateFilter = codec + .createOutputStream(deflateOut); + deflateFilter.write(data.getData(), 0, data.getLength()); + deflateFilter.finish(); + deflateFilter.flush(); + LOG.info("Finished compressing data"); + + // reset deflator + deflateFilter.resetState(); + LOG.info("Finished reseting deflator"); + + // re-generate data + data.reset(); + generator = new RandomDatum.Generator(seed); + for (int i = 0; i < count; ++i) { + generator.next(); + RandomDatum key = generator.getKey(); + RandomDatum value = generator.getValue(); + + key.write(data); + value.write(data); + } + DataInputBuffer originalData = new DataInputBuffer(); + DataInputStream originalIn = new DataInputStream( + new BufferedInputStream(originalData)); + originalData.reset(data.getData(), 0, 
data.getLength()); + + // re-compress data + compressedDataBuffer.reset(); + deflateOut = new DataOutputStream(new BufferedOutputStream( + compressedDataBuffer)); + deflateFilter = codec.createOutputStream(deflateOut); + + deflateFilter.write(data.getData(), 0, data.getLength()); + deflateFilter.finish(); + deflateFilter.flush(); + LOG.info("Finished re-compressing data"); + + // De-compress data + DataInputBuffer deCompressedDataBuffer = new DataInputBuffer(); + deCompressedDataBuffer.reset(compressedDataBuffer.getData(), 0, + compressedDataBuffer.getLength()); + CompressionInputStream inflateFilter = codec + .createInputStream(deCompressedDataBuffer); + DataInputStream inflateIn = new DataInputStream( + new BufferedInputStream(inflateFilter)); + + // Check + for (int i = 0; i < count; ++i) { + RandomDatum k1 = new RandomDatum(); + RandomDatum v1 = new RandomDatum(); + k1.readFields(originalIn); + v1.readFields(originalIn); + + RandomDatum k2 = new RandomDatum(); + RandomDatum v2 = new RandomDatum(); + k2.readFields(inflateIn); + v2.readFields(inflateIn); + assertTrue( + "original and compressed-then-decompressed-output not equal", + k1.equals(k2) && v1.equals(v2)); + } + LOG.info("SUCCESS! Completed checking " + count + " records"); + } +} From 82b218c605add2f5cf01c5aa21ab8c55fa5967fd Mon Sep 17 00:00:00 2001 From: Eli Collins Date: Fri, 11 Jan 2013 00:09:37 +0000 Subject: [PATCH 15/31] HDFS-4377. Some trivial DN comment cleanup. Contributed by Eli Collins git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1431753 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 2 ++ .../server/blockmanagement/BlockManager.java | 19 ++++++------ .../hadoop/hdfs/server/datanode/DataNode.java | 30 +++++++++---------- .../hdfs/server/datanode/DataStorage.java | 14 ++++----- 4 files changed, 32 insertions(+), 33 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt index cab4a426cc5..390bfe55fd9 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt @@ -478,6 +478,8 @@ Release 2.0.3-alpha - Unreleased HDFS-4363. Combine PBHelper and HdfsProtoUtil and remove redundant methods. (suresh) + HDFS-4377. Some trivial DN comment cleanup. (eli) + OPTIMIZATIONS BUG FIXES diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java index 26e88ce4695..c56f353db35 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java @@ -171,20 +171,19 @@ public class BlockManager { */ private final Set postponedMisreplicatedBlocks = Sets.newHashSet(); - // - // Keeps a TreeSet for every named node. Each treeset contains - // a list of the blocks that are "extra" at that location. We'll - // eventually remove these extras. - // Mapping: StorageID -> TreeSet - // + /** + * Maps a StorageID to the set of blocks that are "extra" for this + * DataNode. We'll eventually remove these extras. + */ public final Map> excessReplicateMap = new TreeMap>(); - // - // Store set of Blocks that need to be replicated 1 or more times. - // We also store pending replication-orders. 
- // + /** + * Store set of Blocks that need to be replicated 1 or more times. + * We also store pending replication-orders. + */ public final UnderReplicatedBlocks neededReplications = new UnderReplicatedBlocks(); + @VisibleForTesting final PendingReplicationBlocks pendingReplications; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java index 58bdedc72ec..c1845fd152b 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java @@ -970,29 +970,27 @@ public class DataNode extends Configured dnId.setStorageID(createNewStorageId(dnId.getXferPort())); } + /** + * @return a unique storage ID of form "DS-randInt-ipaddr-port-timestamp" + */ static String createNewStorageId(int port) { - /* Return - * "DS-randInt-ipaddr-currentTimeMillis" - * It is considered extermely rare for all these numbers to match - * on a different machine accidentally for the following - * a) SecureRandom(INT_MAX) is pretty much random (1 in 2 billion), and - * b) Good chance ip address would be different, and - * c) Even on the same machine, Datanode is designed to use different ports. - * d) Good chance that these are started at different times. - * For a confict to occur all the 4 above have to match!. - * The format of this string can be changed anytime in future without - * affecting its functionality. - */ + // It is unlikely that we will create a non-unique storage ID + // for the following reasons: + // a) SecureRandom is a cryptographically strong random number generator + // b) IP addresses will likely differ on different hosts + // c) DataNode xfer ports will differ on the same host + // d) StorageIDs will likely be generated at different times (in ms) + // A conflict requires that all four conditions are violated. + // NB: The format of this string can be changed in the future without + // requiring that old SotrageIDs be updated. 
String ip = "unknownIP"; try { ip = DNS.getDefaultIP("default"); } catch (UnknownHostException ignored) { - LOG.warn("Could not find ip address of \"default\" inteface."); + LOG.warn("Could not find an IP address for the \"default\" inteface."); } - int rand = DFSUtil.getSecureRandom().nextInt(Integer.MAX_VALUE); - return "DS-" + rand + "-" + ip + "-" + port + "-" - + Time.now(); + return "DS-" + rand + "-" + ip + "-" + port + "-" + Time.now(); } /** Ensure the authentication method is kerberos */ diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java index cd6b7ccd910..9d31ffa673e 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java @@ -62,7 +62,7 @@ import org.apache.hadoop.util.DiskChecker; */ @InterfaceAudience.Private public class DataStorage extends Storage { - // Constants + public final static String BLOCK_SUBDIR_PREFIX = "subdir"; final static String BLOCK_FILE_PREFIX = "blk_"; final static String COPY_FILE_PREFIX = "dncp_"; @@ -71,13 +71,13 @@ public class DataStorage extends Storage { public final static String STORAGE_DIR_FINALIZED = "finalized"; public final static String STORAGE_DIR_TMP = "tmp"; - /** Access to this variable is guarded by "this" */ + /** Unique storage ID. {@see DataNode#createNewStorageId(int)} for details */ private String storageID; - // flag to ensure initialzing storage occurs only once - private boolean initilized = false; + // Flag to ensure we only initialize storage once + private boolean initialized = false; - // BlockPoolStorage is map of + // Maps block pool IDs to block pool storage private Map bpStorageMap = Collections.synchronizedMap(new HashMap()); @@ -130,7 +130,7 @@ public class DataStorage extends Storage { synchronized void recoverTransitionRead(DataNode datanode, NamespaceInfo nsInfo, Collection dataDirs, StartupOption startOpt) throws IOException { - if (initilized) { + if (initialized) { // DN storage has been initialized, no need to do anything return; } @@ -200,7 +200,7 @@ public class DataStorage extends Storage { this.writeAll(); // 4. mark DN storage is initilized - this.initilized = true; + this.initialized = true; } /** From 91c28d440952bdd50f10ff2d892182b8cd7a8065 Mon Sep 17 00:00:00 2001 From: Aaron Myers Date: Fri, 11 Jan 2013 03:44:36 +0000 Subject: [PATCH 16/31] HDFS-4328. TestLargeBlock#testLargeBlockSize is timing out. Contributed by Chris Nauroth. git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1431867 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 3 +++ .../hdfs/server/datanode/BlockSender.java | 23 +++++++++++-------- .../hdfs/util/DataTransferThrottler.java | 7 +++++- .../apache/hadoop/hdfs/TestLargeBlock.java | 2 +- 4 files changed, 23 insertions(+), 12 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt index 390bfe55fd9..48f351821fa 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt @@ -299,6 +299,9 @@ Trunk (Unreleased) HDFS-4382. Fix typo MAX_NOT_CHANGED_INTERATIONS. (Ted Yu via suresh) + HDFS-4328. TestLargeBlock#testLargeBlockSize is timing out. 
(Chris Nauroth + via atm) + Release 2.0.3-alpha - Unreleased INCOMPATIBLE CHANGES diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockSender.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockSender.java index 49eb7dc3acc..bbcb2dd2e1f 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockSender.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockSender.java @@ -648,7 +648,7 @@ class BlockSender implements java.io.Closeable { ByteBuffer pktBuf = ByteBuffer.allocate(pktBufSize); - while (endOffset > offset) { + while (endOffset > offset && !Thread.currentThread().isInterrupted()) { manageOsCache(); long len = sendPacket(pktBuf, maxChunksPerPacket, streamForSendChunks, transferTo, throttler); @@ -656,16 +656,19 @@ class BlockSender implements java.io.Closeable { totalRead += len + (numberOfChunks(len) * checksumSize); seqno++; } - try { - // send an empty packet to mark the end of the block - sendPacket(pktBuf, maxChunksPerPacket, streamForSendChunks, transferTo, - throttler); - out.flush(); - } catch (IOException e) { //socket error - throw ioeToSocketException(e); - } + // If this thread was interrupted, then it did not send the full block. + if (!Thread.currentThread().isInterrupted()) { + try { + // send an empty packet to mark the end of the block + sendPacket(pktBuf, maxChunksPerPacket, streamForSendChunks, transferTo, + throttler); + out.flush(); + } catch (IOException e) { //socket error + throw ioeToSocketException(e); + } - sentEntireByteRange = true; + sentEntireByteRange = true; + } } finally { if (clientTraceFmt != null) { final long endTime = System.nanoTime(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/DataTransferThrottler.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/DataTransferThrottler.java index a4f04a68d44..d00d4341b94 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/DataTransferThrottler.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/DataTransferThrottler.java @@ -96,7 +96,12 @@ public class DataTransferThrottler { // Wait for next period so that curReserve can be increased. try { wait( curPeriodEnd - now ); - } catch (InterruptedException ignored) {} + } catch (InterruptedException e) { + // Abort throttle and reset interrupted status to make sure other + // interrupt handling higher in the call stack executes. 
+ Thread.currentThread().interrupt(); + break; + } } else if ( now < (curPeriodStart + periodExtension)) { curPeriodStart = curPeriodEnd; curReserve += bytesPerPeriod; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLargeBlock.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLargeBlock.java index 3ca1cf3ec26..9563361094c 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLargeBlock.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLargeBlock.java @@ -158,7 +158,7 @@ public class TestLargeBlock { * Test for block size of 2GB + 512B * @throws IOException in case of errors */ - @Test + @Test(timeout = 120000) public void testLargeBlockSize() throws IOException { final long blockSize = 2L * 1024L * 1024L * 1024L + 512L; // 2GB + 512B runTest(blockSize); From bbc21ad5d491810b968c5d2a4df9f6c7a8fe29b8 Mon Sep 17 00:00:00 2001 From: Suresh Srinivas Date: Fri, 11 Jan 2013 16:57:12 +0000 Subject: [PATCH 17/31] HDFS-4381. Document fsimage format details in FSImageFormat class javadoc. Contributed by Jing Zhao. git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1432149 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 3 ++ .../hdfs/server/namenode/FSImageFormat.java | 52 ++++++++++++++++++- 2 files changed, 54 insertions(+), 1 deletion(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt index 48f351821fa..1f191d20fc3 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt @@ -483,6 +483,9 @@ Release 2.0.3-alpha - Unreleased HDFS-4377. Some trivial DN comment cleanup. (eli) + HDFS-4381. Document fsimage format details in FSImageFormat class javadoc. + (Jing Zhao via suresh) + OPTIMIZATIONS BUG FIXES diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java index 34f133234b4..7971de3df71 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java @@ -49,7 +49,57 @@ import org.apache.hadoop.io.MD5Hash; import org.apache.hadoop.io.Text; /** - * Contains inner classes for reading or writing the on-disk format for FSImages. + * Contains inner classes for reading or writing the on-disk format for + * FSImages. + * + * In particular, the format of the FSImage looks like: + *
+ * FSImage {
+ *   LayoutVersion: int, NamespaceID: int, NumberItemsInFSDirectoryTree: long,
+ *   NamesystemGenerationStamp: long, TransactionID: long
+ *   {FSDirectoryTree, FilesUnderConstruction, SecretManagerState} (can be compressed)
+ * }
+ * 
+ * FSDirectoryTree (if {@link Feature#FSIMAGE_NAME_OPTIMIZATION} is supported) {
+ *   INodeInfo of root, NumberOfChildren of root: int
+ *   [list of INodeInfo of root's children],
+ *   [list of INodeDirectoryInfo of root's directory children]
+ * }
+ * 
+ * FSDirectoryTree (if {@link Feature#FSIMAGE_NAME_OPTIMIZATION} is not supported) {
+ *   [list of INodeInfo of INodes in topological order]
+ * }
+ * 
+ * INodeInfo {
+ *   {
+ *     LocalName: short + byte[]
+ *   } when {@link Feature#FSIMAGE_NAME_OPTIMIZATION} is supported
+ *   or 
+ *   {
+ *     FullPath: byte[]
+ *   } when {@link Feature#FSIMAGE_NAME_OPTIMIZATION} is not supported
+ *   ReplicationFactor: short, ModificationTime: long,
+ *   AccessTime: long, PreferredBlockSize: long,
+ *   NumberOfBlocks: int (-1 for INodeDirectory, -2 for INodeSymLink),
+ *   { 
+ *     NsQuota: long, DsQuota: long, FsPermission: short, PermissionStatus
+ *   } for INodeDirectory
+ *   or 
+ *   {
+ *     SymlinkString, FsPermission: short, PermissionStatus
+ *   } for INodeSymlink
+ *   or
+ *   {
+ *     [list of BlockInfo], FsPermission: short, PermissionStatus
+ *   } for INodeFile
+ * }
+ * 
+ * INodeDirectoryInfo {
+ *   FullPath of the directory: short + byte[],
+ *   NumberOfChildren: int, [list of INodeInfo of children INode]
+ *   [list of INodeDirectoryInfo of the directory children]
+ * }
+ * 
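+ * As a rough illustrative sketch (not the actual FSImageFormat.Loader code),
+ * the header fields listed above could be read from a java.io.DataInputStream
+ * "in" as:
+ *   int layoutVersion = in.readInt();
+ *   int namespaceId = in.readInt();
+ *   long numInodes = in.readLong();
+ *   long genStamp = in.readLong();
+ *   long txId = in.readLong();
+ * (the variable names here are assumptions used for illustration only)
+ * 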
*/ @InterfaceAudience.Private @InterfaceStability.Evolving From 6b3006cb32d9fc8d206bcd1d99163d391538e435 Mon Sep 17 00:00:00 2001 From: Robert Joseph Evans Date: Fri, 11 Jan 2013 16:59:32 +0000 Subject: [PATCH 18/31] HADOOP-9139 improve killKdc.sh (Ivan A. Veselovsky via bobby) git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1432151 13f79535-47bb-0310-9956-ffa450edef68 --- .../hadoop-common/CHANGES.txt | 2 ++ .../src/test/resources/kdc/killKdc.sh | 18 +++++++++++++++++- 2 files changed, 19 insertions(+), 1 deletion(-) diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt index 1d9febccf2e..8f0d1e72113 100644 --- a/hadoop-common-project/hadoop-common/CHANGES.txt +++ b/hadoop-common-project/hadoop-common/CHANGES.txt @@ -311,6 +311,8 @@ Trunk (Unreleased) HADOOP-8957 AbstractFileSystem#IsValidName should be overridden for embedded file systems like ViewFs (Chris Nauroth via Sanjay Radia) + HADOOP-9139 improve killKdc.sh (Ivan A. Veselovsky via bobby) + OPTIMIZATIONS HADOOP-7761. Improve the performance of raw comparisons. (todd) diff --git a/hadoop-common-project/hadoop-common/src/test/resources/kdc/killKdc.sh b/hadoop-common-project/hadoop-common/src/test/resources/kdc/killKdc.sh index f5561d4b787..a6a3d77a3e5 100644 --- a/hadoop-common-project/hadoop-common/src/test/resources/kdc/killKdc.sh +++ b/hadoop-common-project/hadoop-common/src/test/resources/kdc/killKdc.sh @@ -1,3 +1,19 @@ #!/bin/sh -ps -ef | grep apacheds | grep -v grep | cut -f4 -d ' ' |xargs kill -9 +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +ps -ef | grep apacheds | grep -v grep | awk '{printf $2"\n"}' | xargs -t --no-run-if-empty kill -9 From ca93aaf7995d4fbdd8d1c2dcc67b66715c3f936f Mon Sep 17 00:00:00 2001 From: Suresh Srinivas Date: Fri, 11 Jan 2013 17:10:00 +0000 Subject: [PATCH 19/31] HADOOP-9192. Move token related request/response messages to common. Contributed by Suresh Srinivas. git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1432158 13f79535-47bb-0310-9956-ffa450edef68 --- .../hadoop-common/CHANGES.txt | 3 +++ .../src/main/proto/Security.proto | 23 +++++++++++++++++++ 2 files changed, 26 insertions(+) diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt index 8f0d1e72113..2ca826bfb99 100644 --- a/hadoop-common-project/hadoop-common/CHANGES.txt +++ b/hadoop-common-project/hadoop-common/CHANGES.txt @@ -430,6 +430,9 @@ Release 2.0.3-alpha - Unreleased HADOOP-9119. Add test to FileSystemContractBaseTest to verify integrity of overwritten files. (Steve Loughran via suresh) + HADOOP-9192. Move token related request/response messages to common. + (suresh) + OPTIMIZATIONS HADOOP-8866. SampleQuantiles#query is O(N^2) instead of O(N). 
(Andrew Wang diff --git a/hadoop-common-project/hadoop-common/src/main/proto/Security.proto b/hadoop-common-project/hadoop-common/src/main/proto/Security.proto index 0aab15a544f..961ee9d7885 100644 --- a/hadoop-common-project/hadoop-common/src/main/proto/Security.proto +++ b/hadoop-common-project/hadoop-common/src/main/proto/Security.proto @@ -32,3 +32,26 @@ message TokenProto { required string service = 4; } +message GetDelegationTokenRequestProto { + required string renewer = 1; +} + +message GetDelegationTokenResponseProto { + optional hadoop.common.TokenProto token = 1; +} + +message RenewDelegationTokenRequestProto { + required hadoop.common.TokenProto token = 1; +} + +message RenewDelegationTokenResponseProto { + required uint64 newExpiryTime = 1; +} + +message CancelDelegationTokenRequestProto { + required hadoop.common.TokenProto token = 1; +} + +message CancelDelegationTokenResponseProto { // void response +} + From ebfa3ab1fad3aec5cc1ce0a8496b85da6bcaf064 Mon Sep 17 00:00:00 2001 From: Eli Collins Date: Fri, 11 Jan 2013 18:47:52 +0000 Subject: [PATCH 20/31] HDFS-4384. test_libhdfs_threaded gets SEGV if JNIEnv cannot be initialized. Contributed by Colin Patrick McCabe git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1432221 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 2 ++ .../hadoop-hdfs/src/main/native/libhdfs/native_mini_dfs.c | 2 +- 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt index 1f191d20fc3..d978137e988 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt @@ -698,6 +698,8 @@ Release 2.0.3-alpha - Unreleased HDFS-4306. PBHelper.convertLocatedBlock miss convert BlockToken. (Binglin Chang via atm) + HDFS-4387. test_libhdfs_threaded SEGV on OpenJDK 7. (Colin McCabe via eli) + BREAKDOWN OF HDFS-3077 SUBTASKS HDFS-3077. Quorum-based protocol for reading and writing edit logs. diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/native_mini_dfs.c b/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/native_mini_dfs.c index a1e786450f0..a1476ca18f0 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/native_mini_dfs.c +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/native_mini_dfs.c @@ -52,7 +52,7 @@ struct NativeMiniDfsCluster* nmdCreate(struct NativeMiniDfsConf *conf) if (!env) { fprintf(stderr, "nmdCreate: unable to construct JNIEnv.\n"); - goto error; + return NULL; } cl = calloc(1, sizeof(struct NativeMiniDfsCluster)); if (!cl) { From c4976aee4b1db9a88f17e0a4e706130b7a823408 Mon Sep 17 00:00:00 2001 From: Eli Collins Date: Fri, 11 Jan 2013 18:56:23 +0000 Subject: [PATCH 21/31] Update CHANGES.txt to move HDFS-4328. git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1432227 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt index d978137e988..d4371569965 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt @@ -299,9 +299,6 @@ Trunk (Unreleased) HDFS-4382. Fix typo MAX_NOT_CHANGED_INTERATIONS. (Ted Yu via suresh) - HDFS-4328. TestLargeBlock#testLargeBlockSize is timing out. 
(Chris Nauroth - via atm) - Release 2.0.3-alpha - Unreleased INCOMPATIBLE CHANGES @@ -698,7 +695,11 @@ Release 2.0.3-alpha - Unreleased HDFS-4306. PBHelper.convertLocatedBlock miss convert BlockToken. (Binglin Chang via atm) - HDFS-4387. test_libhdfs_threaded SEGV on OpenJDK 7. (Colin McCabe via eli) + HDFS-4384. test_libhdfs_threaded gets SEGV if JNIEnv cannot be + initialized. (Colin Patrick McCabe via eli) + + HDFS-4328. TestLargeBlock#testLargeBlockSize is timing out. (Chris Nauroth + via atm) BREAKDOWN OF HDFS-3077 SUBTASKS From 12293f8a13054eb12ee8c346aeb24a211a0673de Mon Sep 17 00:00:00 2001 From: Robert Joseph Evans Date: Fri, 11 Jan 2013 19:00:51 +0000 Subject: [PATCH 22/31] MAPREDUCE-4921. JobClient should acquire HS token with RM principal (daryn via bobby) git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1432230 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-mapreduce-project/CHANGES.txt | 3 ++ .../org/apache/hadoop/mapred/JobClient.java | 7 --- .../org/apache/hadoop/mapred/YARNRunner.java | 12 ++--- .../v2 => mapred}/TestYARNRunner.java | 50 ++++++++++++++++++- 4 files changed, 58 insertions(+), 14 deletions(-) rename hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/{mapreduce/v2 => mapred}/TestYARNRunner.java (85%) diff --git a/hadoop-mapreduce-project/CHANGES.txt b/hadoop-mapreduce-project/CHANGES.txt index 5f15f5e9db5..6ec1fcfb8cb 100644 --- a/hadoop-mapreduce-project/CHANGES.txt +++ b/hadoop-mapreduce-project/CHANGES.txt @@ -691,6 +691,9 @@ Release 0.23.6 - UNRELEASED MAPREDUCE-4848. TaskAttemptContext cast error during AM recovery (Jerry Chen via jlowe) + MAPREDUCE-4921. JobClient should acquire HS token with RM principal + (daryn via bobby) + Release 0.23.5 - UNRELEASED INCOMPATIBLE CHANGES diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/JobClient.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/JobClient.java index e441f71cdab..70bcbc56aa4 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/JobClient.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/JobClient.java @@ -144,13 +144,9 @@ public class JobClient extends CLI { * we have to add this hack. 
*/ private boolean getDelegationTokenCalled = false; - /* notes the renewer that will renew the delegation token */ - private String dtRenewer = null; /* do we need a HS delegation token for this client */ static final String HS_DELEGATION_TOKEN_REQUIRED = "mapreduce.history.server.delegationtoken.required"; - static final String HS_DELEGATION_TOKEN_RENEWER - = "mapreduce.history.server.delegationtoken.renewer"; static{ ConfigUtil.loadResources(); @@ -576,8 +572,6 @@ public class JobClient extends CLI { if (getDelegationTokenCalled) { conf.setBoolean(HS_DELEGATION_TOKEN_REQUIRED, getDelegationTokenCalled); getDelegationTokenCalled = false; - conf.set(HS_DELEGATION_TOKEN_RENEWER, dtRenewer); - dtRenewer = null; } Job job = clientUgi.doAs(new PrivilegedExceptionAction () { @Override @@ -1180,7 +1174,6 @@ public class JobClient extends CLI { public Token getDelegationToken(final Text renewer) throws IOException, InterruptedException { getDelegationTokenCalled = true; - dtRenewer = renewer.toString(); return clientUgi.doAs(new PrivilegedExceptionAction>() { public Token run() throws IOException, diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/YARNRunner.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/YARNRunner.java index 98c94fab804..427f5a03f88 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/YARNRunner.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/YARNRunner.java @@ -85,6 +85,7 @@ import org.apache.hadoop.yarn.util.BuilderUtils; import org.apache.hadoop.yarn.util.ConverterUtils; import org.apache.hadoop.yarn.util.ProtoUtils; +import com.google.common.annotations.VisibleForTesting; /** * This class enables the current JobClient (0.22 hadoop) to run on YARN. @@ -184,12 +185,12 @@ public class YARNRunner implements ClientProtocol { return resMgrDelegate.getClusterMetrics(); } - private Token getDelegationTokenFromHS( - MRClientProtocol hsProxy, Text renewer) throws IOException, - InterruptedException { + @VisibleForTesting + Token getDelegationTokenFromHS(MRClientProtocol hsProxy) + throws IOException, InterruptedException { GetDelegationTokenRequest request = recordFactory .newRecordInstance(GetDelegationTokenRequest.class); - request.setRenewer(renewer.toString()); + request.setRenewer(Master.getMasterPrincipal(conf)); DelegationToken mrDelegationToken = hsProxy.getDelegationToken(request) .getDelegationToken(); return ProtoUtils.convertFromProtoFormat(mrDelegationToken, @@ -269,8 +270,7 @@ public class YARNRunner implements ClientProtocol { // the delegation tokens for the HistoryServer also. 
if (conf.getBoolean(JobClient.HS_DELEGATION_TOKEN_REQUIRED, DEFAULT_HS_DELEGATION_TOKEN_REQUIRED)) { - Token hsDT = getDelegationTokenFromHS(hsProxy, new Text( - conf.get(JobClient.HS_DELEGATION_TOKEN_RENEWER))); + Token hsDT = getDelegationTokenFromHS(hsProxy); ts.addToken(hsDT.getService(), hsDT); } } diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/v2/TestYARNRunner.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestYARNRunner.java similarity index 85% rename from hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/v2/TestYARNRunner.java rename to hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestYARNRunner.java index aa844c025f1..6a67bbd3a6d 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/v2/TestYARNRunner.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestYARNRunner.java @@ -16,7 +16,7 @@ * limitations under the License. */ -package org.apache.hadoop.mapreduce.v2; +package org.apache.hadoop.mapred; import static org.mockito.Matchers.any; import static org.mockito.Mockito.doAnswer; @@ -29,6 +29,8 @@ import java.io.File; import java.io.FileOutputStream; import java.io.IOException; import java.io.OutputStream; +import java.nio.ByteBuffer; +import java.security.PrivilegedExceptionAction; import java.util.List; import junit.framework.TestCase; @@ -41,6 +43,7 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.mapred.ClientCache; import org.apache.hadoop.mapred.ClientServiceDelegate; import org.apache.hadoop.mapred.JobConf; +import org.apache.hadoop.mapred.Master; import org.apache.hadoop.mapred.ResourceMgrDelegate; import org.apache.hadoop.mapred.YARNRunner; import org.apache.hadoop.mapreduce.JobID; @@ -48,7 +51,11 @@ import org.apache.hadoop.mapreduce.JobPriority; import org.apache.hadoop.mapreduce.JobStatus.State; import org.apache.hadoop.mapreduce.MRJobConfig; import org.apache.hadoop.mapreduce.TypeConverter; +import org.apache.hadoop.mapreduce.v2.api.MRClientProtocol; +import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetDelegationTokenRequest; +import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetDelegationTokenResponse; import org.apache.hadoop.security.Credentials; +import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.yarn.api.ClientRMProtocol; import org.apache.hadoop.yarn.api.protocolrecords.GetNewApplicationRequest; import org.apache.hadoop.yarn.api.protocolrecords.GetNewApplicationResponse; @@ -69,6 +76,7 @@ import org.apache.hadoop.yarn.api.records.ApplicationId; import org.apache.hadoop.yarn.api.records.ApplicationReport; import org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext; import org.apache.hadoop.yarn.api.records.ContainerLaunchContext; +import org.apache.hadoop.yarn.api.records.DelegationToken; import org.apache.hadoop.yarn.api.records.QueueInfo; import org.apache.hadoop.yarn.api.records.YarnClusterMetrics; import org.apache.hadoop.yarn.api.records.YarnApplicationState; @@ -245,6 +253,46 @@ public class TestYARNRunner extends TestCase { 
verify(clientRMProtocol).getQueueUserAcls(any(GetQueueUserAclsInfoRequest.class)); } + @Test + public void testHistoryServerToken() throws Exception { + final String masterPrincipal = Master.getMasterPrincipal(conf); + + final MRClientProtocol hsProxy = mock(MRClientProtocol.class); + when(hsProxy.getDelegationToken(any(GetDelegationTokenRequest.class))).thenAnswer( + new Answer() { + public GetDelegationTokenResponse answer(InvocationOnMock invocation) { + GetDelegationTokenRequest request = + (GetDelegationTokenRequest)invocation.getArguments()[0]; + // check that the renewer matches the cluster's RM principal + assertEquals(request.getRenewer(), masterPrincipal); + + DelegationToken token = + recordFactory.newRecordInstance(DelegationToken.class); + // none of these fields matter for the sake of the test + token.setKind(""); + token.setService(""); + token.setIdentifier(ByteBuffer.allocate(0)); + token.setPassword(ByteBuffer.allocate(0)); + GetDelegationTokenResponse tokenResponse = + recordFactory.newRecordInstance(GetDelegationTokenResponse.class); + tokenResponse.setDelegationToken(token); + return tokenResponse; + } + }); + + UserGroupInformation.createRemoteUser("someone").doAs( + new PrivilegedExceptionAction() { + @Override + public Void run() throws Exception { + yarnRunner = new YARNRunner(conf, null, null); + yarnRunner.getDelegationTokenFromHS(hsProxy); + verify(hsProxy). + getDelegationToken(any(GetDelegationTokenRequest.class)); + return null; + } + }); + } + @Test public void testAMAdminCommandOpts() throws Exception { JobConf jobConf = new JobConf(); From e1a30433824c89f4b21e9cbbec2bc529e2ac50e2 Mon Sep 17 00:00:00 2001 From: Eli Collins Date: Fri, 11 Jan 2013 22:19:24 +0000 Subject: [PATCH 23/31] Update CHANGES.txt to reflect HDFS-4274 merge. git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1432316 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt index d4371569965..13eab180007 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt @@ -285,9 +285,6 @@ Trunk (Unreleased) HDFS-4310. fix test org.apache.hadoop.hdfs.server.datanode. TestStartSecureDataNode (Ivan A. Veselovsky via atm) - HDFS-4274. BlockPoolSliceScanner does not close verification log during - shutdown. (Chris Nauroth via suresh) - HDFS-4275. MiniDFSCluster-based tests fail on Windows due to failure to delete test namenode directory. (Chris Nauroth via suresh) @@ -701,6 +698,9 @@ Release 2.0.3-alpha - Unreleased HDFS-4328. TestLargeBlock#testLargeBlockSize is timing out. (Chris Nauroth via atm) + HDFS-4274. BlockPoolSliceScanner does not close verification log during + shutdown. (Chris Nauroth via suresh) + BREAKDOWN OF HDFS-3077 SUBTASKS HDFS-3077. Quorum-based protocol for reading and writing edit logs. From 06406d705677845e1e303550e3bb0e2d4ccdbf70 Mon Sep 17 00:00:00 2001 From: Konstantin Shvachko Date: Sun, 13 Jan 2013 01:13:29 +0000 Subject: [PATCH 24/31] HDFS-1245. Plugable block id generation. Contributed by Konstantin Shvachko. 
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1432539 13f79535-47bb-0310-9956-ffa450edef68 --- .../org/apache/hadoop/util/IdGenerator.java | 31 +++++++++++ .../apache/hadoop/util/SequentialNumber.java | 2 +- hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 2 + .../hadoop/hdfs/server/namenode/FSImage.java | 17 ++++++ .../hdfs/server/namenode/FSNamesystem.java | 13 +---- .../namenode/RandomBlockIdGenerator.java | 52 +++++++++++++++++++ 6 files changed, 104 insertions(+), 13 deletions(-) create mode 100644 hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/IdGenerator.java create mode 100644 hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/RandomBlockIdGenerator.java diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/IdGenerator.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/IdGenerator.java new file mode 100644 index 00000000000..c14727a3771 --- /dev/null +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/IdGenerator.java @@ -0,0 +1,31 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.util; + +import org.apache.hadoop.classification.InterfaceAudience; + +/** + * Generic ID generator + * used for generating various types of number sequences. + */ +@InterfaceAudience.Private +public interface IdGenerator { + + /** Increment and then return the next value. */ + public long nextValue(); +} diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/SequentialNumber.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/SequentialNumber.java index 23213d39bd7..366e679e64b 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/SequentialNumber.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/SequentialNumber.java @@ -27,7 +27,7 @@ import org.apache.hadoop.classification.InterfaceAudience; * This class is thread safe. */ @InterfaceAudience.Private -public abstract class SequentialNumber { +public abstract class SequentialNumber implements IdGenerator { private final AtomicLong currentValue; /** Create a new instance with the given initial value. */ diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt index 13eab180007..a63406d06d6 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt @@ -701,6 +701,8 @@ Release 2.0.3-alpha - Unreleased HDFS-4274. BlockPoolSliceScanner does not close verification log during shutdown. (Chris Nauroth via suresh) + HDFS-1245. Plugable block id generation. (shv) + BREAKDOWN OF HDFS-3077 SUBTASKS HDFS-3077. 
Quorum-based protocol for reading and writing edit logs. diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java index d0d3478170e..f67b7ce852f 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java @@ -59,6 +59,7 @@ import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo; import org.apache.hadoop.hdfs.util.Canceler; import org.apache.hadoop.hdfs.util.MD5FileUtils; import org.apache.hadoop.io.MD5Hash; +import org.apache.hadoop.util.IdGenerator; import org.apache.hadoop.util.Time; import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.hdfs.DFSUtil; @@ -92,6 +93,7 @@ public class FSImage implements Closeable { final private Configuration conf; protected NNStorageRetentionManager archivalManager; + protected IdGenerator blockIdGenerator; /** * Construct an FSImage @@ -137,6 +139,9 @@ public class FSImage implements Closeable { Preconditions.checkState(fileCount == 1, "FSImage.format should be called with an uninitialized namesystem, has " + fileCount + " files"); + // BlockIdGenerator is defined during formatting + // currently there is only one BlockIdGenerator + blockIdGenerator = createBlockIdGenerator(fsn); NamespaceInfo ns = NNStorage.newNamespaceInfo(); ns.clusterID = clusterId; @@ -253,6 +258,7 @@ public class FSImage implements Closeable { doRollback(); break; case REGULAR: + default: // just load the image } @@ -737,6 +743,9 @@ public class FSImage implements Closeable { FSImageFormat.Loader loader = new FSImageFormat.Loader( conf, target); loader.load(curFile); + // BlockIdGenerator is determined after loading image + // currently there is only one BlockIdGenerator + blockIdGenerator = createBlockIdGenerator(target); target.setBlockPoolId(this.getBlockPoolID()); // Check that the image digest we loaded matches up with what @@ -1165,4 +1174,12 @@ public class FSImage implements Closeable { public synchronized long getMostRecentCheckpointTxId() { return storage.getMostRecentCheckpointTxId(); } + + public long getUniqueBlockId() { + return blockIdGenerator.nextValue(); + } + + public IdGenerator createBlockIdGenerator(FSNamesystem fsn) { + return new RandomBlockIdGenerator(fsn); + } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java index 258ef9195a0..54dcd5e7ed2 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java @@ -79,7 +79,6 @@ import java.io.DataOutputStream; import java.io.File; import java.io.FileNotFoundException; import java.io.FileOutputStream; -import java.io.FileWriter; import java.io.IOException; import java.io.OutputStreamWriter; import java.io.PrintWriter; @@ -2539,10 +2538,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats, private Block allocateBlock(String src, INodesInPath inodesInPath, DatanodeDescriptor targets[]) throws IOException { assert hasWriteLock(); - Block b = new Block(DFSUtil.getRandom().nextLong(), 0, 0); - while(isValidBlock(b)) { - 
b.setBlockId(DFSUtil.getRandom().nextLong()); - } + Block b = new Block(getFSImage().getUniqueBlockId(), 0, 0); // Increment the generation stamp for every new block. b.setGenerationStamp(nextGenerationStamp()); b = dir.addBlock(src, inodesInPath, b, targets); @@ -4554,13 +4550,6 @@ public class FSNamesystem implements Namesystem, FSClusterStats, } } - /** - * Returns whether the given block is one pointed-to by a file. - */ - private boolean isValidBlock(Block b) { - return (blockManager.getBlockCollection(b) != null); - } - PermissionStatus createFsOwnerPermissions(FsPermission permission) { return new PermissionStatus(fsOwner.getShortUserName(), supergroup, permission); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/RandomBlockIdGenerator.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/RandomBlockIdGenerator.java new file mode 100644 index 00000000000..943662d4669 --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/RandomBlockIdGenerator.java @@ -0,0 +1,52 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdfs.server.namenode; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.hdfs.DFSUtil; +import org.apache.hadoop.hdfs.protocol.Block; +import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager; +import org.apache.hadoop.util.IdGenerator; + +/** + * Generator of random block IDs. + */ +@InterfaceAudience.Private +public class RandomBlockIdGenerator implements IdGenerator { + private final BlockManager blockManager; + + RandomBlockIdGenerator(FSNamesystem namesystem) { + this.blockManager = namesystem.getBlockManager(); + } + + @Override // NumberGenerator + public long nextValue() { + Block b = new Block(DFSUtil.getRandom().nextLong(), 0, 0); + while(isValidBlock(b)) { + b.setBlockId(DFSUtil.getRandom().nextLong()); + } + return b.getBlockId(); + } + + /** + * Returns whether the given block is one pointed-to by a file. + */ + private boolean isValidBlock(Block b) { + return (blockManager.getBlockCollection(b) != null); + } +} From 43f17f6e10e2be92af4f36d3802dae53497e44da Mon Sep 17 00:00:00 2001 From: Konstantin Shvachko Date: Sun, 13 Jan 2013 01:31:10 +0000 Subject: [PATCH 25/31] HDFS-1245. Change typo in Pluggable. 
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1432542 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt index a63406d06d6..4a3228fe421 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt @@ -701,7 +701,7 @@ Release 2.0.3-alpha - Unreleased HDFS-4274. BlockPoolSliceScanner does not close verification log during shutdown. (Chris Nauroth via suresh) - HDFS-1245. Plugable block id generation. (shv) + HDFS-1245. Pluggable block id generation. (shv) BREAKDOWN OF HDFS-3077 SUBTASKS From ce9b0a05b00df723b126e98d9feade3c0e3c0047 Mon Sep 17 00:00:00 2001 From: Thomas Graves Date: Mon, 14 Jan 2013 14:34:51 +0000 Subject: [PATCH 26/31] YARN-334. Maven RAT plugin is not checking all source files (tgraves) git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1432931 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-yarn-project/CHANGES.txt | 2 ++ .../hadoop-yarn/conf/yarn-site.xml | 13 +++++++++++++ ....apache.hadoop.security.token.TokenRenewer | 13 +++++++++++++ .../hadoop-yarn/hadoop-yarn-common/pom.xml | 18 ++++++++++++++++++ .../org.apache.hadoop.security.SecurityInfo | 13 +++++++++++++ ...ache.hadoop.security.token.TokenIdentifier | 13 +++++++++++++ ....apache.hadoop.security.token.TokenRenewer | 15 ++++++++++++++- .../webapps/static/dt-1.9.4/css/demo_page.css | 19 ++++++++++++++++++- .../main/resources/webapps/static/yarn.css | 17 +++++++++++++++++ .../webapps/static/yarn.dt.plugins.js | 17 +++++++++++++++++ .../org.apache.hadoop.security.SecurityInfo | 13 +++++++++++++ .../src/config.h.cmake | 17 +++++++++++++++++ .../org.apache.hadoop.security.SecurityInfo | 13 +++++++++++++ ...ache.hadoop.security.token.TokenIdentifier | 13 +++++++++++++ .../main/resources/container-log4j.properties | 13 +++++++++++++ .../yarn/server/nodemanager/MockApp.java | 0 .../server/nodemanager/MockContainer.java | 0 .../TestLocalResourcesTrackerImpl.java | 18 ++++++++++++++++++ .../mock-container-executer-with-error | 12 ++++++++++++ .../test/resources/mock-container-executor | 12 ++++++++++++ .../conf/capacity-scheduler.xml | 13 +++++++++++++ .../scheduler/SchedulerApplication.java | 17 +++++++++++++++++ .../fair/FairSchedulerConfiguration.java | 17 +++++++++++++++++ .../dao/FairSchedulerLeafQueueInfo.java | 18 ++++++++++++++++++ ....apache.hadoop.security.token.TokenRenewer | 13 +++++++++++++ hadoop-yarn-project/hadoop-yarn/pom.xml | 7 ++++--- hadoop-yarn-project/pom.xml | 6 +++--- 27 files changed, 334 insertions(+), 8 deletions(-) delete mode 100644 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/MockApp.java delete mode 100644 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/MockContainer.java diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt index 2f071b8e7ee..be144837d0a 100644 --- a/hadoop-yarn-project/CHANGES.txt +++ b/hadoop-yarn-project/CHANGES.txt @@ -293,6 +293,8 @@ Release 0.23.6 - UNRELEASED YARN-325. RM CapacityScheduler can deadlock when getQueueInfo() is called and a container is completing (Arun C Murthy via tgraves) + YARN-334. 
Maven RAT plugin is not checking all source files (tgraves) + Release 0.23.5 - UNRELEASED INCOMPATIBLE CHANGES diff --git a/hadoop-yarn-project/hadoop-yarn/conf/yarn-site.xml b/hadoop-yarn-project/hadoop-yarn/conf/yarn-site.xml index d14deea6cc5..25292c75e3e 100644 --- a/hadoop-yarn-project/hadoop-yarn/conf/yarn-site.xml +++ b/hadoop-yarn-project/hadoop-yarn/conf/yarn-site.xml @@ -1,4 +1,17 @@ + diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/resources/META-INF/services/org.apache.hadoop.security.token.TokenRenewer b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/resources/META-INF/services/org.apache.hadoop.security.token.TokenRenewer index 6625a9cfe53..9e78b1187e2 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/resources/META-INF/services/org.apache.hadoop.security.token.TokenRenewer +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/resources/META-INF/services/org.apache.hadoop.security.token.TokenRenewer @@ -1 +1,14 @@ +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# org.apache.hadoop.yarn.security.RMDelegationTokenRenewer; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/pom.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/pom.xml index b7f4df73ff8..a08d3034a11 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/pom.xml +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/pom.xml @@ -46,6 +46,24 @@ + + org.apache.rat + apache-rat-plugin + + + src/main/resources/webapps/mapreduce/.keep + src/main/resources/webapps/jobhistory/.keep + src/main/resources/webapps/yarn/.keep + src/main/resources/webapps/cluster/.keep + src/main/resources/webapps/test/.keep + src/main/resources/webapps/proxy/.keep + src/main/resources/webapps/node/.keep + src/main/resources/webapps/static/dt-1.9.4/css/jui-dt.css + src/main/resources/webapps/static/dt-1.9.4/css/demo_table.css + src/main/resources/webapps/static/jquery/themes-1.9.1/base/jquery-ui.css + + + maven-jar-plugin diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/META-INF/services/org.apache.hadoop.security.SecurityInfo b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/META-INF/services/org.apache.hadoop.security.SecurityInfo index c4c6eef295b..babc2fbf8e4 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/META-INF/services/org.apache.hadoop.security.SecurityInfo +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/META-INF/services/org.apache.hadoop.security.SecurityInfo @@ -1,3 +1,16 @@ +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# org.apache.hadoop.yarn.security.client.ClientRMSecurityInfo org.apache.hadoop.yarn.security.ContainerManagerSecurityInfo org.apache.hadoop.yarn.security.SchedulerSecurityInfo diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/META-INF/services/org.apache.hadoop.security.token.TokenIdentifier b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/META-INF/services/org.apache.hadoop.security.token.TokenIdentifier index fc669de1572..233404037e5 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/META-INF/services/org.apache.hadoop.security.token.TokenIdentifier +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/META-INF/services/org.apache.hadoop.security.token.TokenIdentifier @@ -1,3 +1,16 @@ +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# org.apache.hadoop.yarn.security.ContainerTokenIdentifier org.apache.hadoop.yarn.security.ApplicationTokenIdentifier org.apache.hadoop.yarn.security.client.ClientTokenIdentifier diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/META-INF/services/org.apache.hadoop.security.token.TokenRenewer b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/META-INF/services/org.apache.hadoop.security.token.TokenRenewer index c19ebc31e5e..3380cb8b019 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/META-INF/services/org.apache.hadoop.security.token.TokenRenewer +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/META-INF/services/org.apache.hadoop.security.token.TokenRenewer @@ -1,2 +1,15 @@ +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# org.apache.hadoop.yarn.security.ApplicationTokenIdentifier$Renewer -org.apache.hadoop.yarn.security.ContainerTokenIdentifier$Renewer \ No newline at end of file +org.apache.hadoop.yarn.security.ContainerTokenIdentifier$Renewer diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/dt-1.9.4/css/demo_page.css b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/dt-1.9.4/css/demo_page.css index bee7b0d9936..b60ee7de6ae 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/dt-1.9.4/css/demo_page.css +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/dt-1.9.4/css/demo_page.css @@ -1,3 +1,20 @@ +/** +* Licensed to the Apache Software Foundation (ASF) under one +* or more contributor license agreements. See the NOTICE file +* distributed with this work for additional information +* regarding copyright ownership. The ASF licenses this file +* to you under the Apache License, Version 2.0 (the +* "License"); you may not use this file except in compliance +* with the License. You may obtain a copy of the License at +* +* http://www.apache.org/licenses/LICENSE-2.0 +* +* Unless required by applicable law or agreed to in writing, software +* distributed under the License is distributed on an "AS IS" BASIS, +* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +* See the License for the specific language governing permissions and +* limitations under the License. +*/ /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * General page setup @@ -90,4 +107,4 @@ .css_left { float: left; -} \ No newline at end of file +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/yarn.css b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/yarn.css index 979920b642e..9455a59cd1f 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/yarn.css +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/yarn.css @@ -1,3 +1,20 @@ +/* +* Licensed to the Apache Software Foundation (ASF) under one or more +* contributor license agreements. See the NOTICE file distributed with +* this work for additional information regarding copyright ownership. +* The ASF licenses this file to You under the Apache License, Version 2.0 +* (the "License"); you may not use this file except in compliance with +* the License. You may obtain a copy of the License at +* +* http://www.apache.org/licenses/LICENSE-2.0 +* +* Unless required by applicable law or agreed to in writing, software +* distributed under the License is distributed on an "AS IS" BASIS, +* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +* See the License for the specific language governing permissions and +* limitations under the License. 
+*/ + /* Styles for YARN */ * { margin: 0; border: 0 } html, body { height: 100% } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/yarn.dt.plugins.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/yarn.dt.plugins.js index 5d78aaaf7fb..3f42c7cc2f5 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/yarn.dt.plugins.js +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/yarn.dt.plugins.js @@ -1,3 +1,20 @@ + +// Licensed to the Apache Software Foundation (ASF) under one or more +// contributor license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright ownership. +// The ASF licenses this file to You under the Apache License, Version 2.0 +// (the "License"); you may not use this file except in compliance with +// the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + + if (!jQuery.fn.dataTableExt.fnVersionCheck("1.7.5")) { alert("These plugins requires dataTables 1.7.5+"); } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/resources/META-INF/services/org.apache.hadoop.security.SecurityInfo b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/resources/META-INF/services/org.apache.hadoop.security.SecurityInfo index 2e92fa85737..5a2a0095e52 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/resources/META-INF/services/org.apache.hadoop.security.SecurityInfo +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/resources/META-INF/services/org.apache.hadoop.security.SecurityInfo @@ -1 +1,14 @@ +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# org.apache.hadoop.yarn.server.RMNMSecurityInfoClass diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/config.h.cmake b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/config.h.cmake index 1fff36131f6..4b59531d877 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/config.h.cmake +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/config.h.cmake @@ -1,3 +1,20 @@ +/** +* Licensed to the Apache Software Foundation (ASF) under one +* or more contributor license agreements. See the NOTICE file +* distributed with this work for additional information +* regarding copyright ownership. 
The ASF licenses this file +* to you under the Apache License, Version 2.0 (the +* "License"); you may not use this file except in compliance +* with the License. You may obtain a copy of the License at +* +* http://www.apache.org/licenses/LICENSE-2.0 +* +* Unless required by applicable law or agreed to in writing, software +* distributed under the License is distributed on an "AS IS" BASIS, +* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +* See the License for the specific language governing permissions and +* limitations under the License. +*/ #ifndef CONFIG_H #define CONFIG_H diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/resources/META-INF/services/org.apache.hadoop.security.SecurityInfo b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/resources/META-INF/services/org.apache.hadoop.security.SecurityInfo index 3cfe0f7fcef..c00c943ddd7 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/resources/META-INF/services/org.apache.hadoop.security.SecurityInfo +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/resources/META-INF/services/org.apache.hadoop.security.SecurityInfo @@ -1 +1,14 @@ +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.security.LocalizerSecurityInfo diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/resources/META-INF/services/org.apache.hadoop.security.token.TokenIdentifier b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/resources/META-INF/services/org.apache.hadoop.security.token.TokenIdentifier index 6ed6e3261e2..539028d5b0a 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/resources/META-INF/services/org.apache.hadoop.security.token.TokenIdentifier +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/resources/META-INF/services/org.apache.hadoop.security.token.TokenIdentifier @@ -1 +1,14 @@ +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.security.LocalizerTokenIdentifier diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/resources/container-log4j.properties b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/resources/container-log4j.properties index 7c859535c88..bc120f14a52 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/resources/container-log4j.properties +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/resources/container-log4j.properties @@ -1,3 +1,16 @@ +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. See accompanying LICENSE file. +# # Define some default values that can be overridden by system properties hadoop.root.logger=DEBUG,CLA diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/MockApp.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/MockApp.java deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/MockContainer.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/MockContainer.java deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/TestLocalResourcesTrackerImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/TestLocalResourcesTrackerImpl.java index a97294533cb..0e0a47200a5 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/TestLocalResourcesTrackerImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/TestLocalResourcesTrackerImpl.java @@ -1,3 +1,21 @@ +/** +* Licensed to the Apache Software Foundation (ASF) under one +* or more contributor license agreements. See the NOTICE file +* distributed with this work for additional information +* regarding copyright ownership. The ASF licenses this file +* to you under the Apache License, Version 2.0 (the +* "License"); you may not use this file except in compliance +* with the License. 
You may obtain a copy of the License at +* +* http://www.apache.org/licenses/LICENSE-2.0 +* +* Unless required by applicable law or agreed to in writing, software +* distributed under the License is distributed on an "AS IS" BASIS, +* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +* See the License for the specific language governing permissions and +* limitations under the License. +*/ + package org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer; import static org.mockito.Mockito.any; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/resources/mock-container-executer-with-error b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/resources/mock-container-executer-with-error index 4f3432cbb80..0a9ef9fff31 100755 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/resources/mock-container-executer-with-error +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/resources/mock-container-executer-with-error @@ -1,4 +1,16 @@ #!/bin/sh +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + for PARAM in "$@" do echo $PARAM; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/resources/mock-container-executor b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/resources/mock-container-executor index d71bd6cec86..0b5986a6a12 100755 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/resources/mock-container-executor +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/resources/mock-container-executor @@ -1,4 +1,16 @@ #!/bin/sh +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ for PARAM in "$@" do echo $PARAM; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/conf/capacity-scheduler.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/conf/capacity-scheduler.xml index 39682bdb7a6..0e4fe6b02e9 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/conf/capacity-scheduler.xml +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/conf/capacity-scheduler.xml @@ -1,3 +1,16 @@ + diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerApplication.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerApplication.java index 51d65e3969f..cc9b872724d 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerApplication.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerApplication.java @@ -1,3 +1,20 @@ +/** +* Licensed to the Apache Software Foundation (ASF) under one +* or more contributor license agreements. See the NOTICE file +* distributed with this work for additional information +* regarding copyright ownership. The ASF licenses this file +* to you under the Apache License, Version 2.0 (the +* "License"); you may not use this file except in compliance +* with the License. You may obtain a copy of the License at +* +* http://www.apache.org/licenses/LICENSE-2.0 +* +* Unless required by applicable law or agreed to in writing, software +* distributed under the License is distributed on an "AS IS" BASIS, +* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +* See the License for the specific language governing permissions and +* limitations under the License. +*/ package org.apache.hadoop.yarn.server.resourcemanager.scheduler; import java.util.Collection; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairSchedulerConfiguration.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairSchedulerConfiguration.java index f5706d93bfd..06dc436d638 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairSchedulerConfiguration.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairSchedulerConfiguration.java @@ -1,3 +1,20 @@ +/** +* Licensed to the Apache Software Foundation (ASF) under one +* or more contributor license agreements. See the NOTICE file +* distributed with this work for additional information +* regarding copyright ownership. The ASF licenses this file +* to you under the Apache License, Version 2.0 (the +* "License"); you may not use this file except in compliance +* with the License. 
You may obtain a copy of the License at +* +* http://www.apache.org/licenses/LICENSE-2.0 +* +* Unless required by applicable law or agreed to in writing, software +* distributed under the License is distributed on an "AS IS" BASIS, +* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +* See the License for the specific language governing permissions and +* limitations under the License. +*/ package org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair; import java.io.File; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/FairSchedulerLeafQueueInfo.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/FairSchedulerLeafQueueInfo.java index bee1cfd9866..9a74ef4fe06 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/FairSchedulerLeafQueueInfo.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/FairSchedulerLeafQueueInfo.java @@ -1,3 +1,21 @@ +/** +* Licensed to the Apache Software Foundation (ASF) under one +* or more contributor license agreements. See the NOTICE file +* distributed with this work for additional information +* regarding copyright ownership. The ASF licenses this file +* to you under the Apache License, Version 2.0 (the +* "License"); you may not use this file except in compliance +* with the License. You may obtain a copy of the License at +* +* http://www.apache.org/licenses/LICENSE-2.0 +* +* Unless required by applicable law or agreed to in writing, software +* distributed under the License is distributed on an "AS IS" BASIS, +* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +* See the License for the specific language governing permissions and +* limitations under the License. +*/ + package org.apache.hadoop.yarn.server.resourcemanager.webapp.dao; import java.util.Collection; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/resources/META-INF/services/org.apache.hadoop.security.token.TokenRenewer b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/resources/META-INF/services/org.apache.hadoop.security.token.TokenRenewer index 24c0713a5d5..f48ffa80b4f 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/resources/META-INF/services/org.apache.hadoop.security.token.TokenRenewer +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/resources/META-INF/services/org.apache.hadoop.security.token.TokenRenewer @@ -1 +1,14 @@ +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# org.apache.hadoop.yarn.server.resourcemanager.security.TestDelegationTokenRenewer$Renewer diff --git a/hadoop-yarn-project/hadoop-yarn/pom.xml b/hadoop-yarn-project/hadoop-yarn/pom.xml index 559be4f4df4..5d261a2804d 100644 --- a/hadoop-yarn-project/hadoop-yarn/pom.xml +++ b/hadoop-yarn-project/hadoop-yarn/pom.xml @@ -149,9 +149,10 @@ org.apache.rat apache-rat-plugin - - pom.xml - + + conf/slaves + conf/container-executor.cfg + diff --git a/hadoop-yarn-project/pom.xml b/hadoop-yarn-project/pom.xml index c1e38967c5d..a0eed4838c2 100644 --- a/hadoop-yarn-project/pom.xml +++ b/hadoop-yarn-project/pom.xml @@ -213,9 +213,9 @@ org.apache.rat apache-rat-plugin - - pom.xml - + + CHANGES.txt + From e7aeb9d129f0de9acd7fff8559a54ecaf7024481 Mon Sep 17 00:00:00 2001 From: Thomas Graves Date: Mon, 14 Jan 2013 14:35:45 +0000 Subject: [PATCH 27/31] MAPREDUCE-4934. Maven RAT plugin is not checking all source files (tgraves) git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1432932 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-mapreduce-project/CHANGES.txt | 2 ++ .../conf/mapred-site.xml.template | 13 +++++++++++++ .../org.apache.hadoop.security.SecurityInfo | 13 +++++++++++++ .../app/launcher/TestContainerLauncherImpl.java | 17 +++++++++++++++++ .../app/local/TestLocalContainerAllocator.java | 17 +++++++++++++++++ ...apache.hadoop.security.token.TokenIdentifier | 13 +++++++++++++ ...rg.apache.hadoop.security.token.TokenRenewer | 13 +++++++++++++ ...apache.hadoop.security.token.TokenIdentifier | 13 +++++++++++++ ...rg.apache.hadoop.security.token.TokenRenewer | 13 +++++++++++++ .../hadoop-mapreduce-client-hs/pom.xml | 16 ++++++++++++++++ .../hadoop/mapreduce/v2/hs/MockHistoryJobs.java | 17 +++++++++++++++++ .../mapreduce/v2/hs/TestJobHistoryEntities.java | 17 +++++++++++++++++ .../hadoop-mapreduce-client-jobclient/pom.xml | 9 +++++++++ .../org.apache.hadoop.security.SecurityInfo | 13 +++++++++++++ .../hadoop-mapreduce-examples/pom.xml | 10 +++++++++- .../apache/hadoop/examples/TestWordStats.java | 17 +++++++++++++++++ hadoop-mapreduce-project/pom.xml | 8 +++++--- 17 files changed, 217 insertions(+), 4 deletions(-) diff --git a/hadoop-mapreduce-project/CHANGES.txt b/hadoop-mapreduce-project/CHANGES.txt index 6ec1fcfb8cb..eeec45d2cdd 100644 --- a/hadoop-mapreduce-project/CHANGES.txt +++ b/hadoop-mapreduce-project/CHANGES.txt @@ -694,6 +694,8 @@ Release 0.23.6 - UNRELEASED MAPREDUCE-4921. JobClient should acquire HS token with RM principal (daryn via bobby) + MAPREDUCE-4934. 
Maven RAT plugin is not checking all source files (tgraves) + Release 0.23.5 - UNRELEASED INCOMPATIBLE CHANGES diff --git a/hadoop-mapreduce-project/conf/mapred-site.xml.template b/hadoop-mapreduce-project/conf/mapred-site.xml.template index 970c8fe0e8d..761c352dd09 100644 --- a/hadoop-mapreduce-project/conf/mapred-site.xml.template +++ b/hadoop-mapreduce-project/conf/mapred-site.xml.template @@ -1,5 +1,18 @@ + diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/resources/META-INF/services/org.apache.hadoop.security.SecurityInfo b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/resources/META-INF/services/org.apache.hadoop.security.SecurityInfo index 35c4af09a71..3f30deb069a 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/resources/META-INF/services/org.apache.hadoop.security.SecurityInfo +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/resources/META-INF/services/org.apache.hadoop.security.SecurityInfo @@ -1 +1,14 @@ +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# org.apache.hadoop.mapreduce.v2.app.MRClientSecurityInfo diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/launcher/TestContainerLauncherImpl.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/launcher/TestContainerLauncherImpl.java index a53bbe69072..05164173c96 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/launcher/TestContainerLauncherImpl.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/launcher/TestContainerLauncherImpl.java @@ -1,3 +1,20 @@ +/** +* Licensed to the Apache Software Foundation (ASF) under one +* or more contributor license agreements. See the NOTICE file +* distributed with this work for additional information +* regarding copyright ownership. The ASF licenses this file +* to you under the Apache License, Version 2.0 (the +* "License"); you may not use this file except in compliance +* with the License. You may obtain a copy of the License at +* +* http://www.apache.org/licenses/LICENSE-2.0 +* +* Unless required by applicable law or agreed to in writing, software +* distributed under the License is distributed on an "AS IS" BASIS, +* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +* See the License for the specific language governing permissions and +* limitations under the License. 
+*/ package org.apache.hadoop.mapreduce.v2.app.launcher; import static org.mockito.Matchers.any; diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/local/TestLocalContainerAllocator.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/local/TestLocalContainerAllocator.java index 572e4942dba..91bbcb066fd 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/local/TestLocalContainerAllocator.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/local/TestLocalContainerAllocator.java @@ -1,3 +1,20 @@ +/** +* Licensed to the Apache Software Foundation (ASF) under one +* or more contributor license agreements. See the NOTICE file +* distributed with this work for additional information +* regarding copyright ownership. The ASF licenses this file +* to you under the Apache License, Version 2.0 (the +* "License"); you may not use this file except in compliance +* with the License. You may obtain a copy of the License at +* +* http://www.apache.org/licenses/LICENSE-2.0 +* +* Unless required by applicable law or agreed to in writing, software +* distributed under the License is distributed on an "AS IS" BASIS, +* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +* See the License for the specific language governing permissions and +* limitations under the License. +*/ package org.apache.hadoop.mapreduce.v2.app.local; import static org.mockito.Matchers.isA; diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/resources/META-INF/services/org.apache.hadoop.security.token.TokenIdentifier b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/resources/META-INF/services/org.apache.hadoop.security.token.TokenIdentifier index 0975deab7e7..cc2c32d75aa 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/resources/META-INF/services/org.apache.hadoop.security.token.TokenIdentifier +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/resources/META-INF/services/org.apache.hadoop.security.token.TokenIdentifier @@ -1 +1,14 @@ +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# org.apache.hadoop.mapreduce.v2.api.MRDelegationTokenIdentifier diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/resources/META-INF/services/org.apache.hadoop.security.token.TokenRenewer b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/resources/META-INF/services/org.apache.hadoop.security.token.TokenRenewer index 76846fc1c5e..aa5b6f120d8 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/resources/META-INF/services/org.apache.hadoop.security.token.TokenRenewer +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/resources/META-INF/services/org.apache.hadoop.security.token.TokenRenewer @@ -1 +1,14 @@ +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# org.apache.hadoop.mapreduce.v2.security.MRDelegationTokenRenewer diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/META-INF/services/org.apache.hadoop.security.token.TokenIdentifier b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/META-INF/services/org.apache.hadoop.security.token.TokenIdentifier index f797a6aa6ff..61d09500003 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/META-INF/services/org.apache.hadoop.security.token.TokenIdentifier +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/META-INF/services/org.apache.hadoop.security.token.TokenIdentifier @@ -1,2 +1,15 @@ +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# org.apache.hadoop.mapreduce.security.token.delegation.DelegationTokenIdentifier org.apache.hadoop.mapreduce.security.token.JobTokenIdentifier diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/META-INF/services/org.apache.hadoop.security.token.TokenRenewer b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/META-INF/services/org.apache.hadoop.security.token.TokenRenewer index 7bb8265629b..c14f282825c 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/META-INF/services/org.apache.hadoop.security.token.TokenRenewer +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/META-INF/services/org.apache.hadoop.security.token.TokenRenewer @@ -1 +1,14 @@ +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# org.apache.hadoop.mapreduce.security.token.JobTokenIdentifier$Renewer diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/pom.xml b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/pom.xml index caf65d7efe7..9d63ee9dfed 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/pom.xml +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/pom.xml @@ -54,4 +54,20 @@ test + + + + + org.apache.rat + apache-rat-plugin + + + src/test/resources/job_1329348432655_0001_conf.xml + src/test/resources/job_1329348432655_0001-1329348443227-user-Sleep+job-1329348468601-10-1-SUCCEEDED-default.jhist + + + + + + diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/MockHistoryJobs.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/MockHistoryJobs.java index da983948d10..ac37bbce7ff 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/MockHistoryJobs.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/MockHistoryJobs.java @@ -1,3 +1,20 @@ +/** +* Licensed to the Apache Software Foundation (ASF) under one +* or more contributor license agreements. See the NOTICE file +* distributed with this work for additional information +* regarding copyright ownership. The ASF licenses this file +* to you under the Apache License, Version 2.0 (the +* "License"); you may not use this file except in compliance +* with the License. You may obtain a copy of the License at +* +* http://www.apache.org/licenses/LICENSE-2.0 +* +* Unless required by applicable law or agreed to in writing, software +* distributed under the License is distributed on an "AS IS" BASIS, +* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+* See the License for the specific language governing permissions and +* limitations under the License. +*/ package org.apache.hadoop.mapreduce.v2.hs; import java.io.IOException; diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/TestJobHistoryEntities.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/TestJobHistoryEntities.java index 1080ebe5325..69b4bd7ac3b 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/TestJobHistoryEntities.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/TestJobHistoryEntities.java @@ -1,3 +1,20 @@ +/** +* Licensed to the Apache Software Foundation (ASF) under one +* or more contributor license agreements. See the NOTICE file +* distributed with this work for additional information +* regarding copyright ownership. The ASF licenses this file +* to you under the Apache License, Version 2.0 (the +* "License"); you may not use this file except in compliance +* with the License. You may obtain a copy of the License at +* +* http://www.apache.org/licenses/LICENSE-2.0 +* +* Unless required by applicable law or agreed to in writing, software +* distributed under the License is distributed on an "AS IS" BASIS, +* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +* See the License for the specific language governing permissions and +* limitations under the License. +*/ package org.apache.hadoop.mapreduce.v2.hs; import static junit.framework.Assert.assertEquals; diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/pom.xml b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/pom.xml index 65e5f1038e9..f7b27d4246a 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/pom.xml +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/pom.xml @@ -148,6 +148,15 @@ + + org.apache.rat + apache-rat-plugin + + + src/test/java/org/apache/hadoop/cli/data60bytes + + + diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/resources/META-INF/services/org.apache.hadoop.security.SecurityInfo b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/resources/META-INF/services/org.apache.hadoop.security.SecurityInfo index 10ab75be7d6..4866b2efef2 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/resources/META-INF/services/org.apache.hadoop.security.SecurityInfo +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/resources/META-INF/services/org.apache.hadoop.security.SecurityInfo @@ -1 +1,14 @@ +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# org.apache.hadoop.mapreduce.v2.security.client.ClientHSSecurityInfo diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-examples/pom.xml b/hadoop-mapreduce-project/hadoop-mapreduce-examples/pom.xml index 5b2231e0dbd..199791eedbf 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-examples/pom.xml +++ b/hadoop-mapreduce-project/hadoop-mapreduce-examples/pom.xml @@ -134,7 +134,15 @@ Max - + + org.apache.rat + apache-rat-plugin + + + src/main/java/org/apache/hadoop/examples/dancing/puzzle1.dta + + + diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/test/java/org/apache/hadoop/examples/TestWordStats.java b/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/test/java/org/apache/hadoop/examples/TestWordStats.java index 54575165746..56b358ef570 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/test/java/org/apache/hadoop/examples/TestWordStats.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/test/java/org/apache/hadoop/examples/TestWordStats.java @@ -1,3 +1,20 @@ +/** +* Licensed to the Apache Software Foundation (ASF) under one +* or more contributor license agreements. See the NOTICE file +* distributed with this work for additional information +* regarding copyright ownership. The ASF licenses this file +* to you under the Apache License, Version 2.0 (the +* "License"); you may not use this file except in compliance +* with the License. You may obtain a copy of the License at +* +* http://www.apache.org/licenses/LICENSE-2.0 +* +* Unless required by applicable law or agreed to in writing, software +* distributed under the License is distributed on an "AS IS" BASIS, +* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +* See the License for the specific language governing permissions and +* limitations under the License. +*/ package org.apache.hadoop.examples; import static org.junit.Assert.assertEquals; diff --git a/hadoop-mapreduce-project/pom.xml b/hadoop-mapreduce-project/pom.xml index 294441d4ade..f06ca48cb56 100644 --- a/hadoop-mapreduce-project/pom.xml +++ b/hadoop-mapreduce-project/pom.xml @@ -214,9 +214,11 @@ org.apache.rat apache-rat-plugin - - pom.xml - + + .eclipse.templates/ + CHANGES.txt + lib/jdiff/** + From e26da7754492fb54fe4ee96ceb80aea2bee03e37 Mon Sep 17 00:00:00 2001 From: Thomas Graves Date: Mon, 14 Jan 2013 14:36:40 +0000 Subject: [PATCH 28/31] HDFS-4385. 
Maven RAT plugin is not checking all source files (tgraves) git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1432933 13f79535-47bb-0310-9956-ffa450edef68 --- .../hadoop-hdfs-httpfs/pom.xml | 2 ++ hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 2 ++ hadoop-hdfs-project/hadoop-hdfs/LICENSE.txt | 27 +++++++++++++++++++ hadoop-hdfs-project/hadoop-hdfs/pom.xml | 2 ++ .../hadoop-hdfs/src/config.h.cmake | 17 ++++++++++++ .../protocolProtocolBuffers/overview.html | 0 .../hdfs/server/namenode/BackupState.java | 17 ++++++++++++ ...ache.hadoop.security.token.TokenIdentifier | 13 +++++++++ ....apache.hadoop.security.token.TokenRenewer | 13 +++++++++ .../server/datanode/DataXceiverAspects.aj | 0 .../org/apache/hadoop/tools/FakeRenewer.java | 19 ++++++++++++- ....apache.hadoop.security.token.TokenRenewer | 13 +++++++++ hadoop-hdfs-project/pom.xml | 3 --- 13 files changed, 124 insertions(+), 4 deletions(-) delete mode 100644 hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolProtocolBuffers/overview.html delete mode 100644 hadoop-hdfs-project/hadoop-hdfs/src/test/aop/org/apache/hadoop/hdfs/server/datanode/DataXceiverAspects.aj diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/pom.xml b/hadoop-hdfs-project/hadoop-hdfs-httpfs/pom.xml index ec02b61d1be..fb5febbe18f 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/pom.xml +++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/pom.xml @@ -359,6 +359,8 @@ apache-rat-plugin + src/test/resources/classutils.txt + src/main/conf/httpfs-signature.secret diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt index 4a3228fe421..69773f9ae58 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt @@ -2202,6 +2202,8 @@ Release 0.23.6 - UNRELEASED HDFS-4248. Renaming directories may incorrectly remove the paths in leases under the tree. (daryn via szetszwo) + HDFS-4385. Maven RAT plugin is not checking all source files (tgraves) + Release 0.23.5 - UNRELEASED INCOMPATIBLE CHANGES diff --git a/hadoop-hdfs-project/hadoop-hdfs/LICENSE.txt b/hadoop-hdfs-project/hadoop-hdfs/LICENSE.txt index 59bcdbc9783..966012349ba 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/LICENSE.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/LICENSE.txt @@ -242,3 +242,30 @@ For the org.apache.hadoop.util.bloom.* classes: * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. */ + +For src/main/native/util/tree.h: + +/*- + * Copyright 2002 Niels Provos + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ diff --git a/hadoop-hdfs-project/hadoop-hdfs/pom.xml b/hadoop-hdfs-project/hadoop-hdfs/pom.xml index 67799c85700..6d110e8d704 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/pom.xml +++ b/hadoop-hdfs-project/hadoop-hdfs/pom.xml @@ -516,6 +516,8 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd"> src/test/resources/data* src/test/resources/editsStored* src/test/resources/empty-file + src/main/native/util/tree.h + src/test/aop/org/apache/hadoop/hdfs/server/datanode/DataXceiverAspects.aj src/main/webapps/datanode/robots.txt src/main/docs/releasenotes.html src/contrib/** diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/config.h.cmake b/hadoop-hdfs-project/hadoop-hdfs/src/config.h.cmake index 912a4ba8546..ac0b5308cc3 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/config.h.cmake +++ b/hadoop-hdfs-project/hadoop-hdfs/src/config.h.cmake @@ -1,3 +1,20 @@ +/** +* Licensed to the Apache Software Foundation (ASF) under one +* or more contributor license agreements. See the NOTICE file +* distributed with this work for additional information +* regarding copyright ownership. The ASF licenses this file +* to you under the Apache License, Version 2.0 (the +* "License"); you may not use this file except in compliance +* with the License. You may obtain a copy of the License at +* +* http://www.apache.org/licenses/LICENSE-2.0 +* +* Unless required by applicable law or agreed to in writing, software +* distributed under the License is distributed on an "AS IS" BASIS, +* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +* See the License for the specific language governing permissions and +* limitations under the License. +*/ #ifndef CONFIG_H #define CONFIG_H diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolProtocolBuffers/overview.html b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolProtocolBuffers/overview.html deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupState.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupState.java index ce11fc9e687..e2a5035e962 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupState.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupState.java @@ -1,3 +1,20 @@ +/** +* Licensed to the Apache Software Foundation (ASF) under one +* or more contributor license agreements. See the NOTICE file +* distributed with this work for additional information +* regarding copyright ownership. The ASF licenses this file +* to you under the Apache License, Version 2.0 (the +* "License"); you may not use this file except in compliance +* with the License. 
You may obtain a copy of the License at +* +* http://www.apache.org/licenses/LICENSE-2.0 +* +* Unless required by applicable law or agreed to in writing, software +* distributed under the License is distributed on an "AS IS" BASIS, +* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +* See the License for the specific language governing permissions and +* limitations under the License. +*/ package org.apache.hadoop.hdfs.server.namenode; import java.io.IOException; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/META-INF/services/org.apache.hadoop.security.token.TokenIdentifier b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/META-INF/services/org.apache.hadoop.security.token.TokenIdentifier index 10b874b6855..59603a96eb1 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/META-INF/services/org.apache.hadoop.security.token.TokenIdentifier +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/META-INF/services/org.apache.hadoop.security.token.TokenIdentifier @@ -1,2 +1,15 @@ +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/META-INF/services/org.apache.hadoop.security.token.TokenRenewer b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/META-INF/services/org.apache.hadoop.security.token.TokenRenewer index 20addd74b00..5889c12d329 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/META-INF/services/org.apache.hadoop.security.token.TokenRenewer +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/META-INF/services/org.apache.hadoop.security.token.TokenRenewer @@ -1,3 +1,16 @@ +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# org.apache.hadoop.hdfs.DFSClient$Renewer org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier$Renewer org.apache.hadoop.hdfs.HftpFileSystem$TokenManager diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/aop/org/apache/hadoop/hdfs/server/datanode/DataXceiverAspects.aj b/hadoop-hdfs-project/hadoop-hdfs/src/test/aop/org/apache/hadoop/hdfs/server/datanode/DataXceiverAspects.aj deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tools/FakeRenewer.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tools/FakeRenewer.java index d6f9171b1d6..00f9815537c 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tools/FakeRenewer.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tools/FakeRenewer.java @@ -1,3 +1,20 @@ +/** +* Licensed to the Apache Software Foundation (ASF) under one +* or more contributor license agreements. See the NOTICE file +* distributed with this work for additional information +* regarding copyright ownership. The ASF licenses this file +* to you under the Apache License, Version 2.0 (the +* "License"); you may not use this file except in compliance +* with the License. You may obtain a copy of the License at +* +* http://www.apache.org/licenses/LICENSE-2.0 +* +* Unless required by applicable law or agreed to in writing, software +* distributed under the License is distributed on an "AS IS" BASIS, +* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +* See the License for the specific language governing permissions and +* limitations under the License. +*/ package org.apache.hadoop.tools; import java.io.IOException; @@ -37,4 +54,4 @@ public class FakeRenewer extends TokenRenewer { lastRenewed = null; lastCanceled = null; } -} \ No newline at end of file +} diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/META-INF/services/org.apache.hadoop.security.token.TokenRenewer b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/META-INF/services/org.apache.hadoop.security.token.TokenRenewer index 721b9961607..e514c9b647c 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/META-INF/services/org.apache.hadoop.security.token.TokenRenewer +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/META-INF/services/org.apache.hadoop.security.token.TokenRenewer @@ -1 +1,14 @@ +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# org.apache.hadoop.tools.FakeRenewer diff --git a/hadoop-hdfs-project/pom.xml b/hadoop-hdfs-project/pom.xml index 27161004a36..49596ef804e 100644 --- a/hadoop-hdfs-project/pom.xml +++ b/hadoop-hdfs-project/pom.xml @@ -48,9 +48,6 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd"> org.apache.rat apache-rat-plugin - - pom.xml - From ab0e94d2a5664adf10f54b98da383e8a389c067b Mon Sep 17 00:00:00 2001 From: Thomas Graves Date: Mon, 14 Jan 2013 14:37:12 +0000 Subject: [PATCH 29/31] HADOOP-9097. 
Maven RAT plugin is not checking all source files (tgraves) git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1432934 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-common-project/hadoop-common/CHANGES.txt | 2 ++ hadoop-common-project/hadoop-common/pom.xml | 3 ++- .../hadoop-common/src/config.h.cmake | 17 +++++++++++++++++ .../org.apache.hadoop.security.SecurityInfo | 13 +++++++++++++ .../hadoop/fs/TestDelegationTokenRenewer.java | 17 +++++++++++++++++ ...apache.hadoop.security.token.TokenIdentifier | 13 +++++++++++++ hadoop-common-project/pom.xml | 3 --- hadoop-dist/pom.xml | 3 --- .../src/main/resources/distcp-default.xml | 15 +++++++++++++++ .../hadoop-distcp/src/site/xdoc/appendix.xml | 15 +++++++++++++++ .../src/site/xdoc/architecture.xml | 15 +++++++++++++++ .../hadoop-distcp/src/site/xdoc/cli.xml | 15 +++++++++++++++ .../hadoop-distcp/src/site/xdoc/index.xml | 15 +++++++++++++++ .../hadoop-distcp/src/site/xdoc/usage.xml | 15 +++++++++++++++ .../src/test/resources/sslConfig.xml | 15 +++++++++++++++ .../src/main/native/examples/conf/word-part.xml | 17 +++++++++++++++++ .../src/main/native/examples/conf/word.xml | 17 +++++++++++++++++ .../pipes/debug/pipes-default-gdb-commands.txt | 11 +++++++++++ .../native/pipes/debug/pipes-default-script | 11 +++++++++++ .../tools/rumen/anonymization/WordList.java | 17 +++++++++++++++++ hadoop-tools/hadoop-tools-dist/pom.xml | 3 --- hadoop-tools/pom.xml | 3 --- pom.xml | 14 ++++++++------ 23 files changed, 250 insertions(+), 19 deletions(-) diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt index 2ca826bfb99..05ff9d16826 100644 --- a/hadoop-common-project/hadoop-common/CHANGES.txt +++ b/hadoop-common-project/hadoop-common/CHANGES.txt @@ -1250,6 +1250,8 @@ Release 0.23.6 - UNRELEASED HADOOP-9105. FsShell -moveFromLocal erroneously fails (daryn via bobby) + HADOOP-9097. Maven RAT plugin is not checking all source files (tgraves) + Release 0.23.5 - UNRELEASED INCOMPATIBLE CHANGES diff --git a/hadoop-common-project/hadoop-common/pom.xml b/hadoop-common-project/hadoop-common/pom.xml index a4f7ceabc00..fe17808738d 100644 --- a/hadoop-common-project/hadoop-common/pom.xml +++ b/hadoop-common-project/hadoop-common/pom.xml @@ -445,10 +445,11 @@ dev-support/jdiff/** src/main/native/* src/main/native/config/* - src/main/resources/META-INF/services/org.apache.hadoop.security.SecurityInfo src/main/native/m4/* src/test/empty-file src/test/all-tests + src/test/resources/kdc/ldif/users.ldif + src/main/native/src/org/apache/hadoop/io/compress/lz4/lz4.c diff --git a/hadoop-common-project/hadoop-common/src/config.h.cmake b/hadoop-common-project/hadoop-common/src/config.h.cmake index 7423de73a82..e720d306570 100644 --- a/hadoop-common-project/hadoop-common/src/config.h.cmake +++ b/hadoop-common-project/hadoop-common/src/config.h.cmake @@ -1,3 +1,20 @@ +/** +* Licensed to the Apache Software Foundation (ASF) under one +* or more contributor license agreements. See the NOTICE file +* distributed with this work for additional information +* regarding copyright ownership. The ASF licenses this file +* to you under the Apache License, Version 2.0 (the +* "License"); you may not use this file except in compliance +* with the License. 
You may obtain a copy of the License at +* +* http://www.apache.org/licenses/LICENSE-2.0 +* +* Unless required by applicable law or agreed to in writing, software +* distributed under the License is distributed on an "AS IS" BASIS, +* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +* See the License for the specific language governing permissions and +* limitations under the License. +*/ #ifndef CONFIG_H #define CONFIG_H diff --git a/hadoop-common-project/hadoop-common/src/main/resources/META-INF/services/org.apache.hadoop.security.SecurityInfo b/hadoop-common-project/hadoop-common/src/main/resources/META-INF/services/org.apache.hadoop.security.SecurityInfo index 5295f3be2fe..f7f3ec255df 100644 --- a/hadoop-common-project/hadoop-common/src/main/resources/META-INF/services/org.apache.hadoop.security.SecurityInfo +++ b/hadoop-common-project/hadoop-common/src/main/resources/META-INF/services/org.apache.hadoop.security.SecurityInfo @@ -1 +1,14 @@ +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# org.apache.hadoop.security.AnnotatedSecurityInfo diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestDelegationTokenRenewer.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestDelegationTokenRenewer.java index 3f1d34e99b7..5bd94c3ef43 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestDelegationTokenRenewer.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestDelegationTokenRenewer.java @@ -1,3 +1,20 @@ +/** +* Licensed to the Apache Software Foundation (ASF) under one +* or more contributor license agreements. See the NOTICE file +* distributed with this work for additional information +* regarding copyright ownership. The ASF licenses this file +* to you under the Apache License, Version 2.0 (the +* "License"); you may not use this file except in compliance +* with the License. You may obtain a copy of the License at +* +* http://www.apache.org/licenses/LICENSE-2.0 +* +* Unless required by applicable law or agreed to in writing, software +* distributed under the License is distributed on an "AS IS" BASIS, +* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +* See the License for the specific language governing permissions and +* limitations under the License. 
+*/ package org.apache.hadoop.fs; import java.io.FileNotFoundException; diff --git a/hadoop-common-project/hadoop-common/src/test/resources/META-INF/services/org.apache.hadoop.security.token.TokenIdentifier b/hadoop-common-project/hadoop-common/src/test/resources/META-INF/services/org.apache.hadoop.security.token.TokenIdentifier index 891a67b61f4..56eab0553d2 100644 --- a/hadoop-common-project/hadoop-common/src/test/resources/META-INF/services/org.apache.hadoop.security.token.TokenIdentifier +++ b/hadoop-common-project/hadoop-common/src/test/resources/META-INF/services/org.apache.hadoop.security.token.TokenIdentifier @@ -1,2 +1,15 @@ +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# org.apache.hadoop.ipc.TestSaslRPC$TestTokenIdentifier org.apache.hadoop.security.token.delegation.TestDelegationToken$TestDelegationTokenIdentifier diff --git a/hadoop-common-project/pom.xml b/hadoop-common-project/pom.xml index 2ee82dd7500..a09c29b67a2 100644 --- a/hadoop-common-project/pom.xml +++ b/hadoop-common-project/pom.xml @@ -49,9 +49,6 @@ org.apache.rat apache-rat-plugin - - pom.xml - diff --git a/hadoop-dist/pom.xml b/hadoop-dist/pom.xml index db0412fe46a..2e03c0ebab0 100644 --- a/hadoop-dist/pom.xml +++ b/hadoop-dist/pom.xml @@ -66,9 +66,6 @@ org.apache.rat apache-rat-plugin - - pom.xml - diff --git a/hadoop-tools/hadoop-distcp/src/main/resources/distcp-default.xml b/hadoop-tools/hadoop-distcp/src/main/resources/distcp-default.xml index 0d936c9f0c1..a5c44431810 100644 --- a/hadoop-tools/hadoop-distcp/src/main/resources/distcp-default.xml +++ b/hadoop-tools/hadoop-distcp/src/main/resources/distcp-default.xml @@ -1,5 +1,20 @@ + diff --git a/hadoop-tools/hadoop-distcp/src/site/xdoc/appendix.xml b/hadoop-tools/hadoop-distcp/src/site/xdoc/appendix.xml index 84662c076c7..49e8e3a0759 100644 --- a/hadoop-tools/hadoop-distcp/src/site/xdoc/appendix.xml +++ b/hadoop-tools/hadoop-distcp/src/site/xdoc/appendix.xml @@ -1,4 +1,19 @@ + diff --git a/hadoop-tools/hadoop-distcp/src/site/xdoc/architecture.xml b/hadoop-tools/hadoop-distcp/src/site/xdoc/architecture.xml index 18c49259ae7..fd536c7290b 100644 --- a/hadoop-tools/hadoop-distcp/src/site/xdoc/architecture.xml +++ b/hadoop-tools/hadoop-distcp/src/site/xdoc/architecture.xml @@ -1,4 +1,19 @@ + diff --git a/hadoop-tools/hadoop-distcp/src/site/xdoc/cli.xml b/hadoop-tools/hadoop-distcp/src/site/xdoc/cli.xml index e4eccd54878..f35038f85a5 100644 --- a/hadoop-tools/hadoop-distcp/src/site/xdoc/cli.xml +++ b/hadoop-tools/hadoop-distcp/src/site/xdoc/cli.xml @@ -1,4 +1,19 @@ + diff --git a/hadoop-tools/hadoop-distcp/src/site/xdoc/index.xml b/hadoop-tools/hadoop-distcp/src/site/xdoc/index.xml index 27108a7dcef..62e48fce8b5 100644 --- a/hadoop-tools/hadoop-distcp/src/site/xdoc/index.xml +++ b/hadoop-tools/hadoop-distcp/src/site/xdoc/index.xml @@ -1,4 +1,19 @@ + diff --git a/hadoop-tools/hadoop-distcp/src/site/xdoc/usage.xml b/hadoop-tools/hadoop-distcp/src/site/xdoc/usage.xml index a72ec05418e..208b0b7df94 100644 --- a/hadoop-tools/hadoop-distcp/src/site/xdoc/usage.xml +++ 
b/hadoop-tools/hadoop-distcp/src/site/xdoc/usage.xml @@ -1,3 +1,18 @@ + diff --git a/hadoop-tools/hadoop-distcp/src/test/resources/sslConfig.xml b/hadoop-tools/hadoop-distcp/src/test/resources/sslConfig.xml index 016edf27ed1..64485f11f87 100644 --- a/hadoop-tools/hadoop-distcp/src/test/resources/sslConfig.xml +++ b/hadoop-tools/hadoop-distcp/src/test/resources/sslConfig.xml @@ -1,5 +1,20 @@ + diff --git a/hadoop-tools/hadoop-pipes/src/main/native/examples/conf/word-part.xml b/hadoop-tools/hadoop-pipes/src/main/native/examples/conf/word-part.xml index b552a1ccd76..5425de205bb 100644 --- a/hadoop-tools/hadoop-pipes/src/main/native/examples/conf/word-part.xml +++ b/hadoop-tools/hadoop-pipes/src/main/native/examples/conf/word-part.xml @@ -1,4 +1,21 @@ + + diff --git a/hadoop-tools/hadoop-pipes/src/main/native/examples/conf/word.xml b/hadoop-tools/hadoop-pipes/src/main/native/examples/conf/word.xml index ed727ddf333..9d1cd572dc3 100644 --- a/hadoop-tools/hadoop-pipes/src/main/native/examples/conf/word.xml +++ b/hadoop-tools/hadoop-pipes/src/main/native/examples/conf/word.xml @@ -1,4 +1,21 @@ + + diff --git a/hadoop-tools/hadoop-pipes/src/main/native/pipes/debug/pipes-default-gdb-commands.txt b/hadoop-tools/hadoop-pipes/src/main/native/pipes/debug/pipes-default-gdb-commands.txt index 6cfd4d6d121..906522c73c3 100644 --- a/hadoop-tools/hadoop-pipes/src/main/native/pipes/debug/pipes-default-gdb-commands.txt +++ b/hadoop-tools/hadoop-pipes/src/main/native/pipes/debug/pipes-default-gdb-commands.txt @@ -1,3 +1,14 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. info threads backtrace quit diff --git a/hadoop-tools/hadoop-pipes/src/main/native/pipes/debug/pipes-default-script b/hadoop-tools/hadoop-pipes/src/main/native/pipes/debug/pipes-default-script index e7f59e5f6f5..6bacc437e43 100644 --- a/hadoop-tools/hadoop-pipes/src/main/native/pipes/debug/pipes-default-script +++ b/hadoop-tools/hadoop-pipes/src/main/native/pipes/debug/pipes-default-script @@ -1,3 +1,14 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. core=`find . -name 'core*'` #Only pipes programs have 5th argument as program name. 
gdb -quiet $5 -c $core -x $HADOOP_PREFIX/src/c++/pipes/debug/pipes-default-gdb-commands.txt diff --git a/hadoop-tools/hadoop-rumen/src/main/java/org/apache/hadoop/tools/rumen/anonymization/WordList.java b/hadoop-tools/hadoop-rumen/src/main/java/org/apache/hadoop/tools/rumen/anonymization/WordList.java index f160fcfab77..10b8d84852e 100644 --- a/hadoop-tools/hadoop-rumen/src/main/java/org/apache/hadoop/tools/rumen/anonymization/WordList.java +++ b/hadoop-tools/hadoop-rumen/src/main/java/org/apache/hadoop/tools/rumen/anonymization/WordList.java @@ -1,3 +1,20 @@ +/** +* Licensed to the Apache Software Foundation (ASF) under one +* or more contributor license agreements. See the NOTICE file +* distributed with this work for additional information +* regarding copyright ownership. The ASF licenses this file +* to you under the Apache License, Version 2.0 (the +* "License"); you may not use this file except in compliance +* with the License. You may obtain a copy of the License at +* +* http://www.apache.org/licenses/LICENSE-2.0 +* +* Unless required by applicable law or agreed to in writing, software +* distributed under the License is distributed on an "AS IS" BASIS, +* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +* See the License for the specific language governing permissions and +* limitations under the License. +*/ package org.apache.hadoop.tools.rumen.anonymization; import java.util.HashMap; diff --git a/hadoop-tools/hadoop-tools-dist/pom.xml b/hadoop-tools/hadoop-tools-dist/pom.xml index 951eab139c5..cbd0bb545a2 100644 --- a/hadoop-tools/hadoop-tools-dist/pom.xml +++ b/hadoop-tools/hadoop-tools-dist/pom.xml @@ -91,9 +91,6 @@ org.apache.rat apache-rat-plugin - - pom.xml - diff --git a/hadoop-tools/pom.xml b/hadoop-tools/pom.xml index bc75e2b6536..f6fada59ac1 100644 --- a/hadoop-tools/pom.xml +++ b/hadoop-tools/pom.xml @@ -54,9 +54,6 @@ org.apache.rat apache-rat-plugin - - pom.xml - diff --git a/pom.xml b/pom.xml index 0d97191b955..43dd1525aec 100644 --- a/pom.xml +++ b/pom.xml @@ -294,12 +294,14 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xs org.apache.rat apache-rat-plugin - - - dev-support/* - pom.xml - - + + + .gitattributes + .gitignore + .git/** + .idea/** + + maven-site-plugin From 337e066bc3d669ec052c758ae6531747158ef84f Mon Sep 17 00:00:00 2001 From: Robert Joseph Evans Date: Mon, 14 Jan 2013 15:14:39 +0000 Subject: [PATCH 30/31] HADOOP-9202. test-patch.sh fails during mvn eclipse:eclipse if patch adds a new module to the build (Chris Nauroth via bobby) git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1432949 13f79535-47bb-0310-9956-ffa450edef68 --- dev-support/test-patch.sh | 4 ++-- hadoop-common-project/hadoop-common/CHANGES.txt | 7 +++++-- 2 files changed, 7 insertions(+), 4 deletions(-) diff --git a/dev-support/test-patch.sh b/dev-support/test-patch.sh index 3800824a82c..4653efdbda9 100755 --- a/dev-support/test-patch.sh +++ b/dev-support/test-patch.sh @@ -980,12 +980,12 @@ fi (( RESULT = RESULT + $JAVAC_RET )) checkJavadocWarnings (( RESULT = RESULT + $? )) -checkEclipseGeneration -(( RESULT = RESULT + $? )) ### Checkstyle not implemented yet #checkStyle #(( RESULT = RESULT + $? )) buildAndInstall +checkEclipseGeneration +(( RESULT = RESULT + $? )) checkFindbugsWarnings (( RESULT = RESULT + $? 
)) checkReleaseAuditWarnings diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt index 05ff9d16826..dd4605504f2 100644 --- a/hadoop-common-project/hadoop-common/CHANGES.txt +++ b/hadoop-common-project/hadoop-common/CHANGES.txt @@ -308,10 +308,13 @@ Trunk (Unreleased) HADOOP-9131. Turn off TestLocalFileSystem#testListStatusWithColons on Windows. (Chris Nauroth via suresh) - HADOOP-8957 AbstractFileSystem#IsValidName should be overridden for + HADOOP-8957. AbstractFileSystem#IsValidName should be overridden for embedded file systems like ViewFs (Chris Nauroth via Sanjay Radia) - HADOOP-9139 improve killKdc.sh (Ivan A. Veselovsky via bobby) + HADOOP-9139. improve killKdc.sh (Ivan A. Veselovsky via bobby) + + HADOOP-9202. test-patch.sh fails during mvn eclipse:eclipse if patch adds + a new module to the build (Chris Nauroth via bobby) OPTIMIZATIONS From 3052ad1f0069af5caee621374b29d17d7f12ab51 Mon Sep 17 00:00:00 2001 From: Todd Lipcon Date: Mon, 14 Jan 2013 20:47:08 +0000 Subject: [PATCH 31/31] HDFS-3429. DataNode reads checksums even if client does not need them. Contributed by Todd Lipcon. git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1433117 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 2 + .../apache/hadoop/hdfs/RemoteBlockReader.java | 3 +- .../hadoop/hdfs/RemoteBlockReader2.java | 3 +- .../datatransfer/DataTransferProtocol.java | 5 +- .../hdfs/protocol/datatransfer/Receiver.java | 3 +- .../hdfs/protocol/datatransfer/Sender.java | 8 ++- .../datanode/BlockPoolSliceScanner.java | 4 +- .../hdfs/server/datanode/BlockSender.java | 61 ++++++++++++------- .../hadoop/hdfs/server/datanode/DataNode.java | 2 +- .../hdfs/server/datanode/DataXceiver.java | 7 ++- .../src/main/proto/datatransfer.proto | 1 + .../hadoop/hdfs/TestDataTransferProtocol.java | 12 ++-- .../apache/hadoop/hdfs/TestParallelRead.java | 10 +++ .../hadoop/hdfs/TestParallelReadUtil.java | 4 +- .../org/apache/hadoop/hdfs/TestPread.java | 18 +++++- 15 files changed, 101 insertions(+), 42 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt index 69773f9ae58..c23876e9c18 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt @@ -482,6 +482,8 @@ Release 2.0.3-alpha - Unreleased OPTIMIZATIONS + HDFS-3429. DataNode reads checksums even if client does not need them (todd) + BUG FIXES HDFS-3919. MiniDFSCluster:waitClusterUp can hang forever. 
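For context before the code changes that follow: the new behavior is driven from the existing public client API rather than a new one. Disabling checksum verification on the FileSystem (as the updated TestPread below does) now also tells the DataNode not to read or send checksums at all. A minimal sketch, assuming a running HDFS cluster and a hypothetical file path:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class NoChecksumReadSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(conf);

    // Skip client-side verification; with this patch the read request also
    // carries sendChecksums=false, so the DataNode skips the meta file.
    fs.setVerifyChecksum(false);

    Path file = new Path("/tmp/preadtest.dat");   // hypothetical test file
    FSDataInputStream in = fs.open(file);
    try {
      byte[] buf = new byte[4096];
      int n = in.read(0L, buf, 0, buf.length);    // positional read
      System.out.println("read " + n + " bytes with no checksum transfer");
    } finally {
      in.close();
    }
  }
}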
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/RemoteBlockReader.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/RemoteBlockReader.java index dc449ee2f24..f7ac589921e 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/RemoteBlockReader.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/RemoteBlockReader.java @@ -380,7 +380,8 @@ public class RemoteBlockReader extends FSInputChecker implements BlockReader { // in and out will be closed when sock is closed (by the caller) final DataOutputStream out = new DataOutputStream(new BufferedOutputStream( NetUtils.getOutputStream(sock, HdfsServerConstants.WRITE_TIMEOUT))); - new Sender(out).readBlock(block, blockToken, clientName, startOffset, len); + new Sender(out).readBlock(block, blockToken, clientName, startOffset, len, + verifyChecksum); // // Get bytes in block, set streams diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/RemoteBlockReader2.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/RemoteBlockReader2.java index 3450cd1524d..58bb37a724a 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/RemoteBlockReader2.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/RemoteBlockReader2.java @@ -392,7 +392,8 @@ public class RemoteBlockReader2 implements BlockReader { // in and out will be closed when sock is closed (by the caller) final DataOutputStream out = new DataOutputStream(new BufferedOutputStream( ioStreams.out)); - new Sender(out).readBlock(block, blockToken, clientName, startOffset, len); + new Sender(out).readBlock(block, blockToken, clientName, startOffset, len, + verifyChecksum); // // Get bytes in block diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/DataTransferProtocol.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/DataTransferProtocol.java index 98094472a73..7f4463789b4 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/DataTransferProtocol.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/DataTransferProtocol.java @@ -55,12 +55,15 @@ public interface DataTransferProtocol { * @param clientName client's name. * @param blockOffset offset of the block. * @param length maximum number of bytes for this read. + * @param sendChecksum if false, the DN should skip reading and sending + * checksums */ public void readBlock(final ExtendedBlock blk, final Token blockToken, final String clientName, final long blockOffset, - final long length) throws IOException; + final long length, + final boolean sendChecksum) throws IOException; /** * Write a block to a datanode pipeline. 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/Receiver.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/Receiver.java index b1edc20e3a9..a156dfa538a 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/Receiver.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/Receiver.java @@ -88,7 +88,8 @@ public abstract class Receiver implements DataTransferProtocol { PBHelper.convert(proto.getHeader().getBaseHeader().getToken()), proto.getHeader().getClientName(), proto.getOffset(), - proto.getLen()); + proto.getLen(), + proto.getSendChecksums()); } /** Receive OP_WRITE_BLOCK */ diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/Sender.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/Sender.java index 8184c500f8b..fb8bee5388b 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/Sender.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/Sender.java @@ -62,6 +62,10 @@ public class Sender implements DataTransferProtocol { private static void send(final DataOutputStream out, final Op opcode, final Message proto) throws IOException { + if (LOG.isTraceEnabled()) { + LOG.trace("Sending DataTransferOp " + proto.getClass().getSimpleName() + + ": " + proto); + } op(out, opcode); proto.writeDelimitedTo(out); out.flush(); @@ -72,12 +76,14 @@ public class Sender implements DataTransferProtocol { final Token blockToken, final String clientName, final long blockOffset, - final long length) throws IOException { + final long length, + final boolean sendChecksum) throws IOException { OpReadBlockProto proto = OpReadBlockProto.newBuilder() .setHeader(DataTransferProtoUtil.buildClientHeader(blk, clientName, blockToken)) .setOffset(blockOffset) .setLen(length) + .setSendChecksums(sendChecksum) .build(); send(out, Op.READ_BLOCK, proto); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceScanner.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceScanner.java index d3d2f915ca4..8a117546ff5 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceScanner.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceScanner.java @@ -388,8 +388,8 @@ class BlockPoolSliceScanner { try { adjustThrottler(); - blockSender = new BlockSender(block, 0, -1, false, true, datanode, - null); + blockSender = new BlockSender(block, 0, -1, false, true, true, + datanode, null); DataOutputStream out = new DataOutputStream(new IOUtils.NullOutputStream()); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockSender.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockSender.java index bbcb2dd2e1f..fdade84f0ef 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockSender.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockSender.java @@ -45,6 +45,8 @@ import org.apache.hadoop.io.nativeio.NativeIO; import 
org.apache.hadoop.net.SocketOutputStream; import org.apache.hadoop.util.DataChecksum; +import com.google.common.base.Preconditions; + /** * Reads a block from the disk and sends it to a recipient. * @@ -158,12 +160,14 @@ class BlockSender implements java.io.Closeable { * @param length length of data to read * @param corruptChecksumOk * @param verifyChecksum verify checksum while reading the data + * @param sendChecksum send checksum to client. * @param datanode datanode from which the block is being read * @param clientTraceFmt format string used to print client trace logs * @throws IOException */ BlockSender(ExtendedBlock block, long startOffset, long length, boolean corruptChecksumOk, boolean verifyChecksum, + boolean sendChecksum, DataNode datanode, String clientTraceFmt) throws IOException { try { @@ -175,6 +179,13 @@ class BlockSender implements java.io.Closeable { this.shouldDropCacheBehindRead = datanode.getDnConf().dropCacheBehindReads; this.datanode = datanode; + if (verifyChecksum) { + // To simplify implementation, callers may not specify verification + // without sending. + Preconditions.checkArgument(sendChecksum, + "If verifying checksum, currently must also send it."); + } + final Replica replica; final long replicaVisibleLength; synchronized(datanode.data) { @@ -213,29 +224,37 @@ class BlockSender implements java.io.Closeable { * False, True: will verify checksum * False, False: throws IOException file not found */ - DataChecksum csum; - final InputStream metaIn = datanode.data.getMetaDataInputStream(block); - if (!corruptChecksumOk || metaIn != null) { - if (metaIn == null) { - //need checksum but meta-data not found - throw new FileNotFoundException("Meta-data not found for " + block); - } - - checksumIn = new DataInputStream( - new BufferedInputStream(metaIn, HdfsConstants.IO_FILE_BUFFER_SIZE)); + DataChecksum csum = null; + if (verifyChecksum || sendChecksum) { + final InputStream metaIn = datanode.data.getMetaDataInputStream(block); + if (!corruptChecksumOk || metaIn != null) { + if (metaIn == null) { + //need checksum but meta-data not found + throw new FileNotFoundException("Meta-data not found for " + block); + } - // read and handle the common header here. For now just a version - BlockMetadataHeader header = BlockMetadataHeader.readHeader(checksumIn); - short version = header.getVersion(); - if (version != BlockMetadataHeader.VERSION) { - LOG.warn("Wrong version (" + version + ") for metadata file for " - + block + " ignoring ..."); + checksumIn = new DataInputStream( + new BufferedInputStream(metaIn, HdfsConstants.IO_FILE_BUFFER_SIZE)); + + // read and handle the common header here. For now just a version + BlockMetadataHeader header = BlockMetadataHeader.readHeader(checksumIn); + short version = header.getVersion(); + if (version != BlockMetadataHeader.VERSION) { + LOG.warn("Wrong version (" + version + ") for metadata file for " + + block + " ignoring ..."); + } + csum = header.getChecksum(); + } else { + LOG.warn("Could not find metadata file for " + block); } - csum = header.getChecksum(); - } else { - LOG.warn("Could not find metadata file for " + block); - // This only decides the buffer size. Use BUFFER_SIZE? - csum = DataChecksum.newDataChecksum(DataChecksum.Type.NULL, 16 * 1024); + } + if (csum == null) { + // The number of bytes per checksum here determines the alignment + // of reads: we always start reading at a checksum chunk boundary, + // even if the checksum type is NULL. 
So, choosing too big of a value + // would risk sending too much unnecessary data. 512 (1 disk sector) + // is likely to result in minimal extra IO. + csum = DataChecksum.newDataChecksum(DataChecksum.Type.NULL, 512); } /* diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java index c1845fd152b..375c6954a7f 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java @@ -1441,7 +1441,7 @@ public class DataNode extends Configured HdfsConstants.SMALL_BUFFER_SIZE)); in = new DataInputStream(unbufIn); blockSender = new BlockSender(b, 0, b.getNumBytes(), - false, false, DataNode.this, null); + false, false, true, DataNode.this, null); DatanodeInfo srcNode = new DatanodeInfo(bpReg); // diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java index 255fd35ff35..1d4c1c3fc70 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java @@ -241,7 +241,8 @@ class DataXceiver extends Receiver implements Runnable { final Token blockToken, final String clientName, final long blockOffset, - final long length) throws IOException { + final long length, + final boolean sendChecksum) throws IOException { previousOpClientName = clientName; OutputStream baseStream = getOutputStream(); @@ -266,7 +267,7 @@ class DataXceiver extends Receiver implements Runnable { try { try { blockSender = new BlockSender(block, blockOffset, length, - true, false, datanode, clientTraceFmt); + true, false, sendChecksum, datanode, clientTraceFmt); } catch(IOException e) { String msg = "opReadBlock " + block + " received exception " + e; LOG.info(msg); @@ -654,7 +655,7 @@ class DataXceiver extends Receiver implements Runnable { try { // check if the block exists or not - blockSender = new BlockSender(block, 0, -1, false, false, datanode, + blockSender = new BlockSender(block, 0, -1, false, false, true, datanode, null); // set up response stream diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/datatransfer.proto b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/datatransfer.proto index 8ce5fd75661..d97bd7daee1 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/datatransfer.proto +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/datatransfer.proto @@ -52,6 +52,7 @@ message OpReadBlockProto { required ClientOperationHeaderProto header = 1; required uint64 offset = 2; required uint64 len = 3; + optional bool sendChecksums = 4 [default = true]; } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDataTransferProtocol.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDataTransferProtocol.java index 77ea9c5907e..d699f750fc5 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDataTransferProtocol.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDataTransferProtocol.java @@ -444,21 +444,21 @@ public class TestDataTransferProtocol { recvBuf.reset(); 
blk.setBlockId(blkid-1); sender.readBlock(blk, BlockTokenSecretManager.DUMMY_TOKEN, "cl", - 0L, fileLen); + 0L, fileLen, true); sendRecvData("Wrong block ID " + newBlockId + " for read", false); // negative block start offset -1L sendBuf.reset(); blk.setBlockId(blkid); sender.readBlock(blk, BlockTokenSecretManager.DUMMY_TOKEN, "cl", - -1L, fileLen); + -1L, fileLen, true); sendRecvData("Negative start-offset for read for block " + firstBlock.getBlockId(), false); // bad block start offset sendBuf.reset(); sender.readBlock(blk, BlockTokenSecretManager.DUMMY_TOKEN, "cl", - fileLen, fileLen); + fileLen, fileLen, true); sendRecvData("Wrong start-offset for reading block " + firstBlock.getBlockId(), false); @@ -475,7 +475,7 @@ public class TestDataTransferProtocol { sendBuf.reset(); sender.readBlock(blk, BlockTokenSecretManager.DUMMY_TOKEN, "cl", - 0L, -1L-random.nextInt(oneMil)); + 0L, -1L-random.nextInt(oneMil), true); sendRecvData("Negative length for reading block " + firstBlock.getBlockId(), false); @@ -488,14 +488,14 @@ public class TestDataTransferProtocol { recvOut); sendBuf.reset(); sender.readBlock(blk, BlockTokenSecretManager.DUMMY_TOKEN, "cl", - 0L, fileLen+1); + 0L, fileLen+1, true); sendRecvData("Wrong length for reading block " + firstBlock.getBlockId(), false); //At the end of all this, read the file to make sure that succeeds finally. sendBuf.reset(); sender.readBlock(blk, BlockTokenSecretManager.DUMMY_TOKEN, "cl", - 0L, fileLen); + 0L, fileLen, true); readFile(fileSys, file, fileLen); } finally { cluster.shutdown(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestParallelRead.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestParallelRead.java index b4320520354..fa384cde7a6 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestParallelRead.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestParallelRead.java @@ -19,6 +19,9 @@ package org.apache.hadoop.hdfs; import java.io.IOException; +import org.apache.commons.logging.impl.Log4JLogger; +import org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtocol; +import org.apache.log4j.Level; import org.junit.AfterClass; import org.junit.BeforeClass; import org.junit.Test; @@ -56,4 +59,11 @@ public class TestParallelRead extends TestParallelReadUtil { public void testParallelReadMixed() throws IOException { runTestWorkload(new MixedWorkloadHelper()); } + + @Test + public void testParallelNoChecksums() throws IOException { + verifyChecksums = false; + runTestWorkload(new MixedWorkloadHelper()); + } + } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestParallelReadUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestParallelReadUtil.java index 1c59eca871d..51c3200d2ce 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestParallelReadUtil.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestParallelReadUtil.java @@ -46,6 +46,7 @@ public class TestParallelReadUtil { static final int FILE_SIZE_K = 256; static Random rand = null; static final int DEFAULT_REPLICATION_FACTOR = 2; + protected boolean verifyChecksums = true; static { // The client-trace log ends up causing a lot of blocking threads @@ -317,7 +318,8 @@ public class TestParallelReadUtil { testInfo.filepath = new Path("/TestParallelRead.dat." 
+ i); testInfo.authenticData = util.writeFile(testInfo.filepath, FILE_SIZE_K); - testInfo.dis = dfsClient.open(testInfo.filepath.toString()); + testInfo.dis = dfsClient.open(testInfo.filepath.toString(), + dfsClient.dfsClientConf.ioBufferSize, verifyChecksums); for (int j = 0; j < nWorkerEach; ++j) { workers[nWorkers++] = new ReadWorker(testInfo, nWorkers, helper); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestPread.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestPread.java index 1e0681f4711..9afa493391a 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestPread.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestPread.java @@ -24,11 +24,14 @@ import java.io.DataOutputStream; import java.io.IOException; import java.util.Random; +import org.apache.commons.logging.impl.Log4JLogger; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FSDataInputStream; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtocol; import org.apache.hadoop.hdfs.server.datanode.SimulatedFSDataset; +import org.apache.log4j.Level; import org.junit.Test; /** @@ -194,11 +197,19 @@ public class TestPread { */ @Test public void testPreadDFS() throws IOException { - dfsPreadTest(false); //normal pread - dfsPreadTest(true); //trigger read code path without transferTo. + dfsPreadTest(false, true); //normal pread + dfsPreadTest(true, true); //trigger read code path without transferTo. } - private void dfsPreadTest(boolean disableTransferTo) throws IOException { + @Test + public void testPreadDFSNoChecksum() throws IOException { + ((Log4JLogger)DataTransferProtocol.LOG).getLogger().setLevel(Level.ALL); + dfsPreadTest(false, false); + dfsPreadTest(true, false); + } + + private void dfsPreadTest(boolean disableTransferTo, boolean verifyChecksum) + throws IOException { Configuration conf = new HdfsConfiguration(); conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 4096); conf.setLong(DFSConfigKeys.DFS_CLIENT_READ_PREFETCH_SIZE_KEY, 4096); @@ -210,6 +221,7 @@ public class TestPread { } MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build(); FileSystem fileSys = cluster.getFileSystem(); + fileSys.setVerifyChecksum(verifyChecksum); try { Path file1 = new Path("preadtest.dat"); writeFile(fileSys, file1);
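Taken together, the BlockSender change above reduces to a small decision rule: read the real checksum from the block's meta file only when it will be verified or sent, and otherwise fall back to a NULL checksum whose 512-byte chunk size exists purely to keep reads sector-aligned. The sketch below restates that rule outside the DataNode; the class and method names are illustrative only, while the DataChecksum calls are the ones the patch itself uses:

import org.apache.hadoop.util.DataChecksum;

/**
 * Illustrative restatement of the checksum-selection rule added to
 * BlockSender by HDFS-3429; not the actual DataNode code.
 */
public class ChecksumChoiceSketch {

  static DataChecksum chooseChecksum(boolean verifyChecksum,
                                     boolean sendChecksum,
                                     DataChecksum fromMetaFile) {
    // Mirrors the Preconditions check: verification without sending
    // is not supported.
    if (verifyChecksum && !sendChecksum) {
      throw new IllegalArgumentException(
          "If verifying checksum, currently must also send it.");
    }
    // In BlockSender the meta file is only opened when a checksum is
    // actually needed; here that is modeled by the passed-in value.
    DataChecksum csum =
        (verifyChecksum || sendChecksum) ? fromMetaFile : null;
    if (csum == null) {
      // NULL checksum: bytesPerChecksum only sets read alignment, so keep
      // it at one 512-byte disk sector to avoid sending extra data.
      csum = DataChecksum.newDataChecksum(DataChecksum.Type.NULL, 512);
    }
    return csum;
  }

  public static void main(String[] args) {
    DataChecksum c = chooseChecksum(false, false, null);
    System.out.println("bytes per checksum: " + c.getBytesPerChecksum()); // 512
  }
}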