diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index c2f0363fc09..be5c238f18f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -418,6 +418,9 @@ Release 2.8.0 - UNRELEASED
     HDFS-8100. Refactor DFSClient.Conf to a standalone class and separates
     short-circuit related conf to ShortCircuitConf. (szetszwo)
 
+    HDFS-8103. Move BlockTokenSecretManager.AccessMode into
+    BlockTokenIdentifier. (wheat9)
+
   OPTIMIZATIONS
 
     HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/block/BlockPoolTokenSecretManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/block/BlockPoolTokenSecretManager.java
index 0df7067509a..cdfe7ec54b1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/block/BlockPoolTokenSecretManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/block/BlockPoolTokenSecretManager.java
@@ -23,7 +23,7 @@
 import java.util.Map;
 
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
-import org.apache.hadoop.hdfs.security.token.block.BlockTokenSecretManager.AccessMode;
+import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier.AccessMode;
 import org.apache.hadoop.security.token.SecretManager;
 import org.apache.hadoop.security.token.Token;
 
@@ -81,7 +81,7 @@ public byte[] retrievePassword(BlockTokenIdentifier identifier)
 
   /**
    * See {@link BlockTokenSecretManager#checkAccess(BlockTokenIdentifier,
-   * String, ExtendedBlock, AccessMode)}
+   * String, ExtendedBlock, BlockTokenIdentifier.AccessMode)}
    */
   public void checkAccess(BlockTokenIdentifier id, String userId,
       ExtendedBlock block, AccessMode mode) throws InvalidToken {
@@ -90,7 +90,7 @@ public void checkAccess(BlockTokenIdentifier id, String userId,
 
   /**
    * See {@link BlockTokenSecretManager#checkAccess(Token, String,
-   * ExtendedBlock, AccessMode)}
+   * ExtendedBlock, BlockTokenIdentifier.AccessMode)}
    */
   public void checkAccess(Token<BlockTokenIdentifier> token, String userId,
       ExtendedBlock block, AccessMode mode) throws InvalidToken {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/block/BlockTokenIdentifier.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/block/BlockTokenIdentifier.java
index 67b1fe95389..e293dcc3886 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/block/BlockTokenIdentifier.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/block/BlockTokenIdentifier.java
@@ -24,7 +24,6 @@
 import java.util.EnumSet;
 
 import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.hdfs.security.token.block.BlockTokenSecretManager.AccessMode;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.io.WritableUtils;
 import org.apache.hadoop.security.UserGroupInformation;
@@ -35,6 +34,10 @@ public class BlockTokenIdentifier extends TokenIdentifier {
 
   static final Text KIND_NAME = new Text("HDFS_BLOCK_TOKEN");
 
+  public enum AccessMode {
+    READ, WRITE, COPY, REPLACE
+  }
+
   private long expiryDate;
   private int keyId;
   private String userId;
@@ -175,7 +178,7 @@ public byte[] getBytes() {
     return cache;
   }
-  
+
   @InterfaceAudience.Private
   public static class Renewer extends Token.TrivialRenewer {
     @Override
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/block/BlockTokenSecretManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/block/BlockTokenSecretManager.java
index a3685ca39b6..b103c1abc9a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/block/BlockTokenSecretManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/block/BlockTokenSecretManager.java
@@ -80,9 +80,7 @@ public class BlockTokenSecretManager extends
 
   private final SecureRandom nonceGenerator = new SecureRandom();
 
-  public static enum AccessMode {
-    READ, WRITE, COPY, REPLACE
-  };
+  ;
 
   /**
    * Constructor for slaves.
@@ -239,7 +237,7 @@ synchronized boolean updateKeys() throws IOException {
 
   /** Generate an block token for current user */
   public Token<BlockTokenIdentifier> generateToken(ExtendedBlock block,
-      EnumSet<AccessMode> modes) throws IOException {
+      EnumSet<BlockTokenIdentifier.AccessMode> modes) throws IOException {
     UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
     String userID = (ugi == null ? null : ugi.getShortUserName());
     return generateToken(userID, block, modes);
@@ -247,7 +245,7 @@ public Token<BlockTokenIdentifier> generateToken(ExtendedBlock block,
 
   /** Generate a block token for a specified user */
   public Token<BlockTokenIdentifier> generateToken(String userId,
-      ExtendedBlock block, EnumSet<AccessMode> modes) throws IOException {
+      ExtendedBlock block, EnumSet<BlockTokenIdentifier.AccessMode> modes) throws IOException {
     BlockTokenIdentifier id = new BlockTokenIdentifier(userId, block
         .getBlockPoolId(), block.getBlockId(), modes);
     return new Token<BlockTokenIdentifier>(id, this);
@@ -259,7 +257,7 @@ public Token<BlockTokenIdentifier> generateToken(String userId,
    * when token password has already been verified (e.g., in the RPC layer).
    */
   public void checkAccess(BlockTokenIdentifier id, String userId,
-      ExtendedBlock block, AccessMode mode) throws InvalidToken {
+      ExtendedBlock block, BlockTokenIdentifier.AccessMode mode) throws InvalidToken {
     if (LOG.isDebugEnabled()) {
       LOG.debug("Checking access for user=" + userId + ", block=" + block
           + ", access mode=" + mode + " using " + id.toString());
@@ -288,7 +286,7 @@ public void checkAccess(BlockTokenIdentifier id, String userId,
 
   /** Check if access should be allowed. userID is not checked if null */
   public void checkAccess(Token<BlockTokenIdentifier> token, String userId,
-      ExtendedBlock block, AccessMode mode) throws InvalidToken {
+      ExtendedBlock block, BlockTokenIdentifier.AccessMode mode) throws InvalidToken {
     BlockTokenIdentifier id = new BlockTokenIdentifier();
     try {
       id.readFields(new DataInputStream(new ByteArrayInputStream(token
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/KeyManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/KeyManager.java
index 2ac8f483ee2..1c6b352b7b1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/KeyManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/KeyManager.java
@@ -30,7 +30,6 @@
 import org.apache.hadoop.hdfs.protocol.datatransfer.sasl.DataEncryptionKeyFactory;
 import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
 import org.apache.hadoop.hdfs.security.token.block.BlockTokenSecretManager;
-import org.apache.hadoop.hdfs.security.token.block.BlockTokenSecretManager.AccessMode;
 import org.apache.hadoop.hdfs.security.token.block.DataEncryptionKey;
 import org.apache.hadoop.hdfs.security.token.block.ExportedBlockKeys;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocol;
@@ -100,7 +99,7 @@ public Token<BlockTokenIdentifier> getAccessToken(ExtendedBlock eb
           "Cannot get access token since BlockKeyUpdater is not running");
     }
     return blockTokenSecretManager.generateToken(null, eb,
-        EnumSet.of(AccessMode.REPLACE, AccessMode.COPY));
+        EnumSet.of(BlockTokenIdentifier.AccessMode.REPLACE, BlockTokenIdentifier.AccessMode.COPY));
   }
 }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index 9a6535eff91..e2c9b8965a4 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -55,8 +55,9 @@
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 import org.apache.hadoop.hdfs.protocol.UnregisteredNodeException;
+import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
 import org.apache.hadoop.hdfs.security.token.block.BlockTokenSecretManager;
-import org.apache.hadoop.hdfs.security.token.block.BlockTokenSecretManager.AccessMode;
+import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier.AccessMode;
 import org.apache.hadoop.hdfs.security.token.block.DataEncryptionKey;
 import org.apache.hadoop.hdfs.security.token.block.ExportedBlockKeys;
 import org.apache.hadoop.hdfs.server.blockmanagement.CorruptReplicasMap.Reason;
@@ -747,7 +748,7 @@ public LocatedBlock convertLastBlockToUnderConstruction(
     final long fileLength = bc.computeContentSummary(getStoragePolicySuite()).getLength();
     final long pos = fileLength - ucBlock.getNumBytes();
-    return createLocatedBlock(ucBlock, pos, AccessMode.WRITE);
+    return createLocatedBlock(ucBlock, pos, BlockTokenIdentifier.AccessMode.WRITE);
   }
 
   /**
@@ -813,7 +814,7 @@ private LocatedBlock createLocatedBlock(final BlockInfoContiguous[] blocks,
   }
 
   private LocatedBlock createLocatedBlock(final BlockInfoContiguous blk, final long pos,
-    final BlockTokenSecretManager.AccessMode mode) throws IOException {
+    final AccessMode mode) throws IOException {
     final LocatedBlock lb = createLocatedBlock(blk, pos);
     if (mode != null) {
       setBlockToken(lb, mode);
@@ -886,7 +887,7 @@ public LocatedBlocks createLocatedBlocks(final BlockInfoContiguous[] blocks,
     if (LOG.isDebugEnabled()) {
       LOG.debug("blocks = " + java.util.Arrays.asList(blocks));
     }
-    final AccessMode mode = needBlockToken? AccessMode.READ: null;
+    final AccessMode mode = needBlockToken? BlockTokenIdentifier.AccessMode.READ: null;
     final List<LocatedBlock> locatedblocks = createLocatedBlockList(
         blocks, offset, length, Integer.MAX_VALUE, mode);
 
@@ -918,7 +919,7 @@ public ExportedBlockKeys getBlockKeys() {
 
   /** Generate a block token for the located block. */
   public void setBlockToken(final LocatedBlock b,
-      final BlockTokenSecretManager.AccessMode mode) throws IOException {
+      final AccessMode mode) throws IOException {
     if (isBlockTokenEnabled()) {
       // Use cached UGI if serving RPC calls.
       b.setBlockToken(blockTokenSecretManager.generateToken(
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
index 8c08871b25a..b0e79e35b72 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
@@ -143,7 +143,7 @@
 import org.apache.hadoop.hdfs.security.token.block.BlockPoolTokenSecretManager;
 import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
 import org.apache.hadoop.hdfs.security.token.block.BlockTokenSecretManager;
-import org.apache.hadoop.hdfs.security.token.block.BlockTokenSecretManager.AccessMode;
+import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier.AccessMode;
 import org.apache.hadoop.hdfs.security.token.block.DataEncryptionKey;
 import org.apache.hadoop.hdfs.security.token.block.ExportedBlockKeys;
 import org.apache.hadoop.hdfs.security.token.block.InvalidBlockTokenException;
@@ -157,7 +157,6 @@
 import org.apache.hadoop.hdfs.server.datanode.SecureDataNodeStarter.SecureResources;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi;
-import org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsVolumeImpl;
 import org.apache.hadoop.hdfs.server.datanode.metrics.DataNodeMetrics;
 import org.apache.hadoop.hdfs.server.datanode.web.DatanodeHttpServer;
 import org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringBlock;
@@ -1540,7 +1539,7 @@ public long getMaxNumberOfBlocksToLog() {
   public BlockLocalPathInfo getBlockLocalPathInfo(ExtendedBlock block,
       Token<BlockTokenIdentifier> token) throws IOException {
     checkBlockLocalPathAccess();
-    checkBlockToken(block, token, BlockTokenSecretManager.AccessMode.READ);
+    checkBlockToken(block, token, BlockTokenIdentifier.AccessMode.READ);
     Preconditions.checkNotNull(data, "Storage not yet initialized");
     BlockLocalPathInfo info = data.getBlockLocalPathInfo(block);
     if (LOG.isDebugEnabled()) {
@@ -1585,7 +1584,7 @@ FileInputStream[] requestShortCircuitFdsForRead(final ExtendedBlock blk,
       throw new ShortCircuitFdsUnsupportedException(
           fileDescriptorPassingDisabledReason);
     }
-    checkBlockToken(blk, token, BlockTokenSecretManager.AccessMode.READ);
+    checkBlockToken(blk, token, BlockTokenIdentifier.AccessMode.READ);
     int blkVersion = CURRENT_BLOCK_FORMAT_VERSION;
     if (maxVersion < blkVersion) {
       throw new ShortCircuitFdsVersionException("Your client is too old " +
@@ -1622,7 +1621,7 @@ public HdfsBlocksMetadata getHdfsBlocksMetadata(
     // Check access for each block
     for (int i = 0; i < blockIds.length; i++) {
       checkBlockToken(new ExtendedBlock(bpId, blockIds[i]),
-          tokens.get(i), BlockTokenSecretManager.AccessMode.READ);
+          tokens.get(i), BlockTokenIdentifier.AccessMode.READ);
     }
 
     DataNodeFaultInjector.get().getHdfsBlocksMetadata();
@@ -2124,7 +2123,7 @@ public void run() {
         Token<BlockTokenIdentifier> accessToken = BlockTokenSecretManager.DUMMY_TOKEN;
         if (isBlockTokenEnabled) {
           accessToken = blockPoolTokenSecretManager.generateToken(b,
-              EnumSet.of(BlockTokenSecretManager.AccessMode.WRITE));
+              EnumSet.of(BlockTokenIdentifier.AccessMode.WRITE));
         }
 
         long writeTimeout = dnConf.socketWriteTimeout +
@@ -2847,7 +2846,7 @@ private void checkReadAccess(final ExtendedBlock block) throws IOException {
           LOG.debug("Got: " + id.toString());
         }
         blockPoolTokenSecretManager.checkAccess(id, null, block,
-            BlockTokenSecretManager.AccessMode.READ);
+            BlockTokenIdentifier.AccessMode.READ);
       }
     }
   }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java
index cf1b6bebeab..83d644929fb 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java
@@ -70,7 +70,6 @@
 import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status;
 import org.apache.hadoop.hdfs.protocolPB.PBHelper;
 import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
-import org.apache.hadoop.hdfs.security.token.block.BlockTokenSecretManager;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
 import org.apache.hadoop.hdfs.server.datanode.DataNode.ShortCircuitFdsUnsupportedException;
 import org.apache.hadoop.hdfs.server.datanode.DataNode.ShortCircuitFdsVersionException;
@@ -507,7 +506,7 @@ public void readBlock(final ExtendedBlock block,
     DataOutputStream out = new DataOutputStream(new BufferedOutputStream(
         baseStream, HdfsConstants.SMALL_BUFFER_SIZE));
     checkAccess(out, true, block, blockToken,
-        Op.READ_BLOCK, BlockTokenSecretManager.AccessMode.READ);
+        Op.READ_BLOCK, BlockTokenIdentifier.AccessMode.READ);
 
     // send the block
     BlockSender blockSender = null;
@@ -651,7 +650,7 @@ public void writeBlock(final ExtendedBlock block,
         getOutputStream(), HdfsConstants.SMALL_BUFFER_SIZE));
     checkAccess(replyOut, isClient, block, blockToken,
-        Op.WRITE_BLOCK, BlockTokenSecretManager.AccessMode.WRITE);
+        Op.WRITE_BLOCK, BlockTokenIdentifier.AccessMode.WRITE);
 
     DataOutputStream mirrorOut = null;  // stream to next target
     DataInputStream mirrorIn = null;    // reply from next target
@@ -849,7 +848,7 @@ public void transferBlock(final ExtendedBlock blk,
       final DatanodeInfo[] targets,
       final StorageType[] targetStorageTypes) throws IOException {
     checkAccess(socketOut, true, blk, blockToken,
-        Op.TRANSFER_BLOCK, BlockTokenSecretManager.AccessMode.COPY);
+        Op.TRANSFER_BLOCK, BlockTokenIdentifier.AccessMode.COPY);
     previousOpClientName = clientName;
     updateCurrentThreadName(Op.TRANSFER_BLOCK + " " + blk);
 
@@ -911,7 +910,7 @@ public void blockChecksum(final ExtendedBlock block,
     final DataOutputStream out = new DataOutputStream(
         getOutputStream());
     checkAccess(out, true, block, blockToken,
-        Op.BLOCK_CHECKSUM, BlockTokenSecretManager.AccessMode.READ);
+        Op.BLOCK_CHECKSUM, BlockTokenIdentifier.AccessMode.READ);
     // client side now can specify a range of the block for checksum
     long requestLength = block.getNumBytes();
     Preconditions.checkArgument(requestLength >= 0);
@@ -976,7 +975,7 @@ public void copyBlock(final ExtendedBlock block,
     if (datanode.isBlockTokenEnabled) {
       try {
         datanode.blockPoolTokenSecretManager.checkAccess(blockToken, null, block,
-            BlockTokenSecretManager.AccessMode.COPY);
+            BlockTokenIdentifier.AccessMode.COPY);
       } catch (InvalidToken e) {
         LOG.warn("Invalid access token in request from " + remoteAddress
             + " for OP_COPY_BLOCK for block " + block + " : "
@@ -1064,7 +1063,7 @@ public void replaceBlock(final ExtendedBlock block,
     if (datanode.isBlockTokenEnabled) {
       try {
         datanode.blockPoolTokenSecretManager.checkAccess(blockToken, null, block,
-            BlockTokenSecretManager.AccessMode.REPLACE);
+            BlockTokenIdentifier.AccessMode.REPLACE);
       } catch (InvalidToken e) {
         LOG.warn("Invalid access token in request from " + remoteAddress
             + " for OP_REPLACE_BLOCK for block " + block + " : "
@@ -1251,7 +1250,7 @@ private void checkAccess(OutputStream out, final boolean reply,
       final ExtendedBlock blk,
       final Token<BlockTokenIdentifier> t,
       final Op op,
-      final BlockTokenSecretManager.AccessMode mode) throws IOException {
+      final BlockTokenIdentifier.AccessMode mode) throws IOException {
     if (datanode.isBlockTokenEnabled) {
       if (LOG.isDebugEnabled()) {
         LOG.debug("Checking block access token for block '" + blk.getBlockId()
@@ -1264,7 +1263,7 @@ private void checkAccess(OutputStream out, final boolean reply,
         if (reply) {
           BlockOpResponseProto.Builder resp = BlockOpResponseProto.newBuilder()
               .setStatus(ERROR_ACCESS_TOKEN);
-          if (mode == BlockTokenSecretManager.AccessMode.WRITE) {
+          if (mode == BlockTokenIdentifier.AccessMode.WRITE) {
             DatanodeRegistration dnR =
                 datanode.getDNRegistrationForBP(blk.getBlockPoolId());
             // NB: Unconditionally using the xfer addr w/o hostname
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index f7d88789e05..449b7e0a0e5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -197,8 +197,7 @@
 import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport;
 import org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus;
 import org.apache.hadoop.hdfs.protocol.datatransfer.ReplaceDatanodeOnFailure;
-import org.apache.hadoop.hdfs.security.token.block.BlockTokenSecretManager;
-import org.apache.hadoop.hdfs.security.token.block.BlockTokenSecretManager.AccessMode;
+import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenSecretManager;
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenSecretManager.SecretManagerState;
@@ -3288,7 +3287,7 @@ LocatedBlock makeLocatedBlock(Block blk, DatanodeStorageInfo[] locs,
     LocatedBlock lBlk = new LocatedBlock(
         getExtendedBlock(blk), locs, offset, false);
     getBlockManager().setBlockToken(
-        lBlk, BlockTokenSecretManager.AccessMode.WRITE);
+        lBlk, BlockTokenIdentifier.AccessMode.WRITE);
     return lBlk;
   }
 
@@ -3350,7 +3349,7 @@ LocatedBlock getAdditionalDatanode(String src, long fileId,
         src, numAdditionalNodes, clientnode, chosen,
         excludes, preferredblocksize, storagePolicyID);
     final LocatedBlock lb = new LocatedBlock(blk, targets, -1, false);
-    blockManager.setBlockToken(lb, AccessMode.COPY);
+    blockManager.setBlockToken(lb, BlockTokenIdentifier.AccessMode.COPY);
     return lb;
   }
 
@@ -6272,7 +6271,7 @@ LocatedBlock updateBlockForPipeline(ExtendedBlock block,
       // get a new generation stamp and an access token
      block.setGenerationStamp(nextGenerationStamp(blockIdManager.isLegacyBlock(block.getLocalBlock())));
       locatedBlock = new LocatedBlock(block, new DatanodeInfo[0]);
-      blockManager.setBlockToken(locatedBlock, AccessMode.WRITE);
+      blockManager.setBlockToken(locatedBlock, BlockTokenIdentifier.AccessMode.WRITE);
     } finally {
       writeUnlock();
     }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/token/block/TestBlockToken.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/token/block/TestBlockToken.java
index 1fe7ba89851..d5a94268ddc 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/token/block/TestBlockToken.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/token/block/TestBlockToken.java
@@ -139,7 +139,7 @@ public GetReplicaVisibleLengthResponseProto answer(
         LOG.info("Got: " + id.toString());
         assertTrue("Received BlockTokenIdentifier is wrong", ident.equals(id));
         sm.checkAccess(id, null, PBHelper.convert(req.getBlock()),
-            BlockTokenSecretManager.AccessMode.WRITE);
+            BlockTokenIdentifier.AccessMode.WRITE);
         result = id.getBlockId();
       }
       return GetReplicaVisibleLengthResponseProto.newBuilder()
@@ -149,7 +149,7 @@ public GetReplicaVisibleLengthResponseProto answer(
 
   private BlockTokenIdentifier generateTokenId(BlockTokenSecretManager sm,
       ExtendedBlock block,
-      EnumSet<BlockTokenSecretManager.AccessMode> accessModes)
+      EnumSet<BlockTokenIdentifier.AccessMode> accessModes)
       throws IOException {
     Token<BlockTokenIdentifier> token = sm.generateToken(block, accessModes);
     BlockTokenIdentifier id = sm.createIdentifier();
@@ -164,17 +164,17 @@ public void testWritable() throws Exception {
     BlockTokenSecretManager sm = new BlockTokenSecretManager(
         blockKeyUpdateInterval, blockTokenLifetime, 0, "fake-pool", null);
     TestWritable.testWritable(generateTokenId(sm, block1,
-        EnumSet.allOf(BlockTokenSecretManager.AccessMode.class)));
+        EnumSet.allOf(BlockTokenIdentifier.AccessMode.class)));
     TestWritable.testWritable(generateTokenId(sm, block2,
-        EnumSet.of(BlockTokenSecretManager.AccessMode.WRITE)));
+        EnumSet.of(BlockTokenIdentifier.AccessMode.WRITE)));
     TestWritable.testWritable(generateTokenId(sm, block3,
-        EnumSet.noneOf(BlockTokenSecretManager.AccessMode.class)));
+        EnumSet.noneOf(BlockTokenIdentifier.AccessMode.class)));
   }
 
   private void tokenGenerationAndVerification(BlockTokenSecretManager master,
       BlockTokenSecretManager slave) throws Exception {
     // single-mode tokens
-    for (BlockTokenSecretManager.AccessMode mode : BlockTokenSecretManager.AccessMode
+    for (BlockTokenIdentifier.AccessMode mode : BlockTokenIdentifier.AccessMode
         .values()) {
       // generated by master
       Token<BlockTokenIdentifier> token1 = master.generateToken(block1,
@@ -189,8 +189,8 @@ private void tokenGenerationAndVerification(BlockTokenSecretManager master,
     }
     // multi-mode tokens
     Token<BlockTokenIdentifier> mtoken = master.generateToken(block3,
-        EnumSet.allOf(BlockTokenSecretManager.AccessMode.class));
-    for (BlockTokenSecretManager.AccessMode mode : BlockTokenSecretManager.AccessMode
+        EnumSet.allOf(BlockTokenIdentifier.AccessMode.class));
+    for (BlockTokenIdentifier.AccessMode mode : BlockTokenIdentifier.AccessMode
         .values()) {
       master.checkAccess(mtoken, null, block3, mode);
       slave.checkAccess(mtoken, null, block3, mode);
@@ -246,7 +246,7 @@ public void testBlockTokenRpc() throws Exception {
     BlockTokenSecretManager sm = new BlockTokenSecretManager(
         blockKeyUpdateInterval, blockTokenLifetime, 0, "fake-pool", null);
     Token<BlockTokenIdentifier> token = sm.generateToken(block3,
-        EnumSet.allOf(BlockTokenSecretManager.AccessMode.class));
+        EnumSet.allOf(BlockTokenIdentifier.AccessMode.class));
 
     final Server server = createMockDatanode(sm, token, conf);
 
@@ -285,7 +285,7 @@ public void testBlockTokenRpcLeak() throws Exception {
     BlockTokenSecretManager sm = new BlockTokenSecretManager(
         blockKeyUpdateInterval, blockTokenLifetime, 0, "fake-pool", null);
     Token<BlockTokenIdentifier> token = sm.generateToken(block3,
-        EnumSet.allOf(BlockTokenSecretManager.AccessMode.class));
+        EnumSet.allOf(BlockTokenIdentifier.AccessMode.class));
 
     final Server server = createMockDatanode(sm, token, conf);
     server.start();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockTokenWithDFS.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockTokenWithDFS.java
index c280027781c..43f29923432 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockTokenWithDFS.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockTokenWithDFS.java
@@ -413,21 +413,21 @@ public void testRead() throws Exception {
       tryRead(conf, lblock, false);
       // use a valid new token
       lblock.setBlockToken(sm.generateToken(lblock.getBlock(),
-          EnumSet.of(BlockTokenSecretManager.AccessMode.READ)));
+          EnumSet.of(BlockTokenIdentifier.AccessMode.READ)));
       // read should succeed
       tryRead(conf, lblock, true);
       // use a token with wrong blockID
       ExtendedBlock wrongBlock = new ExtendedBlock(lblock.getBlock()
           .getBlockPoolId(), lblock.getBlock().getBlockId() + 1);
       lblock.setBlockToken(sm.generateToken(wrongBlock,
-          EnumSet.of(BlockTokenSecretManager.AccessMode.READ)));
+          EnumSet.of(BlockTokenIdentifier.AccessMode.READ)));
       // read should fail
       tryRead(conf, lblock, false);
       // use a token with wrong access modes
       lblock.setBlockToken(sm.generateToken(lblock.getBlock(),
-          EnumSet.of(BlockTokenSecretManager.AccessMode.WRITE,
-          BlockTokenSecretManager.AccessMode.COPY,
-          BlockTokenSecretManager.AccessMode.REPLACE)));
+          EnumSet.of(BlockTokenIdentifier.AccessMode.WRITE,
+          BlockTokenIdentifier.AccessMode.COPY,
+          BlockTokenIdentifier.AccessMode.REPLACE)));
       // read should fail
       tryRead(conf, lblock, false);
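
For reviewers, a minimal usage sketch of the relocated enum (not part of the patch). It reuses the master-mode BlockTokenSecretManager constructor and fake block-pool id exactly as in TestBlockToken above; the class name, block id, and interval values are illustrative assumptions.

import java.util.EnumSet;

import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
import org.apache.hadoop.hdfs.security.token.block.BlockTokenSecretManager;
import org.apache.hadoop.security.token.Token;

public class AccessModeSketch {
  public static void main(String[] args) throws Exception {
    // Master-mode secret manager, mirroring the test setup above:
    // 10-minute key-update interval and token lifetime, serial-number
    // offset 0, a fake block-pool id, and no encryption algorithm.
    BlockTokenSecretManager sm = new BlockTokenSecretManager(
        10 * 60 * 1000L, 10 * 60 * 1000L, 0, "fake-pool", null);
    // Hypothetical block in the same pool; block id 1 is arbitrary.
    ExtendedBlock block = new ExtendedBlock("fake-pool", 1L);
    // After this patch, callers reference AccessMode via
    // BlockTokenIdentifier instead of BlockTokenSecretManager.
    Token<BlockTokenIdentifier> token = sm.generateToken(block,
        EnumSet.of(BlockTokenIdentifier.AccessMode.READ));
    // Throws InvalidToken if the token does not grant READ on this block.
    sm.checkAccess(token, null, block, BlockTokenIdentifier.AccessMode.READ);
  }
}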