HDFS-12872. EC Checksum broken when BlockAccessToken is enabled.
(cherry picked from commit 56b1ff80dd)
parent c49b4f7977
commit 859f57e32c
LocatedBlocks.java
@@ -192,6 +192,7 @@ public class LocatedBlocks {
         + "\n underConstruction=" + underConstruction
         + "\n blocks=" + blocks
         + "\n lastLocatedBlock=" + lastLocatedBlock
-        + "\n isLastBlockComplete=" + isLastBlockComplete + "}";
+        + "\n isLastBlockComplete=" + isLastBlockComplete
+        + "\n ecPolicy=" + ecPolicy + "}";
   }
 }
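For readability, here is what LocatedBlocks#toString() plausibly looks like once the hunk above is applied. Only the concatenated fields shown in the diff are certain; the method signature, the getSimpleName() prefix, and the fileLength field are assumptions taken from the surrounding class, not from this change.

// Sketch of LocatedBlocks#toString() after the change above.
// The prefix line and the fileLength field are assumed; everything from
// underConstruction onward comes from the diff, including the new ecPolicy.
@Override
public String toString() {
  return getClass().getSimpleName() + "{"
      + "\n fileLength=" + fileLength          // assumed field
      + "\n underConstruction=" + underConstruction
      + "\n blocks=" + blocks
      + "\n lastLocatedBlock=" + lastLocatedBlock
      + "\n isLastBlockComplete=" + isLastBlockComplete
      + "\n ecPolicy=" + ecPolicy + "}";
}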
BlockManager.java
@@ -1378,12 +1378,11 @@ public class BlockManager implements BlockStatsMXBean {
               b.getStorageIDs());
         }
         sb.setBlockTokens(blockTokens);
+      } else {
+        b.setBlockToken(blockTokenSecretManager.generateToken(
+            NameNode.getRemoteUser().getShortUserName(),
+            b.getBlock(), EnumSet.of(mode), b.getStorageTypes(),
+            b.getStorageIDs()));
       }
-      b.setBlockToken(blockTokenSecretManager.generateToken(
-          NameNode.getRemoteUser().getShortUserName(),
-          b.getBlock(), EnumSet.of(mode), b.getStorageTypes(),
-          b.getStorageIDs()));
     }
   }
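The BlockManager hunk above is the core of the fix: previously the NameNode generated a single block-level token for every located block, even after it had already attached per-internal-block tokens to a striped block; moving that call into an else branch restricts it to contiguous blocks. Below is a minimal, hypothetical client-side sketch of the scenario named in the commit title, not part of the patch; the configuration assumptions and the path "/ec/file1" are invented for illustration.

// Hypothetical client-side sketch (not part of the patch). It assumes
// fs.defaultFS points at an HDFS cluster whose NameNode runs with
// dfs.block.access.token.enable=true; the path "/ec/file1" is invented.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileChecksum;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class EcChecksumExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    try (FileSystem fs = FileSystem.get(conf)) {
      // getFileChecksum drives FileChecksumHelper on the client side, which
      // has to present a valid block token to every DataNode holding an
      // internal block of the erasure-coded file. This is the call the
      // commit title describes as broken when block tokens are enabled.
      FileChecksum checksum = fs.getFileChecksum(new Path("/ec/file1"));
      System.out.println("checksum = " + checksum);
    }
  }
}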
TestFileChecksum.java
@@ -26,6 +26,7 @@ import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
+import org.apache.hadoop.test.GenericTestUtils;
 import org.junit.After;
 import org.junit.Assert;
 import org.junit.Before;
@@ -33,8 +34,12 @@ import org.junit.Test;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
+import org.slf4j.event.Level;
 
 import java.io.IOException;
+import java.util.Random;
+
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BLOCK_ACCESS_TOKEN_ENABLE_KEY;
 
 /**
  * This test serves a prototype to demo the idea proposed so far. It creates two
@@ -77,6 +82,7 @@ public class TestFileChecksum {
     conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_REDUNDANCY_CONSIDERLOAD_KEY,
         false);
     conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_MAX_STREAMS_KEY, 0);
+    conf.setBoolean(DFS_BLOCK_ACCESS_TOKEN_ENABLE_KEY, true);
     cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDNs).build();
     Path ecPath = new Path(ecDir);
     cluster.getFileSystem().mkdir(ecPath, FsPermission.getDirDefault());
@@ -89,6 +95,7 @@ public class TestFileChecksum {
     bytesPerCRC = conf.getInt(
         HdfsClientConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY,
         HdfsClientConfigKeys.DFS_BYTES_PER_CHECKSUM_DEFAULT);
+    GenericTestUtils.setLogLevel(FileChecksumHelper.LOG, Level.DEBUG);
   }
 
   @After
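The two setup hunks above are what make the regression reproducible: the test now starts its MiniDFSCluster with block access tokens turned on and raises FileChecksumHelper logging to DEBUG. The fragment below is a self-contained, hypothetical sketch of that pattern, not the actual TestFileChecksum code; the class name, file path, and replicated (non-striped) test file are invented, and the real test additionally writes into a directory with an erasure coding policy.

// Hypothetical JUnit sketch of the setup pattern added above: enable block
// access tokens before building the mini cluster, then ask for a checksum.
// Names here (class, path, data) are invented for illustration only.
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BLOCK_ACCESS_TOKEN_ENABLE_KEY;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileChecksum;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.junit.Assert;
import org.junit.Test;

public class BlockTokenChecksumSketch {
  @Test
  public void testChecksumWithBlockTokens() throws Exception {
    Configuration conf = new Configuration();
    conf.setBoolean(DFS_BLOCK_ACCESS_TOKEN_ENABLE_KEY, true);
    MiniDFSCluster cluster =
        new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
    try {
      cluster.waitActive();
      DistributedFileSystem fs = cluster.getFileSystem();
      Path file = new Path("/sketchFile");
      DFSTestUtil.writeFile(fs, file, "sketch data");
      // With tokens enabled, this exercises the token-checked checksum path.
      FileChecksum checksum = fs.getFileChecksum(file);
      Assert.assertNotNull(checksum);
    } finally {
      cluster.shutdown();
    }
  }
}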
@@ -518,7 +525,7 @@ public class TestFileChecksum {
 
     LocatedBlock locatedBlock = locatedBlocks.get(0);
     DatanodeInfo[] datanodes = locatedBlock.getLocations();
-    DatanodeInfo chosenDn = datanodes[0];
+    DatanodeInfo chosenDn = datanodes[new Random().nextInt(datanodes.length)];
 
     int idx = 0;
     for (DataNode dn : cluster.getDataNodes()) {