HDFS-15762. TestMultipleNNPortQOP#testMultipleNNPortOverwriteDownStream fails intermittently (#2598)

Co-authored-by: Toshihiko Uchida <toshihiko.uchida@linecorp.com>
Signed-off-by: Akira Ajisaka <aajisaka@apache.org>
(cherry picked from commit 8ec824f2ba)
Authored by touchida on 2021-01-13 11:23:07 +09:00, committed by Akira Ajisaka
parent 53460b664d
commit e2681dba22
1 changed file with 19 additions and 42 deletions

@@ -25,7 +25,6 @@ import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FileSystemTestHelper;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hdfs.protocol.datatransfer.sasl.SaslDataTransferClient;
 import org.apache.hadoop.hdfs.protocol.datatransfer.sasl.SaslDataTransferServer;
 import org.apache.hadoop.hdfs.protocol.datatransfer.sasl.SaslDataTransferTestCase;
 import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
@@ -251,55 +250,33 @@ public class TestMultipleNNPortQOP extends SaslDataTransferTestCase {
       clientConf.set(HADOOP_RPC_PROTECTION, "privacy");
       FileSystem fsPrivacy = FileSystem.get(uriPrivacyPort, clientConf);
       doTest(fsPrivacy, PATH1);
-      for (int i = 0; i < 2; i++) {
-        DataNode dn = dataNodes.get(i);
-        SaslDataTransferClient saslClient = dn.getSaslClient();
-        String qop = null;
-        // It may take some time for the qop to populate
-        // to all DNs, check in a loop.
-        for (int trial = 0; trial < 10; trial++) {
-          qop = saslClient.getTargetQOP();
-          if (qop != null) {
-            break;
-          }
-          Thread.sleep(100);
-        }
-        assertEquals("auth", qop);
-      }
+      long count = dataNodes.stream()
+          .map(dn -> dn.getSaslClient().getTargetQOP())
+          .filter("auth"::equals)
+          .count();
+      // For each datanode pipeline, targetQOPs of sasl clients in the first two
+      // datanodes become equal to auth.
+      // Note that it is not necessarily the case for all datanodes,
+      // since a datanode may be always at the last position in pipelines.
+      assertTrue("At least two qops should be auth", count >= 2);
       clientConf.set(HADOOP_RPC_PROTECTION, "integrity");
       FileSystem fsIntegrity = FileSystem.get(uriIntegrityPort, clientConf);
       doTest(fsIntegrity, PATH2);
-      for (int i = 0; i < 2; i++) {
-        DataNode dn = dataNodes.get(i);
-        SaslDataTransferClient saslClient = dn.getSaslClient();
-        String qop = null;
-        for (int trial = 0; trial < 10; trial++) {
-          qop = saslClient.getTargetQOP();
-          if (qop != null) {
-            break;
-          }
-          Thread.sleep(100);
-        }
-        assertEquals("auth", qop);
-      }
+      count = dataNodes.stream()
+          .map(dn -> dn.getSaslClient().getTargetQOP())
+          .filter("auth"::equals)
+          .count();
+      assertTrue("At least two qops should be auth", count >= 2);
       clientConf.set(HADOOP_RPC_PROTECTION, "authentication");
       FileSystem fsAuth = FileSystem.get(uriAuthPort, clientConf);
       doTest(fsAuth, PATH3);
-      for (int i = 0; i < 3; i++) {
-        DataNode dn = dataNodes.get(i);
-        SaslDataTransferServer saslServer = dn.getSaslServer();
-        String qop = null;
-        for (int trial = 0; trial < 10; trial++) {
-          qop = saslServer.getNegotiatedQOP();
-          if (qop != null) {
-            break;
-          }
-          Thread.sleep(100);
-        }
-        assertEquals("auth", qop);
-      }
+      count = dataNodes.stream()
+          .map(dn -> dn.getSaslServer().getNegotiatedQOP())
+          .filter("auth"::equals)
+          .count();
+      assertEquals("All qops should be auth", 3, count);
     } finally {
       if (cluster != null) {
         cluster.shutdown();
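
A note on the new assertions: as the added comments explain, only the upstream datanodes in a write pipeline act as SASL clients toward their downstream peers, so the last datanode in a pipeline never populates a targetQOP. The old code polled the first two datanodes by cluster index, but pipeline ordering is not deterministic, so a particular datanode can end up last in every pipeline and the poll times out. Counting "auth" QOPs across all datanodes and asserting a lower bound avoids that race. Below is a minimal, self-contained sketch of the counting idiom; QopCountSketch and the qops list are illustrative stand-ins, not part of the Hadoop test.

import java.util.Arrays;
import java.util.List;

public class QopCountSketch {
  public static void main(String[] args) {
    // Stand-in for the per-datanode target QOPs the test inspects; the null
    // models a datanode that sat at the tail of every pipeline and therefore
    // never negotiated as a SASL client.
    List<String> qops = Arrays.asList("auth", "auth", null);

    // The patch's idiom: count matches across all datanodes instead of
    // asserting on fixed indices that depend on pipeline ordering.
    long count = qops.stream()
        .filter("auth"::equals)  // null-safe: "auth".equals(null) is false
        .count();

    // Lower-bound assertion, mirroring assertTrue(..., count >= 2).
    if (count < 2) {
      throw new AssertionError("At least two qops should be auth");
    }
    System.out.println(count + " datanodes negotiated auth");
  }
}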