Merge branch 'trunk' into HDFS-6581
commit 222bf0fe67
@@ -34,6 +34,7 @@ import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.FileSystemTestHelper;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
@@ -131,7 +132,7 @@ public class TestRpcProgramNfs3 {
     String testRoot = fsHelper.getTestRootDir();
     testRootDir = new File(testRoot).getAbsoluteFile();
     final Path jksPath = new Path(testRootDir.toString(), "test.jks");
-    config.set(KeyProviderFactory.KEY_PROVIDER_PATH,
+    config.set(DFSConfigKeys.DFS_ENCRYPTION_KEY_PROVIDER_URI,
         JavaKeyStoreProvider.SCHEME_NAME + "://file" + jksPath.toUri());
     ProxyUsers.refreshSuperUserGroupsConfiguration(config);
 
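The hunk above moves the test off the generic KeyProviderFactory.KEY_PROVIDER_PATH setting and onto the HDFS-specific DFSConfigKeys.DFS_ENCRYPTION_KEY_PROVIDER_URI key that the encryption-zone code paths read. A minimal sketch of the resulting configuration, assuming Hadoop 2.6-era APIs; the helper class and method here are hypothetical, not part of the commit:

    import java.io.File;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.crypto.key.JavaKeyStoreProvider;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DFSConfigKeys;

    // Hypothetical helper: builds a Configuration whose encryption key
    // provider is a file-backed Java keystore under testRootDir.
    public class KeyProviderConfigSketch {
      static Configuration withTestKeyProvider(File testRootDir) {
        Configuration conf = new Configuration();
        Path jksPath = new Path(testRootDir.toString(), "test.jks");
        // Produces a URI such as "jceks://file/<testRootDir>/test.jks"
        conf.set(DFSConfigKeys.DFS_ENCRYPTION_KEY_PROVIDER_URI,
            JavaKeyStoreProvider.SCHEME_NAME + "://file" + jksPath.toUri());
        return conf;
      }
    }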
@@ -564,6 +564,9 @@ Release 2.6.0 - UNRELEASED
 
     HDFS-6970. Move startFile EDEK retries to the DFSClient. (wang)
 
+    HDFS-6948. DN rejects blocks if it has older UC block
+    (Eric Payne via kihwal)
+
   OPTIMIZATIONS
 
     HDFS-6690. Deduplicate xattr names in memory. (wang)
@@ -774,6 +777,11 @@ Release 2.6.0 - UNRELEASED
     HDFS-6840. Clients are always sent to the same datanode when read
     is off rack. (wang)
 
+    HDFS-7065. Pipeline close recovery race can cause block corruption (kihwal)
+
+    HDFS-7096. Fix TestRpcProgramNfs3 to use DFS_ENCRYPTION_KEY_PROVIDER_URI
+    (clamb via cmccabe)
+
   BREAKDOWN OF HDFS-6134 AND HADOOP-10150 SUBTASKS AND RELATED JIRAS
 
     HDFS-6387. HDFS CLI admin tool for creating & deleting an
@@ -948,7 +948,7 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
   }
 
   @Override // FsDatasetSpi
-  public String recoverClose(ExtendedBlock b, long newGS,
+  public synchronized String recoverClose(ExtendedBlock b, long newGS,
       long expectedBlockLen) throws IOException {
     LOG.info("Recover failed close " + b);
     // check replica's state
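Making recoverClose synchronized (presumably the HDFS-7065 pipeline close recovery race fix listed in CHANGES.txt above) serializes it against the other synchronized replica-map mutators on the same FsDatasetImpl instance, so its check-then-act on replica state cannot interleave with a concurrent mutation. A minimal illustration of that idea, with hypothetical names unrelated to the real FsDatasetImpl API:

    import java.util.HashMap;
    import java.util.Map;

    // Hypothetical sketch: two methods synchronized on the same instance
    // cannot interleave, so each read-modify-write of the map is atomic
    // with respect to the other method.
    class ReplicaStoreSketch {
      private final Map<Long, String> stateByBlockId = new HashMap<>();

      synchronized void recoverClose(long blockId) {
        // Safe check-then-act: createTemporary cannot run concurrently.
        if ("RBW".equals(stateByBlockId.get(blockId))) {
          stateByBlockId.put(blockId, "FINALIZED");
        }
      }

      synchronized void createTemporary(long blockId) {
        if (stateByBlockId.containsKey(blockId)) {
          throw new IllegalStateException("replica already exists: " + blockId);
        }
        stateByBlockId.put(blockId, "TEMPORARY");
      }
    }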
@@ -1152,9 +1152,17 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
       ExtendedBlock b) throws IOException {
     ReplicaInfo replicaInfo = volumeMap.get(b.getBlockPoolId(), b.getBlockId());
     if (replicaInfo != null) {
-      throw new ReplicaAlreadyExistsException("Block " + b +
-          " already exists in state " + replicaInfo.getState() +
-          " and thus cannot be created.");
+      if (replicaInfo.getGenerationStamp() < b.getGenerationStamp()
+          && replicaInfo instanceof ReplicaInPipeline) {
+        // Stop the previous writer
+        ((ReplicaInPipeline)replicaInfo)
+            .stopWriter(datanode.getDnConf().getXceiverStopTimeout());
+        invalidate(b.getBlockPoolId(), new Block[]{replicaInfo});
+      } else {
+        throw new ReplicaAlreadyExistsException("Block " + b +
+            " already exists in state " + replicaInfo.getState() +
+            " and thus cannot be created.");
+      }
     }
 
     FsVolumeImpl v = volumes.getNextVolume(storageType, b.getNumBytes());
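This is the HDFS-6948 change: instead of unconditionally rejecting a block whose ID is already in the volume map, createTemporary now checks whether the existing replica is still in the write pipeline with an older generation stamp; if so, the stale writer is stopped and the old replica invalidated so the newer write can proceed. The decision reduces to a small predicate, distilled here with hypothetical names (not the FsDatasetImpl API):

    // Hypothetical distillation of the branch added above.
    class CreateTemporaryPolicySketch {
      // Creation may replace an existing replica only when that replica is
      // still being written (in pipeline) AND carries an older generation
      // stamp than the incoming block; otherwise the DataNode still throws
      // ReplicaAlreadyExistsException, exactly as before.
      static boolean shouldReplaceExistingReplica(long existingGenStamp,
          boolean existingIsInPipeline, long incomingGenStamp) {
        return existingGenStamp < incomingGenStamp && existingIsInPipeline;
      }
    }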
@@ -111,7 +111,7 @@ public class TestWriteToReplica {
 
   // test writeToTemporary
   @Test
-  public void testWriteToTempoary() throws Exception {
+  public void testWriteToTemporary() throws Exception {
     MiniDFSCluster cluster = new MiniDFSCluster.Builder(new HdfsConfiguration()).build();
     try {
       cluster.waitActive();
@@ -475,5 +475,28 @@ public class TestWriteToReplica {
     }
 
     dataSet.createTemporary(StorageType.DEFAULT, blocks[NON_EXISTENT]);
+
+    try {
+      dataSet.createTemporary(StorageType.DEFAULT, blocks[NON_EXISTENT]);
+      Assert.fail("Should not have created a replica that had already been "
+          + "created " + blocks[NON_EXISTENT]);
+    } catch (Exception e) {
+      Assert.assertTrue(
+          e.getMessage().contains(blocks[NON_EXISTENT].getBlockName()));
+      Assert.assertTrue(e instanceof ReplicaAlreadyExistsException);
+    }
+
+    long newGenStamp = blocks[NON_EXISTENT].getGenerationStamp() * 10;
+    blocks[NON_EXISTENT].setGenerationStamp(newGenStamp);
+    try {
+      ReplicaInPipeline replicaInfo =
+          dataSet.createTemporary(StorageType.DEFAULT, blocks[NON_EXISTENT]);
+      Assert.assertTrue(replicaInfo.getGenerationStamp() == newGenStamp);
+      Assert.assertTrue(
+          replicaInfo.getBlockId() == blocks[NON_EXISTENT].getBlockId());
+    } catch (ReplicaAlreadyExistsException e) {
+      Assert.fail("createTemporary() should have removed the block with the older "
+          + "genstamp and replaced it with the newer one: " + blocks[NON_EXISTENT]);
+    }
   }
 }