Merge branch 'trunk' into HDFS-6581

arp 2014-09-19 10:02:27 -07:00
commit 222bf0fe67
4 changed files with 46 additions and 6 deletions

TestRpcProgramNfs3.java

@@ -34,6 +34,7 @@ import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.FileSystemTestHelper;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
@@ -131,7 +132,7 @@ public class TestRpcProgramNfs3 {
 
     String testRoot = fsHelper.getTestRootDir();
     testRootDir = new File(testRoot).getAbsoluteFile();
     final Path jksPath = new Path(testRootDir.toString(), "test.jks");
-    config.set(KeyProviderFactory.KEY_PROVIDER_PATH,
+    config.set(DFSConfigKeys.DFS_ENCRYPTION_KEY_PROVIDER_URI,
         JavaKeyStoreProvider.SCHEME_NAME + "://file" + jksPath.toUri());
     ProxyUsers.refreshSuperUserGroupsConfiguration(config);
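
The hunk above swaps the generic KeyProviderFactory.KEY_PROVIDER_PATH key for the HDFS-specific DFSConfigKeys.DFS_ENCRYPTION_KEY_PROVIDER_URI when pointing the test at a local JavaKeyStoreProvider. A minimal standalone sketch of the same wiring, assuming a throwaway test directory ("/tmp/nfs3-test" and the class name are illustrative; the rest is the stock Hadoop API):

// Sketch only: mirrors the configuration pattern in the hunk above.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.crypto.key.JavaKeyStoreProvider;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.HdfsConfiguration;

public class KeyProviderWiringSketch {
  public static Configuration newConf() {
    Configuration conf = new HdfsConfiguration();
    Path jksPath = new Path("/tmp/nfs3-test", "test.jks");
    // Yields a provider URI of the form jceks://file/tmp/nfs3-test/test.jks
    conf.set(DFSConfigKeys.DFS_ENCRYPTION_KEY_PROVIDER_URI,
        JavaKeyStoreProvider.SCHEME_NAME + "://file" + jksPath.toUri());
    return conf;
  }
}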

CHANGES.txt

@@ -564,6 +564,9 @@ Release 2.6.0 - UNRELEASED
 
     HDFS-6970. Move startFile EDEK retries to the DFSClient. (wang)
 
+    HDFS-6948. DN rejects blocks if it has older UC block
+    (Eric Payne via kihwal)
+
   OPTIMIZATIONS
 
     HDFS-6690. Deduplicate xattr names in memory. (wang)
@@ -774,6 +777,11 @@ Release 2.6.0 - UNRELEASED
     HDFS-6840. Clients are always sent to the same datanode when read
     is off rack. (wang)
 
+    HDFS-7065. Pipeline close recovery race can cause block corruption (kihwal)
+
+    HDFS-7096. Fix TestRpcProgramNfs3 to use DFS_ENCRYPTION_KEY_PROVIDER_URI
+    (clamb via cmccabe)
+
   BREAKDOWN OF HDFS-6134 AND HADOOP-10150 SUBTASKS AND RELATED JIRAS
 
     HDFS-6387. HDFS CLI admin tool for creating & deleting an

FsDatasetImpl.java

@@ -948,7 +948,7 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
   }
 
   @Override // FsDatasetSpi
-  public String recoverClose(ExtendedBlock b, long newGS,
+  public synchronized String recoverClose(ExtendedBlock b, long newGS,
       long expectedBlockLen) throws IOException {
     LOG.info("Recover failed close " + b);
     // check replica's state
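
The only change in this hunk is the synchronized keyword: close recovery now takes the FsDatasetImpl monitor, so it cannot interleave with the replica-map check and eviction added to createTemporary() in the next hunk. A toy illustration of the serialization this buys (a simplified stand-in class, not the real dataset code):

import java.util.HashMap;
import java.util.Map;

// Toy stand-in, not FsDatasetImpl: both methods guard the shared map with
// the same monitor, so recovery never observes a half-replaced replica.
class DatasetMonitorSketch {
  private final Map<Long, String> replicaState = new HashMap<>();

  synchronized String recoverClose(long blockId) {
    // Reads and finalizes the replica atomically w.r.t. createTemporary().
    return replicaState.put(blockId, "FINALIZED");
  }

  synchronized void createTemporary(long blockId) {
    // May evict a stale entry before inserting the new one.
    replicaState.remove(blockId);
    replicaState.put(blockId, "TEMPORARY");
  }
}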
@@ -1152,10 +1152,18 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
       ExtendedBlock b) throws IOException {
     ReplicaInfo replicaInfo = volumeMap.get(b.getBlockPoolId(), b.getBlockId());
     if (replicaInfo != null) {
-      throw new ReplicaAlreadyExistsException("Block " + b +
-          " already exists in state " + replicaInfo.getState() +
-          " and thus cannot be created.");
+      if (replicaInfo.getGenerationStamp() < b.getGenerationStamp()
+          && replicaInfo instanceof ReplicaInPipeline) {
+        // Stop the previous writer
+        ((ReplicaInPipeline)replicaInfo)
+            .stopWriter(datanode.getDnConf().getXceiverStopTimeout());
+        invalidate(b.getBlockPoolId(), new Block[]{replicaInfo});
+      } else {
+        throw new ReplicaAlreadyExistsException("Block " + b +
+            " already exists in state " + replicaInfo.getState() +
+            " and thus cannot be created.");
+      }
     }
 
     FsVolumeImpl v = volumes.getNextVolume(storageType, b.getNumBytes());
     // create a temporary file to hold block in the designated volume
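
With this change, createTemporary() distinguishes a genuinely conflicting replica from a stale pipeline replica left behind by a failed writer: only when the existing replica is a ReplicaInPipeline with an older generation stamp is its writer stopped and the replica invalidated; any other conflict still fails fast with ReplicaAlreadyExistsException. The rule, pulled out as a standalone predicate (a sketch, not the actual FsDatasetImpl code):

import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.server.datanode.ReplicaInPipeline;
import org.apache.hadoop.hdfs.server.datanode.ReplicaInfo;

final class TemporaryReplicaRuleSketch {
  // True when the existing replica may be evicted in favor of the incoming
  // block: it must be an in-flight pipeline replica with an older genstamp.
  static boolean shouldEvict(ReplicaInfo existing, ExtendedBlock incoming) {
    return existing.getGenerationStamp() < incoming.getGenerationStamp()
        && existing instanceof ReplicaInPipeline;
  }
}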

TestWriteToReplica.java

@@ -111,7 +111,7 @@ public class TestWriteToReplica {
 
   // test writeToTemporary
   @Test
-  public void testWriteToTempoary() throws Exception {
+  public void testWriteToTemporary() throws Exception {
     MiniDFSCluster cluster = new MiniDFSCluster.Builder(new HdfsConfiguration()).build();
     try {
       cluster.waitActive();
@@ -475,5 +475,28 @@ public class TestWriteToReplica {
     }
 
     dataSet.createTemporary(StorageType.DEFAULT, blocks[NON_EXISTENT]);
+
+    try {
+      dataSet.createTemporary(StorageType.DEFAULT, blocks[NON_EXISTENT]);
+      Assert.fail("Should not have created a replica that had already been "
+          + "created " + blocks[NON_EXISTENT]);
+    } catch (Exception e) {
+      Assert.assertTrue(
+          e.getMessage().contains(blocks[NON_EXISTENT].getBlockName()));
+      Assert.assertTrue(e instanceof ReplicaAlreadyExistsException);
+    }
+
+    long newGenStamp = blocks[NON_EXISTENT].getGenerationStamp() * 10;
+    blocks[NON_EXISTENT].setGenerationStamp(newGenStamp);
+    try {
+      ReplicaInPipeline replicaInfo =
+          dataSet.createTemporary(StorageType.DEFAULT, blocks[NON_EXISTENT]);
+      Assert.assertTrue(replicaInfo.getGenerationStamp() == newGenStamp);
+      Assert.assertTrue(
+          replicaInfo.getBlockId() == blocks[NON_EXISTENT].getBlockId());
+    } catch (ReplicaAlreadyExistsException e) {
+      Assert.fail("createRbw() Should have removed the block with the older "
+          + "genstamp and replaced it with the newer one: " + blocks[NON_EXISTENT]);
+    }
   }
 }