HDFS-6424. Merge r1599397 from trunk.
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/branch-2@1599398 13f79535-47bb-0310-9956-ffa450edef68
parent adc4dc57c8
commit b712ed2b23
@@ -315,6 +315,9 @@ Release 2.5.0 - UNRELEASED
     HDFS-6404. HttpFS should use a 000 umask for mkdir and create
     operations. (yoderme via tucu)

+    HDFS-6424. blockReport doesn't need to invalidate blocks on SBN. (Ming Ma
+    via jing9)
+
 Release 2.4.1 - UNRELEASED

   INCOMPATIBLE CHANGES
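In an HA pair, only the active NameNode populates the replication and invalidation queues; a standby never ships deletion work to DataNodes, and its queues are rebuilt when it transitions to active, so any invalidation work it accumulates from block reports is pure waste. The fix gates both addToInvalidates() entry points in BlockManager on that state, as the two hunks below show.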
@@ -1050,6 +1050,9 @@ public class BlockManager {
    * datanode and log the operation
    */
   void addToInvalidates(final Block block, final DatanodeInfo datanode) {
+    if (!namesystem.isPopulatingReplQueues()) {
+      return;
+    }
     invalidateBlocks.add(block, datanode, true);
   }

@@ -1058,6 +1061,9 @@ public class BlockManager {
    * datanodes.
    */
   private void addToInvalidates(Block b) {
+    if (!namesystem.isPopulatingReplQueues()) {
+      return;
+    }
     StringBuilder datanodes = new StringBuilder();
     for(DatanodeStorageInfo storage : blocksMap.getStorages(b, State.NORMAL)) {
       final DatanodeDescriptor node = storage.getDatanodeDescriptor();
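Both hunks apply the same guard: work is queued for invalidation only when namesystem.isPopulatingReplQueues() is true, which holds on the active NameNode but not on a standby. A minimal, self-contained sketch of the pattern; Namesystem and InvalidateQueue here are hypothetical stand-ins for FSNamesystem and the BlockManager's invalidateBlocks structure, not the committed code:

    import java.util.ArrayList;
    import java.util.List;

    // Sketch only: simplified stand-ins for the real HDFS types.
    class StandbyGuardSketch {
      interface Namesystem {
        boolean isPopulatingReplQueues(); // true on the active NN, false on a standby
      }

      static class InvalidateQueue {
        private final Namesystem namesystem;
        private final List<String> pendingDeletions = new ArrayList<>();

        InvalidateQueue(Namesystem namesystem) {
          this.namesystem = namesystem;
        }

        void addToInvalidates(String blockId) {
          // A standby never ships deletion work to DataNodes, and its
          // queues are rebuilt on transition to active, so skip the
          // bookkeeping entirely.
          if (!namesystem.isPopulatingReplQueues()) {
            return;
          }
          pendingDeletions.add(blockId);
        }

        int getPendingDeletionBlocksCount() {
          return pendingDeletions.size();
        }
      }
    }

The new test below verifies the behavior end to end on a MiniDFSCluster.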
@@ -0,0 +1,116 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.namenode.ha;
+
+import static org.junit.Assert.assertEquals;
+
+import java.io.IOException;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.commons.logging.impl.Log4JLogger;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.DFSTestUtil;
+import org.apache.hadoop.hdfs.HAUtil;
+import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.MiniDFSCluster.DataNodeProperties;
+import org.apache.hadoop.hdfs.MiniDFSNNTopology;
+import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
+import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockManagerTestUtil;
+import org.apache.hadoop.hdfs.server.datanode.DataNode;
+import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
+import org.apache.hadoop.hdfs.server.namenode.NameNode;
+import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
+import org.apache.hadoop.test.GenericTestUtils;
+import org.apache.log4j.Level;
+import org.junit.Assert;
+import org.junit.Test;
+
+import com.google.common.base.Supplier;
+
+/**
+ * Makes sure that standby doesn't do the unnecessary block management such as
+ * invalidate block, etc.
+ */
+public class TestStandbyBlockManagement {
+  protected static final Log LOG = LogFactory.getLog(
+      TestStandbyBlockManagement.class);
+  private static final String TEST_FILE_DATA = "hello world";
+  private static final String TEST_FILE = "/TestStandbyBlockManagement";
+  private static final Path TEST_FILE_PATH = new Path(TEST_FILE);
+
+  static {
+    ((Log4JLogger)LogFactory.getLog(FSNamesystem.class)).getLogger().setLevel(Level.ALL);
+    ((Log4JLogger)LogFactory.getLog(BlockManager.class)).getLogger().setLevel(Level.ALL);
+    ((Log4JLogger)NameNode.stateChangeLog).getLogger().setLevel(Level.ALL);
+  }
+
+  @Test(timeout=60000)
+  public void testInvalidateBlock() throws Exception {
+    Configuration conf = new Configuration();
+    HAUtil.setAllowStandbyReads(conf, true);
+    conf.setInt(DFSConfigKeys.DFS_HA_TAILEDITS_PERIOD_KEY, 1);
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
+        .nnTopology(MiniDFSNNTopology.simpleHATopology())
+        .numDataNodes(3)
+        .build();
+    try {
+      cluster.waitActive();
+      cluster.transitionToActive(0);
+
+      NameNode nn1 = cluster.getNameNode(0);
+      NameNode nn2 = cluster.getNameNode(1);
+
+      FileSystem fs = HATestUtil.configureFailoverFs(cluster, conf);
+
+      Thread.sleep(1000);
+      LOG.info("==================================");
+      DFSTestUtil.writeFile(fs, TEST_FILE_PATH, TEST_FILE_DATA);
+      // Have to force an edit log roll so that the standby catches up
+      nn1.getRpcServer().rollEditLog();
+      LOG.info("==================================");
+
+      // delete the file
+      fs.delete(TEST_FILE_PATH, false);
+      BlockManagerTestUtil.computeAllPendingWork(
+          nn1.getNamesystem().getBlockManager());
+
+      nn1.getRpcServer().rollEditLog();
+
+      // standby nn doesn't need to invalidate blocks.
+      assertEquals(0,
+          nn2.getNamesystem().getBlockManager().getPendingDeletionBlocksCount());
+
+      cluster.triggerHeartbeats();
+      cluster.triggerBlockReports();
+
+      // standby nn doesn't need to invalidate blocks.
+      assertEquals(0,
+          nn2.getNamesystem().getBlockManager().getPendingDeletionBlocksCount());
+
+    } finally {
+      cluster.shutdown();
+    }
+  }
+
+}
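The test exercises both guarded entry points: the delete followed by an edit log roll drives the standby's edit-replay path into the private addToInvalidates(Block), while triggerBlockReports() drives the block-report-processing path that the issue title refers to. In both cases the standby's pending-deletion count must stay at zero.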