diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Reconfigurable.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Reconfigurable.java
index 466915d6346..c93dc31a881 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Reconfigurable.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Reconfigurable.java
@@ -36,7 +36,7 @@ public interface Reconfigurable extends Configurable {
    * If the property cannot be changed, throw a
    * {@link ReconfigurationException}.
    */
-  public String reconfigureProperty(String property, String newVal)
+  void reconfigureProperty(String property, String newVal)
       throws ReconfigurationException;
 
   /**
@@ -46,12 +46,10 @@ public String reconfigureProperty(String property, String newVal)
    * then changeConf should not throw an exception when changing
    * this property.
    */
-  public boolean isPropertyReconfigurable(String property);
+  boolean isPropertyReconfigurable(String property);
 
   /**
    * Return all the properties that can be changed at run time.
    */
-  public Collection<String> getReconfigurableProperties();
-
-
+  Collection<String> getReconfigurableProperties();
 }
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/ReconfigurableBase.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/ReconfigurableBase.java
index e50b85a8b48..681ca2bf777 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/ReconfigurableBase.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/ReconfigurableBase.java
@@ -112,14 +112,14 @@ private static class ReconfigurationThread extends Thread {
     // See {@link ReconfigurationServlet#applyChanges}
     public void run() {
       LOG.info("Starting reconfiguration task.");
-      Configuration oldConf = this.parent.getConf();
-      Configuration newConf = this.parent.getNewConf();
-      Collection<PropertyChange> changes =
-          this.parent.getChangedProperties(newConf, oldConf);
+      final Configuration oldConf = parent.getConf();
+      final Configuration newConf = parent.getNewConf();
+      final Collection<PropertyChange> changes =
+          parent.getChangedProperties(newConf, oldConf);
       Map<PropertyChange, Optional<String>> results = Maps.newHashMap();
       for (PropertyChange change : changes) {
         String errorMessage = null;
-        if (!this.parent.isPropertyReconfigurable(change.prop)) {
+        if (!parent.isPropertyReconfigurable(change.prop)) {
           LOG.info(String.format(
               "Property %s is not configurable: old value: %s, new value: %s",
               change.prop, change.oldVal, change.newVal));
@@ -130,17 +130,23 @@ public void run() {
               + "\" to \""
               + ((change.newVal == null) ? "<default>" : change.newVal)
               + "\".");
           try {
-            this.parent.reconfigurePropertyImpl(change.prop, change.newVal);
+            String effectiveValue =
+                parent.reconfigurePropertyImpl(change.prop, change.newVal);
+            if (change.newVal != null) {
+              oldConf.set(change.prop, effectiveValue);
+            } else {
+              oldConf.unset(change.prop);
+            }
           } catch (ReconfigurationException e) {
             errorMessage = e.getCause().getMessage();
           }
         }
         results.put(change, Optional.fromNullable(errorMessage));
       }
-      synchronized (this.parent.reconfigLock) {
-        this.parent.endTime = Time.now();
-        this.parent.status = Collections.unmodifiableMap(results);
-        this.parent.reconfigThread = null;
+      synchronized (parent.reconfigLock) {
+        parent.endTime = Time.now();
+        parent.status = Collections.unmodifiableMap(results);
+        parent.reconfigThread = null;
       }
     }
   }
@@ -203,21 +209,19 @@ public void shutdownReconfigurationTask() {
    * reconfigureProperty.
    */
   @Override
-  public final String reconfigureProperty(String property, String newVal)
+  public final void reconfigureProperty(String property, String newVal)
       throws ReconfigurationException {
     if (isPropertyReconfigurable(property)) {
       LOG.info("changing property " + property + " to " + newVal);
-      String oldVal;
       synchronized(getConf()) {
-        oldVal = getConf().get(property);
-        reconfigurePropertyImpl(property, newVal);
+        getConf().get(property);
+        String effectiveValue = reconfigurePropertyImpl(property, newVal);
         if (newVal != null) {
-          getConf().set(property, newVal);
+          getConf().set(property, effectiveValue);
         } else {
           getConf().unset(property);
         }
       }
-      return oldVal;
     } else {
       throw new ReconfigurationException(property, newVal,
                                          getConf().get(property));
@@ -251,8 +255,15 @@ public boolean isPropertyReconfigurable(String property) {
    * that is being changed. If this object owns other Reconfigurable objects
    * reconfigureProperty should be called recursively to make sure that
    * to make sure that the configuration of these objects is updated.
+   *
+   * @param property Name of the property that is being reconfigured.
+   * @param newVal Proposed new value of the property.
+   * @return Effective new value of the property. This may be different from
+   *         newVal.
+   *
+   * @throws ReconfigurationException if there was an error applying newVal.
    */
-  protected abstract void reconfigurePropertyImpl(String property, String newVal)
-      throws ReconfigurationException;
+  protected abstract String reconfigurePropertyImpl(
+      String property, String newVal) throws ReconfigurationException;
 }
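Under the revised contract, `reconfigurePropertyImpl` applies the change and returns the value that actually took effect; `ReconfigurableBase` then caches that effective value in its `Configuration` (or unsets the key when the new value is null) instead of blindly storing the caller's input. A minimal sketch of a conforming subclass, assuming a hypothetical service class and property key (neither appears in this patch):

```java
import java.util.Collection;
import java.util.Collections;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.ReconfigurableBase;
import org.apache.hadoop.conf.ReconfigurationException;

// Hypothetical subclass, for illustration only.
public class ThrottlingService extends ReconfigurableBase {
  // Illustrative key; any reconfigurable property follows the same pattern.
  private static final String LIMIT_KEY = "example.throttle.limit";
  private static final int LIMIT_DEFAULT = 100;

  private volatile int limit = LIMIT_DEFAULT;

  public ThrottlingService(Configuration conf) {
    super(conf);
  }

  @Override
  protected Configuration getNewConf() {
    // Reload a fresh configuration; a real service might return a subclass.
    return new Configuration();
  }

  @Override
  public Collection<String> getReconfigurableProperties() {
    return Collections.singletonList(LIMIT_KEY);
  }

  @Override
  protected String reconfigurePropertyImpl(String property, String newVal)
      throws ReconfigurationException {
    // A null newVal means "revert to the default". Returning the applied
    // value lets ReconfigurableBase cache what actually took effect, which
    // may differ from the raw string the caller passed in.
    try {
      limit = (newVal == null) ? LIMIT_DEFAULT : Integer.parseInt(newVal);
      return Integer.toString(limit);
    } catch (NumberFormatException nfe) {
      throw new ReconfigurationException(property, newVal,
          getConf().get(property), nfe);
    }
  }
}
```

Note that when `newVal` is null, `reconfigureProperty` ignores the returned value and unsets the key, so the cached configuration falls back to the default.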
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestReconfiguration.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestReconfiguration.java
index 5f0516ae261..610c08a584e 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestReconfiguration.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestReconfiguration.java
@@ -19,6 +19,7 @@
 package org.apache.hadoop.conf;
 
 import com.google.common.base.Optional;
+import com.google.common.base.Supplier;
 import com.google.common.collect.Lists;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.util.Time;
@@ -27,13 +28,13 @@
 import org.junit.Before;
 
 import static org.hamcrest.CoreMatchers.containsString;
+import static org.hamcrest.CoreMatchers.is;
 import static org.hamcrest.MatcherAssert.assertThat;
 import static org.junit.Assert.*;
 import static org.junit.Assert.assertEquals;
 import static org.mockito.Matchers.any;
 import static org.mockito.Matchers.anyString;
 import static org.mockito.Matchers.eq;
-import static org.mockito.Mockito.doNothing;
 import static org.mockito.Mockito.doReturn;
 import static org.mockito.Mockito.doThrow;
 import static org.mockito.Mockito.spy;
@@ -44,6 +45,7 @@
 import java.util.List;
 import java.util.Map;
 import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.TimeoutException;
 
 public class TestReconfiguration {
   private Configuration conf1;
@@ -129,9 +131,10 @@ public Collection<String> getReconfigurableProperties() {
     }
 
     @Override
-    public synchronized void reconfigurePropertyImpl(
+    public synchronized String reconfigurePropertyImpl(
         String property, String newVal) throws ReconfigurationException {
       // do nothing
+      return newVal;
     }
 
     /**
@@ -354,13 +357,14 @@ public Collection<String> getReconfigurableProperties() {
     }
 
     @Override
-    public synchronized void reconfigurePropertyImpl(String property,
+    public synchronized String reconfigurePropertyImpl(String property,
         String newVal) throws ReconfigurationException {
       try {
         latch.await();
       } catch (InterruptedException e) {
         // Ignore
       }
+      return newVal;
     }
   }
@@ -395,9 +399,9 @@ public void testAsyncReconfigure()
     doReturn(false).when(dummy).isPropertyReconfigurable(eq("name2"));
     doReturn(true).when(dummy).isPropertyReconfigurable(eq("name3"));
 
-    doNothing().when(dummy)
+    doReturn("dummy").when(dummy)
         .reconfigurePropertyImpl(eq("name1"), anyString());
-    doNothing().when(dummy)
+    doReturn("dummy").when(dummy)
         .reconfigurePropertyImpl(eq("name2"), anyString());
     doThrow(new ReconfigurationException("NAME3", "NEW3", "OLD3",
         new IOException("io exception")))
@@ -474,4 +478,131 @@ public void testStartReconfigurationFailureDueToExistingRunningTask()
       GenericTestUtils.assertExceptionContains("The server is stopped", e);
     }
   }
-}
\ No newline at end of file
+
+  /**
+   * Ensure that {@link ReconfigurableBase#reconfigureProperty} updates the
+   * parent's cached configuration on success.
+   * @throws ReconfigurationException
+   */
+  @Test (timeout=300000)
+  public void testConfIsUpdatedOnSuccess() throws ReconfigurationException {
+    final String property = "FOO";
+    final String value1 = "value1";
+    final String value2 = "value2";
+
+    final Configuration conf = new Configuration();
+    conf.set(property, value1);
+    final Configuration newConf = new Configuration();
+    newConf.set(property, value2);
+
+    final ReconfigurableBase reconfigurable = makeReconfigurable(
+        conf, newConf, Arrays.asList(property));
+
+    reconfigurable.reconfigureProperty(property, value2);
+    assertThat(reconfigurable.getConf().get(property), is(value2));
+  }
+
+  /**
+   * Ensure that {@link ReconfigurableBase#startReconfigurationTask} updates
+   * its parent's cached configuration on success.
+   * @throws IOException
+   */
+  @Test (timeout=300000)
+  public void testConfIsUpdatedOnSuccessAsync() throws ReconfigurationException,
+      TimeoutException, InterruptedException, IOException {
+    final String property = "FOO";
+    final String value1 = "value1";
+    final String value2 = "value2";
+
+    final Configuration conf = new Configuration();
+    conf.set(property, value1);
+    final Configuration newConf = new Configuration();
+    newConf.set(property, value2);
+
+    final ReconfigurableBase reconfigurable = makeReconfigurable(
+        conf, newConf, Arrays.asList(property));
+
+    // Kick off a reconfiguration task and wait until it completes.
+    reconfigurable.startReconfigurationTask();
+    GenericTestUtils.waitFor(new Supplier<Boolean>() {
+      @Override
+      public Boolean get() {
+        return reconfigurable.getReconfigurationTaskStatus().stopped();
+      }
+    }, 100, 60000);
+    assertThat(reconfigurable.getConf().get(property), is(value2));
+  }
+
+  /**
+   * Ensure that {@link ReconfigurableBase#reconfigureProperty} unsets the
+   * property in its parent's configuration when the new value is null.
+   * @throws ReconfigurationException
+   */
+  @Test (timeout=300000)
+  public void testConfIsUnset() throws ReconfigurationException {
+    final String property = "FOO";
+    final String value1 = "value1";
+
+    final Configuration conf = new Configuration();
+    conf.set(property, value1);
+    final Configuration newConf = new Configuration();
+
+    final ReconfigurableBase reconfigurable = makeReconfigurable(
+        conf, newConf, Arrays.asList(property));
+
+    reconfigurable.reconfigureProperty(property, null);
+    assertNull(reconfigurable.getConf().get(property));
+  }
+
+  /**
+   * Ensure that {@link ReconfigurableBase#startReconfigurationTask} unsets the
+   * property in its parent's configuration when the new value is null.
+   * @throws IOException
+   */
+  @Test (timeout=300000)
+  public void testConfIsUnsetAsync() throws ReconfigurationException,
+      IOException, TimeoutException, InterruptedException {
+    final String property = "FOO";
+    final String value1 = "value1";
+
+    final Configuration conf = new Configuration();
+    conf.set(property, value1);
+    final Configuration newConf = new Configuration();
+
+    final ReconfigurableBase reconfigurable = makeReconfigurable(
+        conf, newConf, Arrays.asList(property));
+
+    // Kick off a reconfiguration task and wait until it completes.
+    reconfigurable.startReconfigurationTask();
+    GenericTestUtils.waitFor(new Supplier<Boolean>() {
+      @Override
+      public Boolean get() {
+        return reconfigurable.getReconfigurationTaskStatus().stopped();
+      }
+    }, 100, 60000);
+    assertNull(reconfigurable.getConf().get(property));
+  }
+
+  private ReconfigurableBase makeReconfigurable(
+      final Configuration oldConf, final Configuration newConf,
+      final Collection<String> reconfigurableProperties) {
+
+    return new ReconfigurableBase(oldConf) {
+      @Override
+      protected Configuration getNewConf() {
+        return newConf;
+      }
+
+      @Override
+      public Collection<String> getReconfigurableProperties() {
+        return reconfigurableProperties;
+      }
+
+      @Override
+      protected String reconfigurePropertyImpl(
+          String property, String newVal) throws ReconfigurationException {
+        return newVal;
+      }
+    };
+  }
+}
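Both async tests above use the same polling idiom: kick off the task, then spin until `getReconfigurationTaskStatus().stopped()` reports that the reconfiguration thread has published its results. A compact sketch of that idiom as a reusable helper (the method name is ours; every API it calls appears in this patch):

```java
// Start an asynchronous reconfiguration and block until it completes.
// The poll interval and timeout (100 ms / 60 s) mirror the tests above.
private void reconfigureAndWait(final ReconfigurableBase reconfigurable)
    throws IOException, TimeoutException, InterruptedException {
  reconfigurable.startReconfigurationTask();
  GenericTestUtils.waitFor(new Supplier<Boolean>() {
    @Override
    public Boolean get() {
      // stopped() flips to true once the reconfiguration thread has
      // recorded its status and cleared itself under reconfigLock.
      return reconfigurable.getReconfigurationTaskStatus().stopped();
    }
  }, 100, 60000);
  // The cached Configuration now holds the effective values returned by
  // reconfigurePropertyImpl, with null-valued properties unset.
}
```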
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 77bcf6063f0..134336ef806 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -1757,6 +1757,9 @@ Release 2.8.0 - UNRELEASED
     HDFS-9790. HDFS Balancer should exit with a proper message if upgrade is
     not finalized. (Xiaobing Zhou via Arpit Agarwal)
 
+    HDFS-9801. ReconfigurableBase should update the cached configuration.
+    (Arpit Agarwal)
+
 Release 2.7.3 - UNRELEASED
 
   INCOMPATIBLE CHANGES
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
index 4f94da491a9..f41dff384a1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
@@ -504,70 +504,80 @@ protected Configuration getNewConf() {
     return new HdfsConfiguration();
   }
 
+  /**
+   * {@inheritDoc}.
+   */
   @Override
-  public void reconfigurePropertyImpl(String property, String newVal)
+  public String reconfigurePropertyImpl(String property, String newVal)
       throws ReconfigurationException {
-    if (property.equals(DFS_DATANODE_DATA_DIR_KEY)) {
-      IOException rootException = null;
-      try {
-        LOG.info("Reconfiguring " + property + " to " + newVal);
-        this.refreshVolumes(newVal);
-      } catch (IOException e) {
-        rootException = e;
-      } finally {
-        // Send a full block report to let NN acknowledge the volume changes.
+    switch (property) {
+    case DFS_DATANODE_DATA_DIR_KEY: {
+      IOException rootException = null;
       try {
-        triggerBlockReport(
-            new BlockReportOptions.Factory().setIncremental(false).build());
+        LOG.info("Reconfiguring " + property + " to " + newVal);
+        this.refreshVolumes(newVal);
+        return conf.get(DFS_DATANODE_DATA_DIR_KEY);
       } catch (IOException e) {
-        LOG.warn("Exception while sending the block report after refreshing"
-            + " volumes " + property + " to " + newVal, e);
-        if (rootException == null) {
-          rootException = e;
+        rootException = e;
+      } finally {
+        // Send a full block report to let NN acknowledge the volume changes.
+        try {
+          triggerBlockReport(
+              new BlockReportOptions.Factory().setIncremental(false).build());
+        } catch (IOException e) {
+          LOG.warn("Exception while sending the block report after refreshing"
+              + " volumes " + property + " to " + newVal, e);
+          if (rootException == null) {
+            rootException = e;
+          }
+        } finally {
+          if (rootException != null) {
+            throw new ReconfigurationException(property, newVal,
+                getConf().get(property), rootException);
+          }
         }
+      }
+      break;
+    }
+    case DFS_DATANODE_BALANCE_MAX_NUM_CONCURRENT_MOVES_KEY: {
+      ReconfigurationException rootException = null;
+      try {
+        LOG.info("Reconfiguring " + property + " to " + newVal);
+        int movers;
+        if (newVal == null) {
+          // set to default
+          movers = DFS_DATANODE_BALANCE_MAX_NUM_CONCURRENT_MOVES_DEFAULT;
+        } else {
+          movers = Integer.parseInt(newVal);
+          if (movers <= 0) {
+            rootException = new ReconfigurationException(
+                property,
+                newVal,
+                getConf().get(property),
+                new IllegalArgumentException(
+                    "balancer max concurrent movers must be larger than 0"));
+          }
+        }
+        xserver.updateBalancerMaxConcurrentMovers(movers);
+        return Integer.toString(movers);
+      } catch (NumberFormatException nfe) {
+        rootException = new ReconfigurationException(
+            property, newVal, getConf().get(property), nfe);
       } finally {
         if (rootException != null) {
-          throw new ReconfigurationException(property, newVal,
-              getConf().get(property), rootException);
+          LOG.warn(String.format(
+              "Exception in updating balancer max concurrent movers %s to %s",
+              property, newVal), rootException);
+          throw rootException;
         }
       }
+      break;
     }
-    } else if (property.equals(
-        DFS_DATANODE_BALANCE_MAX_NUM_CONCURRENT_MOVES_KEY)) {
-      ReconfigurationException rootException = null;
-      try {
-        LOG.info("Reconfiguring " + property + " to " + newVal);
-        int movers;
-        if (newVal == null) {
-          // set to default
-          movers = DFS_DATANODE_BALANCE_MAX_NUM_CONCURRENT_MOVES_DEFAULT;
-        } else {
-          movers = Integer.parseInt(newVal);
-          if (movers <= 0) {
-            rootException = new ReconfigurationException(
-                property,
-                newVal,
-                getConf().get(property),
-                new IllegalArgumentException(
-                    "balancer max concurrent movers must be larger than 0"));
-          }
-        }
-        xserver.updateBalancerMaxConcurrentMovers(movers);
-      } catch(NumberFormatException nfe) {
-        rootException = new ReconfigurationException(
-            property, newVal, getConf().get(property), nfe);
-      } finally {
-        if (rootException != null) {
-          LOG.warn(String.format(
-              "Exception in updating balancer max concurrent movers %s to %s",
-              property, newVal), rootException);
-          throw rootException;
-        }
-      }
-    } else {
-      throw new ReconfigurationException(
-          property, newVal, getConf().get(property));
+    default:
+      break;
     }
+    throw new ReconfigurationException(
+        property, newVal, getConf().get(property));
   }
 
   /**
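Note that the data-dir case returns `conf.get(DFS_DATANODE_DATA_DIR_KEY)` rather than echoing `newVal`: the effective value is whatever ended up in the DataNode's configuration after `refreshVolumes` ran, which need not match the raw request. A hypothetical caller (the variable names and paths below are illustrative, not from this patch; the tests here invoke the protected method the same way, from the same package) could compare the two to see what was actually applied:

```java
// "dn" is a running DataNode; the requested paths are made up.
String requested = "/data1,/data2,/flaky-disk";
String effective =
    dn.reconfigurePropertyImpl(DFS_DATANODE_DATA_DIR_KEY, requested);
if (!requested.equals(effective)) {
  // The DataNode applied the change, but the effective directory list
  // differs from what was requested.
  LOG.warn("Requested " + requested + " but effective value is " + effective);
}
```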
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeHotSwapVolumes.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeHotSwapVolumes.java
index 212d2e6ec60..725bc6ba878 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeHotSwapVolumes.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeHotSwapVolumes.java
@@ -284,7 +284,10 @@ private void addVolumes(int numNewVolumes)
     }
     String newDataDir = newDataDirBuf.toString();
-    dn.reconfigurePropertyImpl(DFS_DATANODE_DATA_DIR_KEY, newDataDir);
+    assertThat(
+        "DN did not update its own config",
+        dn.reconfigurePropertyImpl(DFS_DATANODE_DATA_DIR_KEY, newDataDir),
+        is(conf.get(DFS_DATANODE_DATA_DIR_KEY)));
 
     // Verify the configuration value is appropriately set.
     String[] effectiveDataDirs = conf.get(DFS_DATANODE_DATA_DIR_KEY).split(",");
@@ -447,8 +450,11 @@ public void testRemoveOneVolume()
     DataNode dn = cluster.getDataNodes().get(0);
     Collection<String> oldDirs = getDataDirs(dn);
     String newDirs = oldDirs.iterator().next();  // Keep the first volume.
-    dn.reconfigurePropertyImpl(
-        DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY, newDirs);
+    assertThat(
+        "DN did not update its own config",
+        dn.reconfigurePropertyImpl(
+            DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY, newDirs),
+        is(dn.getConf().get(DFS_DATANODE_DATA_DIR_KEY)));
     assertFileLocksReleased(
         new ArrayList<String>(oldDirs).subList(1, oldDirs.size()));
     dn.scheduleAllBlockReport(0);
@@ -504,8 +510,11 @@ public void testReplicatingAfterRemoveVolume()
       newDirs = dir;
       break;
     }
-    dn.reconfigurePropertyImpl(
-        DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY, newDirs);
+    assertThat(
+        "DN did not update its own config",
+        dn.reconfigurePropertyImpl(
+            DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY, newDirs),
+        is(dn.getConf().get(DFS_DATANODE_DATA_DIR_KEY)));
     oldDirs.remove(newDirs);
     assertFileLocksReleased(oldDirs);
 
@@ -651,8 +660,10 @@ public Object answer(InvocationOnMock invocation)
       public void run() {
         try {
           barrier.await();
-          dn.reconfigurePropertyImpl(
-              DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY, newDirs);
+          assertThat(
+              "DN did not update its own config",
+              dn.reconfigurePropertyImpl(DFS_DATANODE_DATA_DIR_KEY, newDirs),
+              is(dn.getConf().get(DFS_DATANODE_DATA_DIR_KEY)));
         } catch (ReconfigurationException |
             InterruptedException |
             BrokenBarrierException e) {
@@ -700,7 +711,10 @@ public void testAddBackRemovedVolume()
     String keepDataDir = oldDataDir.split(",")[0];
     String removeDataDir = oldDataDir.split(",")[1];
 
-    dn.reconfigurePropertyImpl(DFS_DATANODE_DATA_DIR_KEY, keepDataDir);
+    assertThat(
+        "DN did not update its own config",
+        dn.reconfigurePropertyImpl(DFS_DATANODE_DATA_DIR_KEY, keepDataDir),
+        is(dn.getConf().get(DFS_DATANODE_DATA_DIR_KEY)));
     for (int i = 0; i < cluster.getNumNameNodes(); i++) {
       String bpid = cluster.getNamesystem(i).getBlockPoolId();
       BlockPoolSliceStorage bpsStorage =
@@ -717,7 +731,10 @@ public void testAddBackRemovedVolume()
 
     // Bring the removed directory back. It only successes if all metadata about
     // this directory were removed from the previous step.
-    dn.reconfigurePropertyImpl(DFS_DATANODE_DATA_DIR_KEY, oldDataDir);
+    assertThat(
+        "DN did not update its own config",
+        dn.reconfigurePropertyImpl(DFS_DATANODE_DATA_DIR_KEY, oldDataDir),
+        is(dn.getConf().get(DFS_DATANODE_DATA_DIR_KEY)));
   }
 
   /** Get the FsVolume on the given basePath */
@@ -771,7 +788,10 @@ public void testDirectlyReloadAfterCheckDiskError()
     assertEquals(used, failedVolume.getDfsUsed());
 
     DataNodeTestUtils.restoreDataDirFromFailure(dirToFail);
-    dn.reconfigurePropertyImpl(DFS_DATANODE_DATA_DIR_KEY, oldDataDir);
+    assertThat(
+        "DN did not update its own config",
+        dn.reconfigurePropertyImpl(DFS_DATANODE_DATA_DIR_KEY, oldDataDir),
+        is(dn.getConf().get(DFS_DATANODE_DATA_DIR_KEY)));
 
     createFile(new Path("/test2"), 32, (short)2);
     FsVolumeImpl restoredVolume = getVolume(dn, dirToFail);
@@ -805,7 +825,11 @@ public void testFullBlockReportAfterRemovingVolumes()
 
     // Remove a data dir from datanode
     File dataDirToKeep = new File(cluster.getDataDirectory(), "data1");
-    dn.reconfigurePropertyImpl(DFS_DATANODE_DATA_DIR_KEY, dataDirToKeep.toString());
+    assertThat(
+        "DN did not update its own config",
+        dn.reconfigurePropertyImpl(
+            DFS_DATANODE_DATA_DIR_KEY, dataDirToKeep.toString()),
+        is(dn.getConf().get(DFS_DATANODE_DATA_DIR_KEY)));
 
     // We should get 1 full report
     Mockito.verify(spy, timeout(60000).times(1)).blockReport(
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java
index 90e000bb3bc..05e6da10145 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java
@@ -17,10 +17,12 @@
  */
 package org.apache.hadoop.hdfs.server.datanode;
 
+import static org.hamcrest.core.Is.is;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertNotEquals;
 import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertThat;
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assume.assumeTrue;
 
@@ -322,13 +324,17 @@ public void testVolumeFailureRecoveredByHotSwappingVolume()
 
     // Hot swap out the failure volume.
     String dataDirs = dn0Vol2.getPath();
-    dn0.reconfigurePropertyImpl(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY,
-        dataDirs);
+    assertThat(
+        dn0.reconfigurePropertyImpl(
+            DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY, dataDirs),
+        is(dn0.getConf().get(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY)));
 
     // Fix failure volume dn0Vol1 and remount it back.
     DataNodeTestUtils.restoreDataDirFromFailure(dn0Vol1);
-    dn0.reconfigurePropertyImpl(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY,
-        oldDataDirs);
+    assertThat(
+        dn0.reconfigurePropertyImpl(
+            DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY, oldDataDirs),
+        is(dn0.getConf().get(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY)));
 
     // Fail dn0Vol2. Now since dn0Vol1 has been fixed, DN0 has sufficient
     // resources, thus it should keep running.
@@ -352,8 +358,11 @@ public void testTolerateVolumeFailuresAfterAddingMoreVolumes()
         DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY);
 
     // Add a new volume to DN0
-    dn0.reconfigurePropertyImpl(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY,
-        oldDataDirs + "," + dn0VolNew.getAbsolutePath());
+    assertThat(
+        dn0.reconfigurePropertyImpl(
+            DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY,
+            oldDataDirs + "," + dn0VolNew.getAbsolutePath()),
+        is(dn0.getConf().get(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY)));
 
     // Fail dn0Vol1 first and hot swap it.
     DataNodeTestUtils.injectDataDirFailure(dn0Vol1);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailureReporting.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailureReporting.java
index d25a8a2edf4..b1ea5aef6f4 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailureReporting.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailureReporting.java
@@ -19,9 +19,11 @@
 
 import static org.apache.hadoop.test.MetricsAsserts.assertCounter;
 import static org.apache.hadoop.test.MetricsAsserts.getMetrics;
+import static org.hamcrest.core.Is.is;
 import static org.junit.Assert.assertArrayEquals;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertThat;
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assume.assumeTrue;
 
@@ -591,8 +593,11 @@ private static void reconfigureDataNode(DataNode dn, File... newVols)
       dnNewDataDirs.append(newVol.getAbsolutePath());
     }
     try {
-      dn.reconfigurePropertyImpl(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY,
-          dnNewDataDirs.toString());
+      assertThat(
+          dn.reconfigurePropertyImpl(
+              DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY,
+              dnNewDataDirs.toString()),
+          is(dn.getConf().get(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY)));
     } catch (ReconfigurationException e) {
       // This can be thrown if reconfiguration tries to use a failed volume.
       // We need to swallow the exception, because some of our tests want to