HDFS-14486. The exception classes in some throw statements do not accurately describe why they are thrown. Contributed by Ayush Saxena.

Inigo Goiri 2019-06-06 11:59:53 -07:00
parent c7e6f076df
commit e1dfc060f8
9 changed files with 101 additions and 13 deletions

@@ -25,6 +25,7 @@ import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.EOFException;
import java.io.IOException;
import java.io.InterruptedIOException;
import java.io.OutputStreamWriter;
import java.io.Writer;
import java.nio.ByteBuffer;
@@ -1074,7 +1075,7 @@ class BlockReceiver implements Closeable {
responder.interrupt();
// do not throw if shutting down for restart.
if (!datanode.isRestarting()) {
throw new IOException("Interrupted receiveBlock");
throw new InterruptedIOException("Interrupted receiveBlock");
}
}
responder = null;

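Since java.io.InterruptedIOException extends IOException, existing callers that catch IOException around receiveBlock() keep working unchanged; the narrower type only lets them distinguish an interrupted transfer from a genuine I/O failure. A minimal sketch of such a caller (the Receiver interface and handler class below are hypothetical, for illustration only):

import java.io.IOException;
import java.io.InterruptedIOException;

class ReceiveHandler {
  interface Receiver {
    void receiveBlock() throws IOException;
  }

  void handle(Receiver receiver) throws IOException {
    try {
      receiver.receiveBlock();
    } catch (InterruptedIOException e) {
      // The transfer was cut short by an interrupt (e.g. shutdown or
      // restart), not by a failing disk or socket: restore the interrupt
      // flag and rethrow so the caller can decide whether to retry.
      Thread.currentThread().interrupt();
      throw e;
    }
  }
}
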
@@ -96,6 +96,7 @@ import javax.net.SocketFactory;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.HadoopIllegalArgumentException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.ReconfigurableBase;
@@ -1420,7 +1421,7 @@ public class DataNode extends ReconfigurableBase
int volsConfigured = dnConf.getVolsConfigured();
if (volFailuresTolerated < MAX_VOLUME_FAILURE_TOLERATED_LIMIT
|| volFailuresTolerated >= volsConfigured) {
- throw new DiskErrorException("Invalid value configured for "
+ throw new HadoopIllegalArgumentException("Invalid value configured for "
+ "dfs.datanode.failed.volumes.tolerated - " + volFailuresTolerated
+ ". Value configured is either less than -1 or >= "
+ "to the number of configured volumes (" + volsConfigured + ").");

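HadoopIllegalArgumentException extends IllegalArgumentException and is unchecked, while DiskErrorException is a checked IOException subclass, so an out-of-range dfs.datanode.failed.volumes.tolerated now fails fast as a configuration error instead of being reported as a disk fault. A minimal sketch of the rule enforced above, assuming the -1 lower bound implied by the error message (the helper class is hypothetical, not HDFS code):

import org.apache.hadoop.HadoopIllegalArgumentException;

final class VolumeFailureConfigCheck {
  // Tolerated volume failures must be >= -1 and strictly less than the
  // number of configured volumes; anything else is a configuration error.
  static void validate(int volFailuresTolerated, int volsConfigured) {
    if (volFailuresTolerated < -1 || volFailuresTolerated >= volsConfigured) {
      throw new HadoopIllegalArgumentException("Invalid value configured for "
          + "dfs.datanode.failed.volumes.tolerated - " + volFailuresTolerated
          + ". Value configured is either less than -1 or >= "
          + "to the number of configured volumes (" + volsConfigured + ").");
    }
  }
}
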
@@ -26,6 +26,8 @@ import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;
import com.google.common.util.concurrent.MoreExecutors;
import com.google.common.util.concurrent.ThreadFactoryBuilder;
import org.apache.hadoop.HadoopIllegalArgumentException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.server.datanode.DataNode;
@@ -120,7 +122,7 @@ public class DatasetVolumeChecker {
TimeUnit.MILLISECONDS);
if (maxAllowedTimeForCheckMs <= 0) {
- throw new DiskErrorException("Invalid value configured for "
+ throw new HadoopIllegalArgumentException("Invalid value configured for "
+ DFS_DATANODE_DISK_CHECK_TIMEOUT_KEY + " - "
+ maxAllowedTimeForCheckMs + " (should be > 0)");
}
@@ -137,7 +139,7 @@ public class DatasetVolumeChecker {
TimeUnit.MILLISECONDS);
if (minDiskCheckGapMs < 0) {
- throw new DiskErrorException("Invalid value configured for "
+ throw new HadoopIllegalArgumentException("Invalid value configured for "
+ DFS_DATANODE_DISK_CHECK_MIN_GAP_KEY + " - "
+ minDiskCheckGapMs + " (should be >= 0)");
}
@@ -148,7 +150,7 @@ public class DatasetVolumeChecker {
TimeUnit.MILLISECONDS);
if (diskCheckTimeout < 0) {
- throw new DiskErrorException("Invalid value configured for "
+ throw new HadoopIllegalArgumentException("Invalid value configured for "
+ DFS_DATANODE_DISK_CHECK_TIMEOUT_KEY + " - "
+ diskCheckTimeout + " (should be >= 0)");
}
@@ -156,7 +158,7 @@ public class DatasetVolumeChecker {
lastAllVolumesCheck = timer.monotonicNow() - minDiskCheckGapMs;
if (maxVolumeFailuresTolerated < DataNode.MAX_VOLUME_FAILURE_TOLERATED_LIMIT) {
- throw new DiskErrorException("Invalid value configured for "
+ throw new HadoopIllegalArgumentException("Invalid value configured for "
+ DFS_DATANODE_FAILED_VOLUMES_TOLERATED_KEY + " - "
+ maxVolumeFailuresTolerated + " "
+ DataNode.MAX_VOLUME_FAILURES_TOLERATED_MSG);

@@ -23,6 +23,8 @@ import static org.apache.hadoop.hdfs.DFSConfigKeys.*;
import com.google.common.collect.Maps;
import com.google.common.util.concurrent.ListenableFuture;
import com.google.common.util.concurrent.ThreadFactoryBuilder;
import org.apache.hadoop.HadoopIllegalArgumentException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
@@ -93,7 +95,7 @@ public class StorageLocationChecker {
TimeUnit.MILLISECONDS);
if (maxAllowedTimeForCheckMs <= 0) {
- throw new DiskErrorException("Invalid value configured for "
+ throw new HadoopIllegalArgumentException("Invalid value configured for "
+ DFS_DATANODE_DISK_CHECK_TIMEOUT_KEY + " - "
+ maxAllowedTimeForCheckMs + " (should be > 0)");
}
@@ -107,7 +109,7 @@ public class StorageLocationChecker {
DFS_DATANODE_FAILED_VOLUMES_TOLERATED_DEFAULT);
if (maxVolumeFailuresTolerated < DataNode.MAX_VOLUME_FAILURE_TOLERATED_LIMIT) {
- throw new DiskErrorException("Invalid value configured for "
+ throw new HadoopIllegalArgumentException("Invalid value configured for "
+ DFS_DATANODE_FAILED_VOLUMES_TOLERATED_KEY + " - "
+ maxVolumeFailuresTolerated + " "
+ DataNode.MAX_VOLUME_FAILURES_TOLERATED_MSG);
@@ -170,7 +172,7 @@ public class StorageLocationChecker {
}
if (maxVolumeFailuresTolerated >= dataDirs.size()) {
- throw new DiskErrorException("Invalid value configured for "
+ throw new HadoopIllegalArgumentException("Invalid value configured for "
+ DFS_DATANODE_FAILED_VOLUMES_TOLERATED_KEY + " - "
+ maxVolumeFailuresTolerated + ". Value configured is >= "
+ "to the number of configured volumes (" + dataDirs.size() + ").");

@@ -48,6 +48,8 @@ import javax.management.ObjectName;
import javax.management.StandardMBean;
import com.google.common.annotations.VisibleForTesting;
import org.apache.hadoop.HadoopIllegalArgumentException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeys;
@@ -292,7 +294,7 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
if (volFailuresTolerated < DataNode.MAX_VOLUME_FAILURE_TOLERATED_LIMIT
|| volFailuresTolerated >= volsConfigured) {
- throw new DiskErrorException("Invalid value configured for "
+ throw new HadoopIllegalArgumentException("Invalid value configured for "
+ "dfs.datanode.failed.volumes.tolerated - " + volFailuresTolerated
+ ". Value configured is either less than maxVolumeFailureLimit or greater than "
+ "to the number of configured volumes (" + volsConfigured + ").");

@@ -26,6 +26,7 @@ import java.io.File;
import java.io.IOException;
import java.util.concurrent.TimeUnit;
import org.apache.hadoop.HadoopIllegalArgumentException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FileUtil;
@@ -36,7 +37,6 @@ import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeManager;
import org.apache.hadoop.test.GenericTestUtils;
- import org.apache.hadoop.util.DiskChecker.DiskErrorException;
import org.junit.After;
import org.junit.Before;
import org.junit.Rule;
@@ -242,7 +242,7 @@ public class TestDataNodeVolumeFailureToleration {
prepareDirToFail(dirs[i]);
}
restartDatanodes(volumesTolerated, manageDfsDirs);
- } catch (DiskErrorException e) {
+ } catch (HadoopIllegalArgumentException e) {
GenericTestUtils.assertExceptionContains("Invalid value configured for "
+ "dfs.datanode.failed.volumes.tolerated", e);
} finally {

@@ -18,6 +18,7 @@
package org.apache.hadoop.hdfs.server.datanode;
import static org.apache.hadoop.test.LambdaTestUtils.intercept;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.fail;
import static org.mockito.Mockito.doReturn;
@@ -36,6 +37,7 @@ import com.google.common.collect.Lists;
import org.junit.Assert;
import org.apache.hadoop.HadoopIllegalArgumentException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.hdfs.DFSConfigKeys;
@@ -162,4 +164,14 @@ public class TestDatanodeRegister {
e.getMessage());
}
}
@Test
public void testInvalidConfigurationValue() throws Exception {
Configuration conf = new HdfsConfiguration();
conf.setInt(DFSConfigKeys.DFS_DATANODE_FAILED_VOLUMES_TOLERATED_KEY, -2);
intercept(HadoopIllegalArgumentException.class,
"Invalid value configured for dfs.datanode.failed.volumes.tolerated"
+ " - -2 should be greater than or equal to -1",
() -> new DataNode(conf, new ArrayList<>(), null, null));
}
}

@@ -20,6 +20,8 @@ package org.apache.hadoop.hdfs.server.datanode.checker;
import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;
import org.apache.hadoop.HadoopIllegalArgumentException;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.server.datanode.StorageLocation;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.*;
@@ -44,6 +46,10 @@ import java.util.Set;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicLong;
import static org.apache.hadoop.test.LambdaTestUtils.intercept;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_DISK_CHECK_MIN_GAP_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_DISK_CHECK_TIMEOUT_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_FAILED_VOLUMES_TOLERATED_KEY;
import static org.apache.hadoop.hdfs.server.datanode.checker.VolumeCheckResult.*;
import static org.hamcrest.CoreMatchers.is;
import static org.junit.Assert.*;
@@ -226,4 +232,35 @@ public class TestDatasetVolumeChecker {
}
return volumes;
}
@Test
public void testInvalidConfigurationValues() throws Exception {
HdfsConfiguration conf = new HdfsConfiguration();
conf.setInt(DFS_DATANODE_DISK_CHECK_TIMEOUT_KEY, 0);
intercept(HadoopIllegalArgumentException.class,
"Invalid value configured for dfs.datanode.disk.check.timeout"
+ " - 0 (should be > 0)",
() -> new DatasetVolumeChecker(conf, new FakeTimer()));
conf.unset(DFS_DATANODE_DISK_CHECK_TIMEOUT_KEY);
conf.setInt(DFS_DATANODE_DISK_CHECK_MIN_GAP_KEY, -1);
intercept(HadoopIllegalArgumentException.class,
"Invalid value configured for dfs.datanode.disk.check.min.gap"
+ " - -1 (should be >= 0)",
() -> new DatasetVolumeChecker(conf, new FakeTimer()));
conf.unset(DFS_DATANODE_DISK_CHECK_MIN_GAP_KEY);
conf.setInt(DFS_DATANODE_DISK_CHECK_TIMEOUT_KEY, -1);
intercept(HadoopIllegalArgumentException.class,
"Invalid value configured for dfs.datanode.disk.check.timeout"
+ " - -1 (should be > 0)",
() -> new DatasetVolumeChecker(conf, new FakeTimer()));
conf.unset(DFS_DATANODE_DISK_CHECK_TIMEOUT_KEY);
conf.setInt(DFS_DATANODE_FAILED_VOLUMES_TOLERATED_KEY, -2);
intercept(HadoopIllegalArgumentException.class,
"Invalid value configured for dfs.datanode.failed.volumes.tolerated"
+ " - -2 should be greater than or equal to -1",
() -> new DatasetVolumeChecker(conf, new FakeTimer()));
}
}

@@ -18,6 +18,7 @@
package org.apache.hadoop.hdfs.server.datanode.checker;
import org.apache.hadoop.HadoopIllegalArgumentException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.server.datanode.StorageLocation;
@@ -36,6 +37,7 @@ import java.util.List;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;
import static org.apache.hadoop.test.LambdaTestUtils.intercept;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_DISK_CHECK_TIMEOUT_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_FAILED_VOLUMES_TOLERATED_KEY;
import static org.apache.hadoop.hdfs.server.datanode.checker.VolumeCheckResult.*;
@@ -129,7 +131,7 @@ public class TestStorageLocationChecker {
final Configuration conf = new HdfsConfiguration();
conf.setInt(DFS_DATANODE_FAILED_VOLUMES_TOLERATED_KEY, 3);
- thrown.expect(IOException.class);
+ thrown.expect(HadoopIllegalArgumentException.class);
thrown.expectMessage("Invalid value configured");
StorageLocationChecker checker =
new StorageLocationChecker(conf, new FakeTimer());
@@ -214,4 +216,33 @@ public class TestStorageLocationChecker {
}
return locations;
}
@Test
public void testInvalidConfigurationValues() throws Exception {
final List<StorageLocation> locations =
makeMockLocations(HEALTHY, HEALTHY, HEALTHY);
Configuration conf = new HdfsConfiguration();
conf.setInt(DFS_DATANODE_FAILED_VOLUMES_TOLERATED_KEY, 4);
intercept(HadoopIllegalArgumentException.class,
"Invalid value configured for dfs.datanode.failed.volumes.tolerated"
+ " - 4. Value configured is >= to the "
+ "number of configured volumes (3).",
() -> new StorageLocationChecker(conf, new FakeTimer()).check(conf,
locations));
conf.unset(DFS_DATANODE_FAILED_VOLUMES_TOLERATED_KEY);
conf.setInt(DFS_DATANODE_DISK_CHECK_TIMEOUT_KEY, 0);
intercept(HadoopIllegalArgumentException.class,
"Invalid value configured for dfs.datanode.disk.check.timeout"
+ " - 0 (should be > 0)",
() -> new StorageLocationChecker(conf, new FakeTimer()));
conf.unset(DFS_DATANODE_DISK_CHECK_TIMEOUT_KEY);
conf.setInt(DFS_DATANODE_FAILED_VOLUMES_TOLERATED_KEY, -2);
intercept(HadoopIllegalArgumentException.class,
"Invalid value configured for dfs.datanode.failed.volumes.tolerated"
+ " - -2 should be greater than or equal to -1",
() -> new StorageLocationChecker(conf, new FakeTimer()));
}
}