HBASE-5697 Audit HBase for usage of deprecated hadoop 0.20.x, 1.x property names (Srikanth Srungarapu)

git-svn-id: https://svn.apache.org/repos/asf/hbase/trunk@1591045 13f79535-47bb-0310-9956-ffa450edef68
Jonathan Hsieh 2014-04-29 18:19:37 +00:00
parent 0e367e91e3
commit 3e3b9a2f65
45 changed files with 77 additions and 82 deletions
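For context, a minimal sketch of why this rename sweep is behavior-neutral on Hadoop 2 (assumes a Hadoop 2.x hadoop-common on the classpath; the class name and namenode URI below are made up for illustration): the client-side Configuration keeps a deprecation mapping from the 0.20/1.x names to the current ones, so either spelling resolves to the same value, and the audit simply moves HBase onto the canonical names.

import org.apache.hadoop.conf.Configuration;

public class DeprecatedKeyCheck {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // Old 0.20/1.x spelling, as removed throughout this commit
    conf.set("fs.default.name", "hdfs://namenode.example.org:8020"); // made-up URI
    // On a Hadoop 2.x client the deprecation table maps the old key to the
    // new one, so the value is visible under the current name as well.
    System.out.println(conf.get("fs.defaultFS"));
  }
}

Relying on the mapping typically logs a deprecation warning for each old key, which is part of the motivation for moving to the new names outright.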

@@ -64,7 +64,7 @@ org.apache.log4j.Logger.getLogger("org.apache.zookeeper").setLevel(log_level)
org.apache.log4j.Logger.getLogger("org.apache.hadoop.hbase").setLevel(log_level)
config = HBaseConfiguration.create
-config.set 'fs.default.name', config.get(HConstants::HBASE_DIR)
+config.set 'fs.defaultFS', config.get(HConstants::HBASE_DIR)
# wait until the master is running
admin = nil

@@ -255,7 +255,7 @@ public final class Compression {
Algorithm(String name) {
this.conf = new Configuration();
-this.conf.setBoolean("hadoop.native.lib", true);
+this.conf.setBoolean("io.native.lib.available", true);
this.compressName = name;
}

@@ -1001,7 +1001,7 @@ possible configurations would overwhelm and obscure the important.
the time it takes for the namenode to issue a block recovery command as part of
datanode; dfs.heartbeat.interval and the time it takes for the primary
datanode, performing block recovery to timeout on a dead datanode; usually
-dfs.socket.timeout. See the end of HBASE-8389 for more.</description>
+dfs.client.socket-timeout. See the end of HBASE-8389 for more.</description>
</property>
<property>
<name>hbase.column.max.version</name>

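The <description> in the hunk above only swaps dfs.socket.timeout for its current name; the datanode-death timings it describes are the same ones several test hunks later in this commit tune directly. A small sketch with the new property names, mirroring those test hunks (the values are the ones the tests use, not tuning advice):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class DataNodeDeathTimings {
  public static void main(String[] args) {
    Configuration c = HBaseConfiguration.create();
    // How quickly the namenode declares a datanode dead
    c.setInt("dfs.namenode.heartbeat.recheck-interval", 5000); // was heartbeat.recheck.interval
    c.setInt("dfs.heartbeat.interval", 1);
    // Socket timeout used while block recovery waits on a dead datanode
    c.setInt("dfs.client.socket-timeout", 5000);               // was dfs.socket.timeout
  }
}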
@@ -243,8 +243,8 @@ public class IntegrationTestImportTsv implements Configurable, Tool {
// inherit jar dependencies added to distributed cache loaded by parent job
Configuration conf = HBaseConfiguration.create(context.getConfiguration());
-conf.set("mapred.job.classpath.archives",
-context.getConfiguration().get("mapred.job.classpath.archives", ""));
+conf.set("mapreduce.job.classpath.archives",
+context.getConfiguration().get("mapreduce.job.classpath.archives", ""));
conf.set("mapreduce.job.cache.archives.visibilities",
context.getConfiguration().get("mapreduce.job.cache.archives.visibilities", ""));

@@ -498,7 +498,7 @@ public class IntegrationTestBigLinkedList extends IntegrationTestBase {
job.setOutputFormatClass(NullOutputFormat.class);
-job.getConfiguration().setBoolean("mapred.map.tasks.speculative.execution", false);
+job.getConfiguration().setBoolean("mapreduce.map.speculative", false);
TableMapReduceUtil.addDependencyJars(job);
TableMapReduceUtil.addDependencyJars(job.getConfiguration(), AbstractHBaseTool.class);
TableMapReduceUtil.initCredentials(job);
@@ -659,7 +659,7 @@ public class IntegrationTestBigLinkedList extends IntegrationTestBase {
VerifyMapper.class, BytesWritable.class, BytesWritable.class, job);
TableMapReduceUtil.addDependencyJars(job.getConfiguration(), AbstractHBaseTool.class);
-job.getConfiguration().setBoolean("mapred.map.tasks.speculative.execution", false);
+job.getConfiguration().setBoolean("mapreduce.map.speculative", false);
job.setReducerClass(VerifyReducer.class);
job.setOutputFormatClass(TextOutputFormat.class);

@@ -181,7 +181,7 @@ public void cleanUpCluster() throws Exception {
table.setWriteBufferSize(4*1024*1024);
table.setAutoFlush(false, true);
-String taskId = conf.get("mapred.task.id");
+String taskId = conf.get("mapreduce.task.attempt.id");
Matcher matcher = Pattern.compile(".+_m_(\\d+_\\d+)").matcher(taskId);
if (!matcher.matches()) {
throw new RuntimeException("Strange task ID: " + taskId);

@@ -161,7 +161,7 @@ public class CopyTable extends Configured implements Tool {
"--peer.adr=server1,server2,server3:2181:/hbase --families=myOldCf:myNewCf,cf2,cf3 TestTable ");
System.err.println("For performance consider the following general options:\n"
+ "-Dhbase.client.scanner.caching=100\n"
-+ "-Dmapred.map.tasks.speculative.execution=false");
++ "-Dmapreduce.map.speculative=false");
}
private static boolean doCommandLine(final String[] args) {

@@ -148,17 +148,17 @@ public class Export {
"[<starttime> [<endtime>]] [^[regex pattern] or [Prefix] to filter]]\n");
System.err.println(" Note: -D properties will be applied to the conf used. ");
System.err.println(" For example: ");
-System.err.println(" -D mapred.output.compress=true");
-System.err.println(" -D mapred.output.compression.codec=org.apache.hadoop.io.compress.GzipCodec");
-System.err.println(" -D mapred.output.compression.type=BLOCK");
+System.err.println(" -D mapreduce.output.fileoutputformat.compress=true");
+System.err.println(" -D mapreduce.output.fileoutputformat.compress.codec=org.apache.hadoop.io.compress.GzipCodec");
+System.err.println(" -D mapreduce.output.fileoutputformat.compress.type=BLOCK");
System.err.println(" Additionally, the following SCAN properties can be specified");
System.err.println(" to control/limit what is exported..");
System.err.println(" -D " + TableInputFormat.SCAN_COLUMN_FAMILY + "=<familyName>");
System.err.println(" -D " + RAW_SCAN + "=true");
System.err.println("For performance consider the following properties:\n"
+ " -Dhbase.client.scanner.caching=100\n"
-+ " -Dmapred.map.tasks.speculative.execution=false\n"
-+ " -Dmapred.reduce.tasks.speculative.execution=false");
++ " -Dmapreduce.map.speculative=false\n"
++ " -Dmapreduce.reduce.speculative=false");
System.err.println("For tables with very wide rows consider setting the batch size as below:\n"
+ " -D" + EXPORT_BATCHING + "=10");
}

@@ -214,7 +214,7 @@ public class HLogInputFormat extends InputFormat<HLogKey, WALEdit> {
public List<InputSplit> getSplits(JobContext context) throws IOException,
InterruptedException {
Configuration conf = context.getConfiguration();
-Path inputDir = new Path(conf.get("mapred.input.dir"));
+Path inputDir = new Path(conf.get("mapreduce.input.fileinputformat.inputdir"));
long startTime = conf.getLong(START_TIME_KEY, Long.MIN_VALUE);
long endTime = conf.getLong(END_TIME_KEY, Long.MAX_VALUE);

@@ -450,8 +450,8 @@ public class Import {
+ " Filter.ReturnCode#INCLUDE and #INCLUDE_AND_NEXT_COL will be considered as including"
+ " the KeyValue.");
System.err.println("For performance consider the following options:\n"
-+ " -Dmapred.map.tasks.speculative.execution=false\n"
-+ " -Dmapred.reduce.tasks.speculative.execution=false\n"
++ " -Dmapreduce.map.speculative=false\n"
++ " -Dmapreduce.reduce.speculative=false\n"
+ " -D" + WAL_DURABILITY + "=<Used while writing data to hbase."
+" Allowed values are the supported durability values"
+" like SKIP_WAL/ASYNC_WAL/SYNC_WAL/...>");

@@ -76,7 +76,7 @@ public class ImportTsv extends Configured implements Tool {
public final static String MAPPER_CONF_KEY = "importtsv.mapper.class";
public final static String BULK_OUTPUT_CONF_KEY = "importtsv.bulk.output";
public final static String TIMESTAMP_CONF_KEY = "importtsv.timestamp";
-public final static String JOB_NAME_CONF_KEY = "mapred.job.name";
+public final static String JOB_NAME_CONF_KEY = "mapreduce.job.name";
// TODO: the rest of these configs are used exclusively by TsvImporterMapper.
// Move them out of the tool and let the mapper handle its own validation.
public final static String SKIP_LINES_CONF_KEY = "importtsv.skip.bad.lines";
@@ -523,8 +523,8 @@ public class ImportTsv extends Configured implements Tool {
DEFAULT_MAPPER.getName() + "\n" +
" -D" + JOB_NAME_CONF_KEY + "=jobName - use the specified mapreduce job name for the import\n" +
"For performance consider the following options:\n" +
-" -Dmapred.map.tasks.speculative.execution=false\n" +
-" -Dmapred.reduce.tasks.speculative.execution=false";
+" -Dmapreduce.map.speculative=false\n" +
+" -Dmapreduce.reduce.speculative=false";
System.err.println(usage);
}

@@ -167,7 +167,7 @@ public class RowCounter {
"[--range=[startKey],[endKey]] [<column1> <column2>...]");
System.err.println("For performance consider the following options:\n"
+ "-Dhbase.client.scanner.caching=100\n"
-+ "-Dmapred.map.tasks.speculative.execution=false");
++ "-Dmapreduce.map.speculative=false");
}
/**

@@ -286,8 +286,8 @@ public class WALPlayer extends Configured implements Tool {
System.err.println(" -D" + HLogInputFormat.START_TIME_KEY + "=[date|ms]");
System.err.println(" -D" + HLogInputFormat.END_TIME_KEY + "=[date|ms]");
System.err.println("For performance also consider the following options:\n"
-+ " -Dmapred.map.tasks.speculative.execution=false\n"
-+ " -Dmapred.reduce.tasks.speculative.execution=false");
++ " -Dmapreduce.map.speculative=false\n"
++ " -Dmapreduce.reduce.speculative=false");
}
/**

@@ -114,8 +114,8 @@ or review the <code>org.apache.hadoop.hbase.mapreduce.TestTableMapReduce</code>
specify source/sink table and column names in your configuration.</p>
<p>Reading from HBase, the TableInputFormat asks HBase for the list of
-regions and makes a map-per-region or <code>mapred.map.tasks maps</code>,
-whichever is smaller (If your job only has two maps, up mapred.map.tasks
+regions and makes a map-per-region or <code>mapreduce.job.maps maps</code>,
+whichever is smaller (If your job only has two maps, up mapreduce.job.maps
to a number &gt; number of regions). Maps will run on the adjacent TaskTracker
if you are running a TaskTracer and RegionServer per node.
Writing, it may make sense to avoid the reduce step and write yourself back into

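A hedged sketch of the knob the renamed javadoc paragraph above refers to (the class name and the value 64 are illustrative only; as the paragraph says, the input format uses the smaller of the region count and this setting):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.mapreduce.Job;

public class MapCountHint {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Raise this above the region count if a small table should still
    // fan out to more maps than it has regions.
    conf.setInt("mapreduce.job.maps", 64); // 64 is purely illustrative
    Job job = Job.getInstance(conf, "table-scan");
    // ... TableMapReduceUtil.initTableMapperJob(...) and the rest of the job setup
  }
}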
@@ -268,8 +268,8 @@ public class HMaster extends HRegionServer implements MasterServices, Server {
// Hack! Maps DFSClient => Master for logs. HDFS made this
// config param for task trackers, but we can piggyback off of it.
-if (this.conf.get("mapred.task.id") == null) {
-this.conf.set("mapred.task.id", "hb_m_" + this.serverName.toString());
+if (this.conf.get("mapreduce.task.attempt.id") == null) {
+this.conf.set("mapreduce.task.attempt.id", "hb_m_" + this.serverName.toString());
}
//should we check the compression codec type at master side, default true, HBASE-6370

@@ -1119,8 +1119,8 @@ public class HRegionServer extends HasThread implements
// hack! Maps DFSClient => RegionServer for logs. HDFS made this
// config param for task trackers, but we can piggyback off of it.
-if (this.conf.get("mapred.task.id") == null) {
-this.conf.set("mapred.task.id", "hb_rs_" +
+if (this.conf.get("mapreduce.task.attempt.id") == null) {
+this.conf.set("mapreduce.task.attempt.id", "hb_rs_" +
this.serverName.toString());
}

@@ -888,7 +888,6 @@ public abstract class FSUtils {
public static void setFsDefault(final Configuration c, final Path root) throws IOException {
c.set("fs.defaultFS", root.toString()); // for hadoop 0.21+
-c.set("fs.default.name", root.toString()); // for hadoop 0.20
}
/**

@@ -268,9 +268,6 @@ public class Merge extends Configured implements Tool {
}
private void usage() {
-System.err
-.println("For hadoop 0.20, Usage: bin/hbase org.apache.hadoop.hbase.util.Merge "
-+ "[-Dfs.default.name=hdfs://nn:port] <table-name> <region-1> <region-2>\n");
System.err
.println("For hadoop 0.21+, Usage: bin/hbase org.apache.hadoop.hbase.util.Merge "
+ "[-Dfs.defaultFS=hdfs://nn:port] <table-name> <region-1> <region-2>\n");

@@ -295,7 +295,7 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility {
* single instance only is how the minidfscluster works.
*
* We also create the underlying directory for
-* hadoop.log.dir, mapred.local.dir and hadoop.tmp.dir, and set the values
+* hadoop.log.dir, mapreduce.cluster.local.dir and hadoop.tmp.dir, and set the values
* in the conf, and as a system property for hadoop.tmp.dir
*
* @return The calculated data test build directory, if newly-created.
@@ -319,7 +319,7 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility {
// Read and modified in org.apache.hadoop.mapred.MiniMRCluster
createSubDir(
-"mapred.local.dir",
+"mapreduce.cluster.local.dir",
testPath, "mapred-local-dir");
return testPath;
@@ -568,17 +568,17 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility {
createDirAndSetProperty("cache_data", "test.cache.data");
createDirAndSetProperty("hadoop_tmp", "hadoop.tmp.dir");
hadoopLogDir = createDirAndSetProperty("hadoop_logs", "hadoop.log.dir");
-createDirAndSetProperty("mapred_local", "mapred.local.dir");
-createDirAndSetProperty("mapred_temp", "mapred.temp.dir");
+createDirAndSetProperty("mapred_local", "mapreduce.cluster.local.dir");
+createDirAndSetProperty("mapred_temp", "mapreduce.cluster.temp.dir");
enableShortCircuit();
Path root = getDataTestDirOnTestFS("hadoop");
conf.set(MapreduceTestingShim.getMROutputDirProp(),
new Path(root, "mapred-output-dir").toString());
-conf.set("mapred.system.dir", new Path(root, "mapred-system-dir").toString());
+conf.set("mapreduce.jobtracker.system.dir", new Path(root, "mapred-system-dir").toString());
conf.set("mapreduce.jobtracker.staging.root.dir",
new Path(root, "mapreduce-jobtracker-staging-root-dir").toString());
-conf.set("mapred.working.dir", new Path(root, "mapred-working-dir").toString());
+conf.set("mapreduce.job.working.dir", new Path(root, "mapred-working-dir").toString());
}
@@ -2192,14 +2192,14 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility {
jobConf = mrCluster.createJobConf();
}
-jobConf.set("mapred.local.dir",
-conf.get("mapred.local.dir")); //Hadoop MiniMR overwrites this while it should not
+jobConf.set("mapreduce.cluster.local.dir",
+conf.get("mapreduce.cluster.local.dir")); //Hadoop MiniMR overwrites this while it should not
LOG.info("Mini mapreduce cluster started");
// In hadoop2, YARN/MR2 starts a mini cluster with its own conf instance and updates settings.
// Our HBase MR jobs need several of these settings in order to properly run. So we copy the
// necessary config properties here. YARN-129 required adding a few properties.
-conf.set("mapred.job.tracker", jobConf.get("mapred.job.tracker"));
+conf.set("mapreduce.jobtracker.address", jobConf.get("mapreduce.jobtracker.address"));
// this for mrv2 support; mr1 ignores this
conf.set("mapreduce.framework.name", "yarn");
conf.setBoolean("yarn.is.minicluster", true);
@@ -2228,7 +2228,7 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility {
mrCluster = null;
}
// Restore configuration to point to local jobtracker
-conf.set("mapred.job.tracker", "local");
+conf.set("mapreduce.jobtracker.address", "local");
LOG.info("Mini mapreduce cluster stopped");
}

@@ -1158,7 +1158,7 @@ public class PerformanceEvaluation extends Configured implements Tool {
System.err.println();
System.err.println(" Note: -D properties will be applied to the conf used. ");
System.err.println(" For example: ");
-System.err.println(" -Dmapred.output.compress=true");
+System.err.println(" -Dmapreduce.output.fileoutputformat.compress=true");
System.err.println(" -Dmapreduce.task.timeout=60000");
System.err.println();
System.err.println("Command:");

@@ -48,9 +48,9 @@ public class TestFullLogReconstruction {
Configuration c = TEST_UTIL.getConfiguration();
c.setBoolean("dfs.support.append", true);
// quicker heartbeat interval for faster DN death notification
-c.setInt("heartbeat.recheck.interval", 5000);
+c.setInt("dfs.namenode.heartbeat.recheck-interval", 5000);
c.setInt("dfs.heartbeat.interval", 1);
-c.setInt("dfs.socket.timeout", 5000);
+c.setInt("dfs.client.socket-timeout", 5000);
// faster failover with cluster.shutdown();fs.close() idiom
c.setInt("ipc.client.connect.max.retries", 1);
c.setInt("dfs.client.block.recovery.retries", 1);

@@ -1592,8 +1592,8 @@ public class TestAdmin {
// make sure log.hflush() calls syncFs() to open a pipeline
TEST_UTIL.getConfiguration().setBoolean("dfs.support.append", true);
// lower the namenode & datanode heartbeat so the namenode
-// quickly detects datanode failures
-TEST_UTIL.getConfiguration().setInt("heartbeat.recheck.interval", 5000);
+// quickly detects datanode failures
+TEST_UTIL.getConfiguration().setInt("dfs.namenode.heartbeat.recheck-interval", 5000);
TEST_UTIL.getConfiguration().setInt("dfs.heartbeat.interval", 1);
// the namenode might still try to choose the recently-dead datanode
// for a pipeline, so try to a new pipeline multiple times

@@ -84,7 +84,7 @@ public class TestBlockReorder {
@Before
public void setUp() throws Exception {
htu = new HBaseTestingUtility();
-htu.getConfiguration().setInt("dfs.block.size", 1024);// For the test with multiple blocks
+htu.getConfiguration().setInt("dfs.blocksize", 1024);// For the test with multiple blocks
htu.getConfiguration().setBoolean("dfs.support.append", true);
htu.getConfiguration().setInt("dfs.replication", 3);
htu.startMiniDFSCluster(3,

@@ -298,7 +298,7 @@ public class TestHFileOutputFormat {
FileSystem fs = testDir.getFileSystem(conf);
// Set down this value or we OOME in eclipse.
-conf.setInt("io.sort.mb", 20);
+conf.setInt("mapreduce.task.io.sort.mb", 20);
// Write a few files.
conf.setLong(HConstants.HREGION_MAX_FILESIZE, 64 * 1024);

@@ -297,7 +297,7 @@ public class TestHFileOutputFormat2 {
FileSystem fs = testDir.getFileSystem(conf);
// Set down this value or we OOME in eclipse.
-conf.setInt("io.sort.mb", 20);
+conf.setInt("mapreduce.task.io.sort.mb", 20);
// Write a few files.
conf.setLong(HConstants.HREGION_MAX_FILESIZE, 64 * 1024);

@@ -145,7 +145,7 @@ public class TestHLogRecordReader {
HLogInputFormat input = new HLogInputFormat();
Configuration jobConf = new Configuration(conf);
-jobConf.set("mapred.input.dir", logDir.toString());
+jobConf.set("mapreduce.input.fileinputformat.inputdir", logDir.toString());
jobConf.setLong(HLogInputFormat.END_TIME_KEY, ts);
// only 1st file is considered, and only its 1st entry is used
@@ -195,7 +195,7 @@ public class TestHLogRecordReader {
// should have 2 log files now
HLogInputFormat input = new HLogInputFormat();
Configuration jobConf = new Configuration(conf);
-jobConf.set("mapred.input.dir", logDir.toString());
+jobConf.set("mapreduce.input.fileinputformat.inputdir", logDir.toString());
// make sure both logs are found
List<InputSplit> splits = input.getSplits(MapreduceTestingShim.createJobContext(jobConf));

@@ -444,7 +444,7 @@ public class TestImportExport {
assertTrue(data.toString().contains("-Dimport.bulk.output=/path/for/output"));
assertTrue(data.toString().contains("-Dimport.filter.class=<name of filter class>"));
assertTrue(data.toString().contains("-Dimport.bulk.output=/path/for/output"));
-assertTrue(data.toString().contains("-Dmapred.reduce.tasks.speculative.execution=false"));
+assertTrue(data.toString().contains("-Dmapreduce.reduce.speculative=false"));
} finally {
System.setErr(oldPrintStream);
System.setSecurityManager(SECURITY_MANAGER);
@@ -476,8 +476,8 @@ public class TestImportExport {
assertTrue(data.toString().contains("-D hbase.mapreduce.scan.column.family=<familyName>"));
assertTrue(data.toString().contains("-D hbase.mapreduce.include.deleted.rows=true"));
assertTrue(data.toString().contains("-Dhbase.client.scanner.caching=100"));
-assertTrue(data.toString().contains("-Dmapred.map.tasks.speculative.execution=false"));
-assertTrue(data.toString().contains("-Dmapred.reduce.tasks.speculative.execution=false"));
+assertTrue(data.toString().contains("-Dmapreduce.map.speculative=false"));
+assertTrue(data.toString().contains("-Dmapreduce.reduce.speculative=false"));
assertTrue(data.toString().contains("-Dhbase.export.scanner.batch=10"));
} finally {
System.setErr(oldPrintStream);
@@ -653,4 +653,4 @@ public class TestImportExport {
return isVisited;
}
}
-}
+}

@@ -164,7 +164,7 @@ public class TestImportTSVWithOperationAttributes implements Configurable {
if (conf.getBoolean(FORCE_COMBINER_CONF, true)) {
LOG.debug("Forcing combiner.");
-conf.setInt("min.num.spills.for.combine", 1);
+conf.setInt("mapreduce.map.combine.minspills", 1);
}
// run the import

@@ -242,7 +242,7 @@ public class TestImportTSVWithVisibilityLabels implements Configurable {
if (conf.getBoolean(FORCE_COMBINER_CONF, true)) {
LOG.debug("Forcing combiner.");
-conf.setInt("min.num.spills.for.combine", 1);
+conf.setInt("mapreduce.map.combine.minspills", 1);
}
// run the import
@@ -351,4 +351,4 @@ public class TestImportTSVWithVisibilityLabels implements Configurable {
assertTrue(verified);
}
-}
+}

@@ -259,7 +259,7 @@ public class TestImportTsv implements Configurable {
if (conf.getBoolean(FORCE_COMBINER_CONF, true)) {
LOG.debug("Forcing combiner.");
-conf.setInt("min.num.spills.for.combine", 1);
+conf.setInt("mapreduce.map.combine.minspills", 1);
}
// run the import

@@ -217,7 +217,7 @@ public class TestRowCounter {
"Usage: RowCounter [options] <tablename> [--range=[startKey],[endKey]] " +
"[<column1> <column2>...]"));
assertTrue(data.toString().contains("-Dhbase.client.scanner.caching=100"));
-assertTrue(data.toString().contains("-Dmapred.map.tasks.speculative.execution=false"));
+assertTrue(data.toString().contains("-Dmapreduce.map.speculative=false"));
}
data.reset();
try {

@@ -100,7 +100,7 @@ public abstract class TestTableMapReduceBase {
public void testCombiner() throws IOException {
Configuration conf = new Configuration(UTIL.getConfiguration());
// force use of combiner for testing purposes
-conf.setInt("min.num.spills.for.combine", 1);
+conf.setInt("mapreduce.map.combine.minspills", 1);
runTestOnTable(new HTable(conf, MULTI_REGION_TABLE_NAME));
}

@@ -3722,7 +3722,7 @@ public class TestHRegion {
// a copy of all replicas -- if small block size, then blocks are spread evenly across the
// the three nodes. hfilev3 with tags seems to put us over the block size. St.Ack.
// final int DEFAULT_BLOCK_SIZE = 1024;
-// htu.getConfiguration().setLong("dfs.block.size", DEFAULT_BLOCK_SIZE);
+// htu.getConfiguration().setLong("dfs.blocksize", DEFAULT_BLOCK_SIZE);
htu.getConfiguration().setInt("dfs.replication", 2);
// set up a cluster with 3 nodes

@@ -83,7 +83,7 @@ public class TestJoinedScanners {
HBaseTestingUtility htu = new HBaseTestingUtility();
final int DEFAULT_BLOCK_SIZE = 1024*1024;
-htu.getConfiguration().setLong("dfs.block.size", DEFAULT_BLOCK_SIZE);
+htu.getConfiguration().setLong("dfs.blocksize", DEFAULT_BLOCK_SIZE);
htu.getConfiguration().setInt("dfs.replication", 1);
htu.getConfiguration().setLong("hbase.hregion.max.filesize", 322122547200L);
MiniHBaseCluster cluster = null;

@@ -103,9 +103,10 @@ public class TestHLog {
TEST_UTIL.getConfiguration().setBoolean("dfs.support.broken.append", true);
TEST_UTIL.getConfiguration().setBoolean("dfs.support.append", true);
// quicker heartbeat interval for faster DN death notification
-TEST_UTIL.getConfiguration().setInt("heartbeat.recheck.interval", 5000);
+TEST_UTIL.getConfiguration().setInt("dfs.namenode.heartbeat.recheck-interval", 5000);
TEST_UTIL.getConfiguration().setInt("dfs.heartbeat.interval", 1);
-TEST_UTIL.getConfiguration().setInt("dfs.socket.timeout", 5000);
+TEST_UTIL.getConfiguration().setInt("dfs.client.socket-timeout", 5000);
// faster failover with cluster.shutdown();fs.close() idiom
TEST_UTIL.getConfiguration()
.setInt("ipc.client.connect.max.retries", 1);

@@ -69,7 +69,7 @@ public class TestLogRollAbort {
TEST_UTIL.getConfiguration().setBoolean("dfs.support.append", true);
// lower the namenode & datanode heartbeat so the namenode
// quickly detects datanode failures
-TEST_UTIL.getConfiguration().setInt("heartbeat.recheck.interval", 5000);
+TEST_UTIL.getConfiguration().setInt("dfs.namenode.heartbeat.recheck-interval", 5000);
TEST_UTIL.getConfiguration().setInt("dfs.heartbeat.interval", 1);
// the namenode might still try to choose the recently-dead datanode
// for a pipeline, so try to a new pipeline multiple times

@@ -143,7 +143,7 @@ public class TestLogRolling {
TEST_UTIL.getConfiguration().setBoolean("dfs.support.append", true);
// lower the namenode & datanode heartbeat so the namenode
// quickly detects datanode failures
-TEST_UTIL.getConfiguration().setInt("heartbeat.recheck.interval", 5000);
+TEST_UTIL.getConfiguration().setInt("dfs.namenode.heartbeat.recheck-interval", 5000);
TEST_UTIL.getConfiguration().setInt("dfs.heartbeat.interval", 1);
// the namenode might still try to choose the recently-dead datanode
// for a pipeline, so try to a new pipeline multiple times

@@ -1360,7 +1360,7 @@ public class PerformanceEvaluation extends Configured implements Tool {
System.err.println();
System.err.println(" Note: -D properties will be applied to the conf used. ");
System.err.println(" For example: ");
-System.err.println(" -Dmapred.output.compress=true");
+System.err.println(" -Dmapreduce.output.fileoutputformat.compress=true");
System.err.println(" -Dmapreduce.task.timeout=60000");
System.err.println();
System.err.println("Command:");

@@ -81,8 +81,7 @@ public class TestExportSnapshot {
conf.setInt("hbase.client.pause", 250);
conf.setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, 6);
conf.setBoolean("hbase.master.enabletable.roundrobin", true);
-conf.setInt("mapreduce.map.max.attempts", 10);
-conf.setInt("mapred.map.max.attempts", 10);
+conf.setInt("mapreduce.map.maxattempts", 10);
}
@BeforeClass

@@ -403,7 +403,6 @@ public class ProcessBasedLocalHBaseCluster {
if (dfsCluster != null) {
String fsURL = "hdfs://" + HConstants.LOCALHOST + ":" + dfsCluster.getNameNodePort();
-confMap.put("fs.default.name", fsURL);
confMap.put("fs.defaultFS", fsURL);
confMap.put("hbase.rootdir", fsURL + "/hbase_test");
}

@@ -127,7 +127,7 @@ public class TestFSUtils {
@Test public void testcomputeHDFSBlocksDistribution() throws Exception {
HBaseTestingUtility htu = new HBaseTestingUtility();
final int DEFAULT_BLOCK_SIZE = 1024;
-htu.getConfiguration().setLong("dfs.block.size", DEFAULT_BLOCK_SIZE);
+htu.getConfiguration().setLong("dfs.blocksize", DEFAULT_BLOCK_SIZE);
MiniDFSCluster cluster = null;
Path testFile = null;

@@ -182,7 +182,7 @@ Link detected: yes
<para>Case study of configuring <code>xceivers</code>, and diagnosing errors from mis-configurations.
<link xlink:href="http://www.larsgeorge.com/2012/03/hadoop-hbase-and-xceivers.html">http://www.larsgeorge.com/2012/03/hadoop-hbase-and-xceivers.html</link>
</para>
-<para>See also <xref linkend="dfs.datanode.max.xcievers"/>.
+<para>See also <xref linkend="dfs.datanode.max.transfer.threads"/>.
</para>
</section>

@@ -352,8 +352,8 @@ Index: pom.xml
Secure HBase, see <xref linkend="hbase.secure.configuration" />.</para>
</section>
-<section xml:id="dfs.datanode.max.xcievers">
-<title><varname>dfs.datanode.max.xcievers</varname><indexterm>
+<section xml:id="dfs.datanode.max.transfer.threads">
+<title><varname>dfs.datanode.max.transfer.threads</varname><indexterm>
<primary>xcievers</primary>
</indexterm></title>
@@ -365,7 +365,7 @@ Index: pom.xml
<varname>xceivers</varname> value to at least the following:
<programlisting>
&lt;property&gt;
-&lt;name&gt;dfs.datanode.max.xcievers&lt;/name&gt;
+&lt;name&gt;dfs.datanode.max.transfer.threads&lt;/name&gt;
&lt;value&gt;4096&lt;/value&gt;
&lt;/property&gt;
</programlisting></para>
@@ -1170,8 +1170,8 @@ of all regions.
<section xml:id="spec.ex"><title>Speculative Execution</title>
<para>Speculative Execution of MapReduce tasks is on by default, and for HBase clusters it is generally advised to turn off
Speculative Execution at a system-level unless you need it for a specific case, where it can be configured per-job.
-Set the properties <varname>mapred.map.tasks.speculative.execution</varname> and
-<varname>mapred.reduce.tasks.speculative.execution</varname> to false.
+Set the properties <varname>mapreduce.map.speculative</varname> and
+<varname>mapreduce.reduce.speculative</varname> to false.
</para>
</section>
</section>
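A minimal per-job sketch of the advice above, using the renamed properties; the job name and class are illustrative, and the same setBoolean calls appear in the IntegrationTestBigLinkedList hunks earlier in this commit:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.mapreduce.Job;

public class DisableSpeculation {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    Job job = Job.getInstance(conf, "hbase-mr-job"); // name is illustrative
    // Per-job switch-off with the Hadoop 2 names adopted in this commit
    job.getConfiguration().setBoolean("mapreduce.map.speculative", false);
    job.getConfiguration().setBoolean("mapreduce.reduce.speculative", false);
  }
}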

@@ -379,7 +379,7 @@ row10 c1 c2
</para>
<para>
WALPlayer, by default, runs as a mapreduce job. To NOT run WALPlayer as a mapreduce job on your cluster,
-force it to run all in the local process by adding the flags <code>-Dmapred.job.tracker=local</code> on the command line.
+force it to run all in the local process by adding the flags <code>-Dmapreduce.jobtracker.address=local</code> on the command line.
</para>
</section>
<section xml:id="rowcounter">

@@ -869,7 +869,7 @@ Disk-related IOException in BlockReceiver constructor. Cause is java.io.IOExcept
This typically shows up in the DataNode logs.
</para>
<para>
-See the Getting Started section on <link linkend="dfs.datanode.max.xcievers">xceivers configuration</link>.
+See the Getting Started section on <link linkend="dfs.datanode.max.transfer.threads">xceivers configuration</link>.
</para>
</section>
<section xml:id="trouble.rs.runtime.oom-nt">