HADOOP-12733. Remove references to obsolete io.seqfile configuration variables. Contributed by Ray Chiang.

(cherry picked from commit 01d31fe938)
Author: Akira Ajisaka
Date: 2017-01-04 14:10:36 +09:00
Commit: 3e2d26a97f
Parent: dc2e44e8f8
4 changed files with 0 additions and 26 deletions
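
Both keys were dead defaults: per the commit title nothing reads them anymore, which is why the change touches only core-default.xml, a test skip list, and two recorded job configurations (26 deletions, no additions). For anything that still looks the keys up through the Configuration API, behavior is unchanged, because a key absent from core-default.xml simply falls back to the caller-supplied default. A minimal sketch of that fallback; the class and main method are illustrative only, while the key names and Configuration calls are real:

    import org.apache.hadoop.conf.Configuration;

    public class SeqfileDefaultsCheck {
      public static void main(String[] args) {
        // Configuration loads core-default.xml plus any site overrides.
        Configuration conf = new Configuration();
        // With the XML entries removed, these lookups return the supplied
        // defaults unless a site file explicitly sets the keys:
        boolean lazy = conf.getBoolean("io.seqfile.lazydecompress", true);
        int recordLimit = conf.getInt("io.seqfile.sorter.recordlimit", 1000000);
        System.out.println(lazy + " / " + recordLimit);
      }
    }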

hadoop-common-project/hadoop-common/src/main/resources/core-default.xml

@@ -1259,22 +1259,6 @@
   </description>
 </property>
 
-<property>
-  <name>io.seqfile.lazydecompress</name>
-  <value>true</value>
-  <description>Should values of block-compressed SequenceFiles be decompressed
-  only when necessary.
-  </description>
-</property>
-
-<property>
-  <name>io.seqfile.sorter.recordlimit</name>
-  <value>1000000</value>
-  <description>The limit on number of records to be kept in memory in a spill
-  in SequenceFiles.Sorter
-  </description>
-</property>
-
 <property>
   <name>io.mapfile.bloom.size</name>
   <value>1048576</value>

hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestCommonConfigurationFields.java

@@ -120,8 +120,6 @@ public class TestCommonConfigurationFields extends TestConfigurationFieldsBase {
     configurationPropsToSkipCompare.add("dr.who");
 
     // XML deprecated properties.
-    xmlPropsToSkipCompare.add("io.seqfile.lazydecompress");
-    xmlPropsToSkipCompare.add("io.seqfile.sorter.recordlimit");
     // - org.apache.hadoop.hdfs.client.HdfsClientConfigKeys
     xmlPropsToSkipCompare
         .add("io.bytes.per.checksum");

hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/resources/job_1329348432655_0001_conf.xml

@@ -16,7 +16,6 @@
 <property><!--Loaded from job.xml--><name>mapreduce.reduce.shuffle.memory.limit.percent</name><value>0.25</value></property>
 <property><!--Loaded from job.xml--><name>hadoop.http.authentication.kerberos.keytab</name><value>${user.home}/hadoop.keytab</value></property>
 <property><!--Loaded from job.xml--><name>yarn.nodemanager.keytab</name><value>/etc/krb5.keytab</value></property>
-<property><!--Loaded from job.xml--><name>io.seqfile.sorter.recordlimit</name><value>1000000</value></property>
 <property><!--Loaded from job.xml--><name>s3.blocksize</name><value>67108864</value></property>
 <property><!--Loaded from job.xml--><name>mapreduce.task.io.sort.factor</name><value>10</value></property>
 <property><!--Loaded from job.xml--><name>yarn.nodemanager.disk-health-checker.interval-ms</name><value>120000</value></property>
@@ -111,7 +110,6 @@
 <property><!--Loaded from job.xml--><name>dfs.client.block.write.retries</name><value>3</value></property>
 <property><!--Loaded from job.xml--><name>hadoop.proxyuser.user.groups</name><value>users</value></property>
 <property><!--Loaded from job.xml--><name>dfs.namenode.name.dir.restore</name><value>false</value></property>
-<property><!--Loaded from job.xml--><name>io.seqfile.lazydecompress</name><value>true</value></property>
 <property><!--Loaded from job.xml--><name>mapreduce.reduce.merge.inmem.threshold</name><value>1000</value></property>
 <property><!--Loaded from job.xml--><name>mapreduce.input.fileinputformat.split.minsize</name><value>0</value></property>
 <property><!--Loaded from job.xml--><name>dfs.replication</name><value>3</value></property>

hadoop-tools/hadoop-sls/src/main/data/2jobs2min-rumen-jh.json

@@ -4545,7 +4545,6 @@
 "hadoop.ssl.keystores.factory.class" : "org.apache.hadoop.security.ssl.FileBasedKeyStoresFactory",
 "hadoop.http.authentication.kerberos.keytab" : "${user.home}/hadoop.keytab",
 "yarn.nodemanager.keytab" : "/etc/krb5.keytab",
-"io.seqfile.sorter.recordlimit" : "1000000",
 "s3.blocksize" : "67108864",
 "mapreduce.task.io.sort.factor" : "10",
 "yarn.nodemanager.disk-health-checker.interval-ms" : "120000",
@@ -4671,7 +4670,6 @@
 "rpc.engine.org.apache.hadoop.mapreduce.v2.api.MRClientProtocolPB" : "org.apache.hadoop.ipc.ProtobufRpcEngine",
 "dfs.datanode.hdfs-blocks-metadata.enabled" : "true",
 "ha.zookeeper.parent-znode" : "/hadoop-ha",
-"io.seqfile.lazydecompress" : "true",
 "mapreduce.reduce.merge.inmem.threshold" : "1000",
 "mapreduce.input.fileinputformat.split.minsize" : "0",
 "dfs.replication" : "3",
@@ -9654,7 +9652,6 @@
 "hadoop.ssl.keystores.factory.class" : "org.apache.hadoop.security.ssl.FileBasedKeyStoresFactory",
 "hadoop.http.authentication.kerberos.keytab" : "${user.home}/hadoop.keytab",
 "yarn.nodemanager.keytab" : "/etc/krb5.keytab",
-"io.seqfile.sorter.recordlimit" : "1000000",
 "s3.blocksize" : "67108864",
 "mapreduce.task.io.sort.factor" : "10",
 "yarn.nodemanager.disk-health-checker.interval-ms" : "120000",
@@ -9780,7 +9777,6 @@
 "rpc.engine.org.apache.hadoop.mapreduce.v2.api.MRClientProtocolPB" : "org.apache.hadoop.ipc.ProtobufRpcEngine",
 "dfs.datanode.hdfs-blocks-metadata.enabled" : "true",
 "ha.zookeeper.parent-znode" : "/hadoop-ha",
-"io.seqfile.lazydecompress" : "true",
 "mapreduce.reduce.merge.inmem.threshold" : "1000",
 "mapreduce.input.fileinputformat.split.minsize" : "0",
 "dfs.replication" : "3",
@@ -10263,7 +10259,6 @@
 "hadoop.ssl.keystores.factory.class" : "org.apache.hadoop.security.ssl.FileBasedKeyStoresFactory",
 "hadoop.http.authentication.kerberos.keytab" : "${user.home}/hadoop.keytab",
 "yarn.nodemanager.keytab" : "/etc/krb5.keytab",
-"io.seqfile.sorter.recordlimit" : "1000000",
 "s3.blocksize" : "67108864",
 "mapreduce.task.io.sort.factor" : "10",
 "yarn.nodemanager.disk-health-checker.interval-ms" : "120000",
@@ -10389,7 +10384,6 @@
 "rpc.engine.org.apache.hadoop.mapreduce.v2.api.MRClientProtocolPB" : "org.apache.hadoop.ipc.ProtobufRpcEngine",
 "dfs.datanode.hdfs-blocks-metadata.enabled" : "true",
 "ha.zookeeper.parent-znode" : "/hadoop-ha",
-"io.seqfile.lazydecompress" : "true",
 "mapreduce.reduce.merge.inmem.threshold" : "1000",
 "mapreduce.input.fileinputformat.split.minsize" : "0",
 "dfs.replication" : "3",