Merge trunk into HA branch.
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/HDFS-1623@1226900 13f79535-47bb-0310-9956-ffa450edef68
commit 1423fff096
@@ -83,6 +83,8 @@ Trunk (unreleased changes)
 
   HADOOP-7348. Change 'addnl' in getmerge util to be a flag '-nl' instead (XieXianshan via harsh)
 
+  HADOOP-7919. Remove the unused hadoop.logfile.* properties from the core-default.xml file. (harsh)
+
   BUGS
 
   HADOOP-7851. Configuration.getClasses() never returns the default value.
@@ -198,6 +200,8 @@ Release 0.23.1 - Unreleased
   HADOOP-7890. Redirect hadoop script's deprecation message to stderr.
   (Koji Knoguchi via mahadev)
 
+  HADOOP-7504. Add the missing Ganglia31 opts to hadoop-metrics.properties as a comment. (harsh)
+
   OPTIMIZATIONS
 
   BUG FIXES

@@ -40,6 +40,7 @@ mapred.class=org.apache.hadoop.metrics.spi.NullContext
 
 # Configuration of the "jvm" context for ganglia
 # jvm.class=org.apache.hadoop.metrics.ganglia.GangliaContext
+# jvm.class=org.apache.hadoop.metrics.ganglia.GangliaContext31
 # jvm.period=10
 # jvm.servers=localhost:8649
 
@@ -53,6 +54,7 @@ rpc.class=org.apache.hadoop.metrics.spi.NullContext
 
 # Configuration of the "rpc" context for ganglia
 # rpc.class=org.apache.hadoop.metrics.ganglia.GangliaContext
+# rpc.class=org.apache.hadoop.metrics.ganglia.GangliaContext31
 # rpc.period=10
 # rpc.servers=localhost:8649
 
@@ -67,6 +69,7 @@ ugi.class=org.apache.hadoop.metrics.spi.NullContext
 
 # Configuration of the "ugi" context for ganglia
 # ugi.class=org.apache.hadoop.metrics.ganglia.GangliaContext
+# ugi.class=org.apache.hadoop.metrics.ganglia.GangliaContext31
 # ugi.period=10
 # ugi.servers=localhost:8649
 

@@ -27,7 +27,7 @@
 <!-- file system properties -->
 
   <property>
-    <name>dfs.name.dir</name>
+    <name>dfs.namenode.name.dir</name>
     <value>${HADOOP_NN_DIR}</value>
     <description>Determines where on the local filesystem the DFS name node
       should store the name table. If this is a comma-delimited list
@@ -37,7 +37,7 @@
   </property>
 
   <property>
-    <name>dfs.data.dir</name>
+    <name>dfs.datanode.data.dir</name>
     <value>${HADOOP_DN_DIR}</value>
     <description>Determines where on the local filesystem an DFS data node
       should store its blocks. If this is a comma-delimited
@@ -49,11 +49,11 @@
   </property>
 
   <property>
-    <name>dfs.safemode.threshold.pct</name>
+    <name>dfs.namenode.safemode.threshold-pct</name>
     <value>1.0f</value>
     <description>
-      Specifies the percentage of blocks that should satisfy
-      the minimal replication requirement defined by dfs.replication.min.
+      Specifies the percentage of blocks that should satisfy the minimal
+      replication requirement defined by dfs.namenode.replication.min.
       Values less than or equal to 0 mean not to start in safe mode.
       Values greater than 1 will make safe mode permanent.
     </description>
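The description above encodes three regimes for the threshold. A minimal sketch of the check it implies, with a hypothetical helper name, not the actual FSNamesystem logic:

```java
// Hypothetical sketch of dfs.namenode.safemode.threshold-pct semantics as
// described in the hunk above; the real check lives in the NameNode.
public class SafeModeThreshold {
  static boolean mayLeaveSafeMode(long reportedBlocks, long totalBlocks,
                                  float thresholdPct) {
    if (thresholdPct <= 0) return true;   // <= 0: do not start in safe mode
    if (thresholdPct > 1)  return false;  // > 1: safe mode is permanent
    // Otherwise: leave once this fraction of blocks meets minimal replication.
    return reportedBlocks >= (long) (thresholdPct * totalBlocks);
  }

  public static void main(String[] args) {
    System.out.println(mayLeaveSafeMode(999, 1000, 1.0f));  // false
    System.out.println(mayLeaveSafeMode(1000, 1000, 1.0f)); // true
    System.out.println(mayLeaveSafeMode(0, 1000, 0f));      // true
    System.out.println(mayLeaveSafeMode(1000, 1000, 1.5f)); // false
  }
}
```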
@@ -70,7 +70,7 @@
   </property>
 
   <property>
-    <name>dfs.http.address</name>
+    <name>dfs.namenode.http-address</name>
     <value>${HADOOP_NN_HOST}:50070</value>
     <description>The name of the default file system. Either the
       literal string "local" or a host:port for NDFS.
@@ -195,7 +195,7 @@
   </property>
 
   <property>
-    <name>dfs.https.address</name>
+    <name>dfs.namenode.https-address</name>
     <value>${HADOOP_NN_HOST}:50470</value>
     <description>The https address where namenode binds</description>
   </property>
@@ -203,9 +203,9 @@
   <property>
     <name>dfs.datanode.data.dir.perm</name>
     <value>${DFS_DATANODE_DIR_PERM}</value>
-    <description>The permissions that should be there on dfs.data.dir
+    <description>The permissions that should be there on dfs.datanode.data.dir
     directories. The datanode will not come up if the permissions are
-    different on existing dfs.data.dir directories. If the directories
+    different on existing dfs.datanode.data.dir directories. If the directories
     don't exist, they will be created with this permission.
     </description>
   </property>
@@ -237,7 +237,7 @@
   </property>
 
   <property>
-    <name>dfs.secondary.http.address</name>
+    <name>dfs.namenode.secondary.http-address</name>
     <value>${HADOOP_SNN_HOST}:50090</value>
     <description>
       The secondary namenode http server address and port.

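All the renames in this template (dfs.name.dir to dfs.namenode.name.dir, and so on) lean on Hadoop's key-deprecation table, so configurations that still use the old names keep resolving. A minimal sketch of that mechanism, assuming the Configuration.addDeprecation API available in the 0.23.x line; the real registrations live in HdfsConfiguration:

```java
import org.apache.hadoop.conf.Configuration;

public class DeprecatedKeysSketch {
  public static void main(String[] args) {
    // Assumption: 0.23.x-era API; HdfsConfiguration registers these
    // mappings itself, this only shows the mechanism.
    Configuration.addDeprecation("dfs.name.dir",
        new String[] { "dfs.namenode.name.dir" });

    Configuration conf = new Configuration(false);
    conf.set("dfs.name.dir", "/data/nn"); // old key still accepted...
    // ...and reads through either name resolve to the same value.
    System.out.println(conf.get("dfs.namenode.name.dir")); // /data/nn
    System.out.println(conf.get("dfs.name.dir"));          // /data/nn
  }
}
```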
@@ -134,20 +134,6 @@
   </description>
 </property>
 
-<!--- logging properties -->
-
-<property>
-  <name>hadoop.logfile.size</name>
-  <value>10000000</value>
-  <description>The max size of each log file</description>
-</property>
-
-<property>
-  <name>hadoop.logfile.count</name>
-  <value>10</value>
-  <description>The max number of log files</description>
-</property>
-
 <!-- i/o properties -->
 <property>
   <name>io.file.buffer.size</name>

@@ -273,10 +273,10 @@ public class FSOperations {
     @Override
     public Void execute(FileSystem fs) throws IOException {
       if (replication == -1) {
-        replication = (short) fs.getConf().getInt("dfs.replication", 3);
+        replication = fs.getDefaultReplication();
       }
       if (blockSize == -1) {
-        blockSize = fs.getConf().getInt("dfs.block.size", 67108864);
+        blockSize = fs.getDefaultBlockSize();
       }
       FsPermission fsPermission = getPermission(permission);
       int bufferSize = fs.getConf().getInt("httpfs.buffer.size", 4096);

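The HttpFS change above (HDFS-2722) is more than cleanup: FileSystem#getDefaultBlockSize() returns a long, while the replaced getInt read could never carry a block size past Integer.MAX_VALUE. A standalone illustration of the truncation being avoided (values are made up):

```java
public class BlockSizeWidth {
  public static void main(String[] args) {
    long blockSize = 4L * 1024 * 1024 * 1024; // a 4 GB block size
    System.out.println(blockSize);            // 4294967296
    // Forcing it through an int, as the old getInt-based read would,
    // cannot work: 4294967296 exceeds Integer.MAX_VALUE (2147483647).
    System.out.println((int) blockSize);      // 0 after truncation
  }
}
```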
@@ -131,6 +131,8 @@ Trunk (unreleased changes)
   HDFS-2476. More CPU efficient data structure for under-replicated,
   over-replicated, and invalidated blocks. (Tomasz Nykiel via todd)
 
+  HDFS-554. Use System.arraycopy in BlockInfo.ensureCapacity. (harsh)
+
   BUG FIXES
   HDFS-2299. TestOfflineEditsViewer is failing on trunk. (Uma Maheswara Rao G
   via atm)
@@ -254,6 +256,10 @@ Release 0.23.1 - UNRELEASED
   HDFS-2335. DataNodeCluster and NNStorage always pull fresh entropy.
   (Uma Maheswara Rao G via eli)
 
+  HDFS-2574. Remove references to some deprecated properties in conf templates and defaults files. (Joe Crobak via harsh)
+
+  HDFS-2722. HttpFs should not be using an int for block size. (harsh)
+
   OPTIMIZATIONS
 
   HDFS-2130. Switch default checksum to CRC32C. (todd)

@@ -173,9 +173,7 @@ public class BlockInfo extends Block implements LightWeightGSet.LinkedElement {
    * happen only when replication is manually increased by the user. */
   Object[] old = triplets;
   triplets = new Object[(last+num)*3];
-  for(int i=0; i < last*3; i++) {
-    triplets[i] = old[i];
-  }
+  System.arraycopy(old, 0, triplets, 0, last*3);
   return last;
 }
 
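The BlockInfo change (HDFS-554) replaces a hand-rolled element loop with System.arraycopy, which JVMs implement as an intrinsic bulk copy. A standalone sketch of the equivalence (data is made up):

```java
import java.util.Arrays;

public class ArrayGrowSketch {
  public static void main(String[] args) {
    Object[] old = { "a", "b", "c" };
    Object[] triplets = new Object[6]; // grown array, as in ensureCapacity
    // One bulk copy instead of: for (int i = 0; i < old.length; i++) ...
    System.arraycopy(old, 0, triplets, 0, old.length);
    System.out.println(Arrays.toString(triplets)); // [a, b, c, null, null, null]
  }
}
```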
@@ -22,11 +22,11 @@
     <value>${HADOOP_REPLICATION}</value>
   </property>
   <property>
-    <name>dfs.name.dir</name>
+    <name>dfs.namenode.name.dir</name>
     <value>${HADOOP_NN_DIR}</value>
   </property>
   <property>
-    <name>dfs.data.dir</name>
+    <name>dfs.datanode.data.dir</name>
     <value>${HADOOP_DN_DIR}</value>
   </property>
   <property>

@@ -199,8 +199,8 @@ creations/deletions), or "all".</description>
   <name>dfs.namenode.name.dir.restore</name>
   <value>false</value>
   <description>Set to true to enable NameNode to attempt recovering a
-  previously failed dfs.name.dir. When enabled, a recovery of any failed
-  directory is attempted during checkpoint.</description>
+  previously failed dfs.namenode.name.dir. When enabled, a recovery of any
+  failed directory is attempted during checkpoint.</description>
 </property>
 
 <property>
@@ -223,7 +223,7 @@ creations/deletions), or "all".</description>
   <description>Determines where on the local filesystem the DFS name node
       should store the transaction (edits) file. If this is a comma-delimited list
       of directories then the transaction file is replicated in all of the
-      directories, for redundancy. Default value is same as dfs.name.dir
+      directories, for redundancy. Default value is same as dfs.namenode.name.dir
   </description>
 </property>
 