commit 1423fff096

Merge trunk into HA branch.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/HDFS-1623@1226900 13f79535-47bb-0310-9956-ffa450edef68
@@ -83,6 +83,8 @@ Trunk (unreleased changes)

   HADOOP-7348. Change 'addnl' in getmerge util to be a flag '-nl' instead (XieXianshan via harsh)

+  HADOOP-7919. Remove the unused hadoop.logfile.* properties from the core-default.xml file. (harsh)
+
   BUGS

   HADOOP-7851. Configuration.getClasses() never returns the default value.
@@ -198,6 +200,8 @@ Release 0.23.1 - Unreleased
   HADOOP-7890. Redirect hadoop script's deprecation message to stderr.
   (Koji Noguchi via mahadev)

+  HADOOP-7504. Add the missing Ganglia31 opts to hadoop-metrics.properties as a comment. (harsh)
+
   OPTIMIZATIONS

   BUG FIXES

@@ -40,6 +40,7 @@ mapred.class=org.apache.hadoop.metrics.spi.NullContext

 # Configuration of the "jvm" context for ganglia
 # jvm.class=org.apache.hadoop.metrics.ganglia.GangliaContext
+# jvm.class=org.apache.hadoop.metrics.ganglia.GangliaContext31
 # jvm.period=10
 # jvm.servers=localhost:8649

@@ -53,6 +54,7 @@ rpc.class=org.apache.hadoop.metrics.spi.NullContext

 # Configuration of the "rpc" context for ganglia
 # rpc.class=org.apache.hadoop.metrics.ganglia.GangliaContext
+# rpc.class=org.apache.hadoop.metrics.ganglia.GangliaContext31
 # rpc.period=10
 # rpc.servers=localhost:8649

@@ -67,6 +69,7 @@ ugi.class=org.apache.hadoop.metrics.spi.NullContext

 # Configuration of the "ugi" context for ganglia
 # ugi.class=org.apache.hadoop.metrics.ganglia.GangliaContext
+# ugi.class=org.apache.hadoop.metrics.ganglia.GangliaContext31
 # ugi.period=10
 # ugi.servers=localhost:8649

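Note (HADOOP-7504): the added GangliaContext31 lines stay commented out, matching the existing GangliaContext examples, so each context still defaults to NullContext. As a rough sketch of actually enabling one of them (the gmond host is a placeholder; 8649 is taken from the examples above), the jvm context wired to a Ganglia 3.1 collector would look like:

jvm.class=org.apache.hadoop.metrics.ganglia.GangliaContext31
jvm.period=10
jvm.servers=gmond-host:8649
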
@@ -27,7 +27,7 @@
 <!-- file system properties -->

   <property>
-    <name>dfs.name.dir</name>
+    <name>dfs.namenode.name.dir</name>
     <value>${HADOOP_NN_DIR}</value>
     <description>Determines where on the local filesystem the DFS name node
       should store the name table. If this is a comma-delimited list
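Note: the old key names (dfs.name.dir, dfs.data.dir, dfs.http.address, and so on) were deprecated rather than removed, so configs that still use them keep working through Hadoop's configuration deprecation mapping, with a warning logged. A minimal sketch of that behavior, assuming the 0.23-era HdfsConfiguration which registers the old-to-new key mappings:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.HdfsConfiguration;

public class DeprecatedKeyDemo {
  public static void main(String[] args) {
    Configuration conf = new HdfsConfiguration();
    conf.set("dfs.name.dir", "/tmp/nn");  // deprecated key
    // The value is visible under the new key as well.
    System.out.println(conf.get("dfs.namenode.name.dir"));  // prints /tmp/nn
  }
}
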
@@ -37,7 +37,7 @@
   </property>

   <property>
-    <name>dfs.data.dir</name>
+    <name>dfs.datanode.data.dir</name>
     <value>${HADOOP_DN_DIR}</value>
     <description>Determines where on the local filesystem an DFS data node
       should store its blocks. If this is a comma-delimited
@@ -49,11 +49,11 @@
   </property>

   <property>
-    <name>dfs.safemode.threshold.pct</name>
+    <name>dfs.namenode.safemode.threshold-pct</name>
     <value>1.0f</value>
     <description>
-      Specifies the percentage of blocks that should satisfy
-      the minimal replication requirement defined by dfs.replication.min.
+      Specifies the percentage of blocks that should satisfy the minimal
+      replication requirement defined by dfs.namenode.replication.min.
       Values less than or equal to 0 mean not to start in safe mode.
       Values greater than 1 will make safe mode permanent.
     </description>
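Note: per the description above, the default of 1.0f keeps the NameNode in safe mode until every block meets dfs.namenode.replication.min. A sketch (not from this diff) of relaxing that so startup proceeds once 99.9% of blocks qualify:

  <property>
    <name>dfs.namenode.safemode.threshold-pct</name>
    <value>0.999f</value>
  </property>
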
@@ -70,7 +70,7 @@
   </property>

   <property>
-    <name>dfs.http.address</name>
+    <name>dfs.namenode.http-address</name>
     <value>${HADOOP_NN_HOST}:50070</value>
     <description>The name of the default file system. Either the
       literal string "local" or a host:port for NDFS.
@@ -195,7 +195,7 @@
   </property>

   <property>
-    <name>dfs.https.address</name>
+    <name>dfs.namenode.https-address</name>
     <value>${HADOOP_NN_HOST}:50470</value>
     <description>The https address where namenode binds</description>
   </property>
@@ -203,9 +203,9 @@
   <property>
     <name>dfs.datanode.data.dir.perm</name>
     <value>${DFS_DATANODE_DIR_PERM}</value>
-    <description>The permissions that should be there on dfs.data.dir
+    <description>The permissions that should be there on dfs.datanode.data.dir
     directories. The datanode will not come up if the permissions are
-    different on existing dfs.data.dir directories. If the directories
+    different on existing dfs.datanode.data.dir directories. If the directories
     don't exist, they will be created with this permission.
     </description>
   </property>
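Note: the value is an octal permission string. A sketch with a hard-coded value in place of the ${DFS_DATANODE_DIR_PERM} template variable (700 keeps block directories readable by the datanode user only; the specific value is an illustration, not taken from this diff):

  <property>
    <name>dfs.datanode.data.dir.perm</name>
    <value>700</value>
  </property>
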
@@ -237,7 +237,7 @@
   </property>

   <property>
-    <name>dfs.secondary.http.address</name>
+    <name>dfs.namenode.secondary.http-address</name>
     <value>${HADOOP_SNN_HOST}:50090</value>
     <description>
       The secondary namenode http server address and port.

@@ -134,20 +134,6 @@
     </description>
   </property>

-  <!--- logging properties -->
-
-  <property>
-    <name>hadoop.logfile.size</name>
-    <value>10000000</value>
-    <description>The max size of each log file</description>
-  </property>
-
-  <property>
-    <name>hadoop.logfile.count</name>
-    <value>10</value>
-    <description>The max number of log files</description>
-  </property>
-
   <!-- i/o properties -->
   <property>
     <name>io.file.buffer.size</name>
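Note (HADOOP-7919): hadoop.logfile.size and hadoop.logfile.count are removed because nothing reads them; log file size and rotation are governed by log4j.properties instead. A sketch of the equivalent log4j 1.x settings (the RFA appender name is an assumption, not taken from this diff):

log4j.appender.RFA=org.apache.log4j.RollingFileAppender
log4j.appender.RFA.File=${hadoop.log.dir}/${hadoop.log.file}
log4j.appender.RFA.MaxFileSize=10MB
log4j.appender.RFA.MaxBackupIndex=10
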
@@ -273,10 +273,10 @@ public class FSOperations {
     @Override
     public Void execute(FileSystem fs) throws IOException {
       if (replication == -1) {
-        replication = (short) fs.getConf().getInt("dfs.replication", 3);
+        replication = fs.getDefaultReplication();
       }
       if (blockSize == -1) {
-        blockSize = fs.getConf().getInt("dfs.block.size", 67108864);
+        blockSize = fs.getDefaultBlockSize();
       }
       FsPermission fsPermission = getPermission(permission);
       int bufferSize = fs.getConf().getInt("httpfs.buffer.size", 4096);
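Note (HDFS-2722): an int cannot represent a block size of 2 GiB or more, while FileSystem.getDefaultBlockSize() returns a long (and getDefaultReplication() a short), so the new code also picks up the filesystem's real defaults instead of hard-coded ones. A self-contained sketch of the 32-bit wraparound this avoids:

public class BlockSizeOverflow {
  public static void main(String[] args) {
    long fourGiB = 4L * 1024 * 1024 * 1024;  // 4294967296 bytes
    int truncated = (int) fourGiB;           // wraps around to 0
    System.out.println("as long: " + fourGiB);
    System.out.println("as int:  " + truncated);
  }
}
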
@@ -131,6 +131,8 @@ Trunk (unreleased changes)
   HDFS-2476. More CPU efficient data structure for under-replicated,
   over-replicated, and invalidated blocks. (Tomasz Nykiel via todd)

+  HDFS-554. Use System.arraycopy in BlockInfo.ensureCapacity. (harsh)
+
   BUG FIXES
   HDFS-2299. TestOfflineEditsViewer is failing on trunk. (Uma Maheswara Rao G
   via atm)
@@ -254,6 +256,10 @@ Release 0.23.1 - UNRELEASED
   HDFS-2335. DataNodeCluster and NNStorage always pull fresh entropy.
   (Uma Maheswara Rao G via eli)

+  HDFS-2574. Remove references to some deprecated properties in conf templates and defaults files. (Joe Crobak via harsh)
+
+  HDFS-2722. HttpFs should not be using an int for block size. (harsh)
+
   OPTIMIZATIONS

   HDFS-2130. Switch default checksum to CRC32C. (todd)

@@ -173,9 +173,7 @@ public class BlockInfo extends Block implements LightWeightGSet.LinkedElement {
      * happen only when replication is manually increased by the user. */
     Object[] old = triplets;
     triplets = new Object[(last+num)*3];
-    for(int i=0; i < last*3; i++) {
-      triplets[i] = old[i];
-    }
+    System.arraycopy(old, 0, triplets, 0, last*3);
     return last;
   }

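Note (HDFS-554): the copy is behaviorally identical; System.arraycopy is a JVM intrinsic and is generally faster than an element-by-element loop. A self-contained sketch of the equivalence, using placeholder data rather than BlockInfo's actual triplets:

import java.util.Arrays;

public class ArrayCopyDemo {
  public static void main(String[] args) {
    Object[] old = {"a", "b", "c", null, null, null};
    int last = 1;  // one triplet currently in use
    Object[] triplets = new Object[(last + 1) * 3];
    // Same effect as: for (int i = 0; i < last * 3; i++) triplets[i] = old[i];
    System.arraycopy(old, 0, triplets, 0, last * 3);
    System.out.println(Arrays.toString(triplets));  // [a, b, c, null, null, null]
  }
}
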
@@ -22,11 +22,11 @@
     <value>${HADOOP_REPLICATION}</value>
   </property>
   <property>
-    <name>dfs.name.dir</name>
+    <name>dfs.namenode.name.dir</name>
     <value>${HADOOP_NN_DIR}</value>
   </property>
   <property>
-    <name>dfs.data.dir</name>
+    <name>dfs.datanode.data.dir</name>
     <value>${HADOOP_DN_DIR}</value>
   </property>
   <property>

@@ -199,8 +199,8 @@ creations/deletions), or "all".</description>
   <name>dfs.namenode.name.dir.restore</name>
   <value>false</value>
   <description>Set to true to enable NameNode to attempt recovering a
-  previously failed dfs.name.dir. When enabled, a recovery of any failed
-  directory is attempted during checkpoint.</description>
+  previously failed dfs.namenode.name.dir. When enabled, a recovery of any
+  failed directory is attempted during checkpoint.</description>
 </property>

 <property>
@@ -223,7 +223,7 @@ creations/deletions), or "all".</description>
   <description>Determines where on the local filesystem the DFS name node
       should store the transaction (edits) file. If this is a comma-delimited list
       of directories then the transaction file is replicated in all of the
-      directories, for redundancy. Default value is same as dfs.name.dir
+      directories, for redundancy. Default value is same as dfs.namenode.name.dir
   </description>
 </property>
