HBASE-714 Showing bytes in log when should be string (2)
git-svn-id: https://svn.apache.org/repos/asf/hadoop/hbase/trunk@671951 13f79535-47bb-0310-9956-ffa450edef68
parent a3c07a4035
commit bcbcbc79f4
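Every hunk below applies the same pattern: wherever a raw byte [] key (region name, table name, row) was concatenated into a log message, it is now passed through Bytes.toString(...) so the log shows readable text instead of the array's default toString. A minimal before/after sketch, not part of the commit, assuming only org.apache.hadoop.hbase.util.Bytes from this tree; the regionName value is made up:

import org.apache.hadoop.hbase.util.Bytes;

public class LogBytesExample {
  public static void main(String[] args) {
    byte [] regionName = Bytes.toBytes(".META.,,1");
    // Before: string concatenation falls back to byte[].toString(),
    // which prints an array reference like "[B@1b6d3586".
    System.out.println("no longer has references to " + regionName);
    // After: Bytes.toString decodes the UTF-8 bytes into readable text.
    System.out.println("no longer has references to " + Bytes.toString(regionName));
  }
}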
@@ -68,6 +68,9 @@ Hbase Change Log
    HBASE-702 deleteall doesn't
    HBASE-704 update new shell docs and commands on help menu
    HBASE-709 Deadlock while rolling WAL-log while finishing flush
+   HBASE-710 If clocks are way off, then we can have daughter split come
+             before rather than after its parent in .META.
+   HBASE-714 Showing bytes in log when should be string (2)
 
 
 IMPROVEMENTS
@@ -23,6 +23,7 @@
 
 # The java implementation to use. Required.
 # export JAVA_HOME=/usr/lib/j2sdk1.5-sun
+export JAVA_HOME=/usr
 
 # Extra Java CLASSPATH elements. Optional.
 # export HBASE_CLASSPATH=
@@ -22,4 +22,30 @@
 */
 -->
 <configuration>
+<!--
+  <property>
+    <name>hbase.master</name>
+    <value>durruti.local:60000</value>
+    <description>The host and port that the HBase master runs at.
+    A value of 'local' runs the master and a regionserver in
+    a single process.
+    </description>
+  </property>
+  <property>
+    <name>hbase.rootdir</name>
+    <value>hdfs://durruti.local:10000/hbase</value>
+    <description>The directory shared by region servers.
+    Should be fully-qualified to include the filesystem to use.
+    E.g: hdfs://NAMENODE_SERVER:PORT/HBASE_ROOTDIR
+    </description>
+  </property>
+  <property>
+    <name>dfs.replication</name>
+    <value>1</value>
+    <description>Default block replication.
+    The actual number of replications can be specified when the file is created.
+    The default is used if replication is not specified in create time.
+    </description>
+  </property>
+-->
 </configuration>
@@ -41,3 +41,5 @@ log4j.appender.console.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{2}:
 # Custom Logging levels
 
 #log4j.logger.org.apache.hadoop.fs.FSNamesystem=DEBUG
+#log4j.logger.org.apache.hadoop.hbase=DEBUG
+log4j.logger.org.apache.hadoop.dfs=DEBUG
@@ -175,12 +175,29 @@ public class HRegionInfo implements WritableComparable {
    */
   public HRegionInfo(HTableDescriptor tableDesc, final byte [] startKey,
       final byte [] endKey, final boolean split)
+  throws IllegalArgumentException {
+    this(tableDesc, startKey, endKey, split, System.currentTimeMillis());
+  }
+
+  /**
+   * Construct HRegionInfo with explicit parameters
+   *
+   * @param tableDesc the table descriptor
+   * @param startKey first key in region
+   * @param endKey end of key range
+   * @param split true if this region has split and we have daughter regions
+   * regions that may or may not hold references to this region.
+   * @param regionid Region id to use.
+   * @throws IllegalArgumentException
+   */
+  public HRegionInfo(HTableDescriptor tableDesc, final byte [] startKey,
+      final byte [] endKey, final boolean split, final long regionid)
   throws IllegalArgumentException {
     if (tableDesc == null) {
       throw new IllegalArgumentException("tableDesc cannot be null");
     }
     this.offLine = false;
-    this.regionId = System.currentTimeMillis();
+    this.regionId = regionid;
     this.regionName = createRegionName(tableDesc.getName(), startKey, regionId);
     this.regionNameStr = Bytes.toString(this.regionName);
     this.split = split;
@@ -104,6 +104,11 @@ public class BatchOperation implements Writable {
     return this.value != null;
   }
 
+  @Override
+  public String toString() {
+    return "column => " + Bytes.toString(this.column) + ", value => '...'";
+  }
+
   // Writable methods
 
   // This is a hotspot when updating deserializing incoming client submissions.
@@ -208,6 +208,24 @@ public class BatchUpdate implements Writable, Iterable<BatchOperation> {
     return operations.iterator();
   }
 
+  @Override
+  public String toString() {
+    StringBuilder sb = new StringBuilder();
+    sb.append("row => ");
+    sb.append(row == null? "": Bytes.toString(row));
+    sb.append(", {");
+    boolean morethanone = false;
+    for (BatchOperation bo: this.operations) {
+      if (morethanone) {
+        sb.append(", ");
+      }
+      morethanone = true;
+      sb.append(bo.toString());
+    }
+    sb.append("}");
+    return sb.toString();
+  }
+
   //
   // Writable
   //
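For context on the new BatchUpdate.toString above, a hedged usage sketch, not part of the commit: it assumes the BatchUpdate(byte [] row) constructor seen elsewhere in this diff, a put(byte [] column, byte [] value) method, and the org.apache.hadoop.hbase.io package location; the row and column names are made up:

import org.apache.hadoop.hbase.io.BatchUpdate;
import org.apache.hadoop.hbase.util.Bytes;

public class BatchUpdateToStringExample {
  public static void main(String[] args) {
    BatchUpdate b = new BatchUpdate(Bytes.toBytes("row1"));
    // put(column, value) is assumed from the surrounding API.
    b.put(Bytes.toBytes("info:name"), Bytes.toBytes("value1"));
    // The new toString prints: row => row1, {column => info:name, value => '...'}
    System.out.println(b);
  }
}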
@@ -335,7 +335,7 @@ abstract class BaseScanner extends Chore implements HConstants {
 
     if (LOG.isDebugEnabled()) {
       LOG.debug(split.getRegionNameAsString() +
-        " no longer has references to " + parent.toString());
+        " no longer has references to " + Bytes.toString(parent));
     }
 
     BatchUpdate b = new BatchUpdate(parent);
@@ -93,15 +93,14 @@ class CompactSplitThread extends Thread implements HConstants {
         continue;
       } catch (IOException ex) {
         LOG.error("Compaction failed" +
-          (r != null ? (" for region " + r.getRegionName()) : ""),
+          (r != null ? (" for region " + Bytes.toString(r.getRegionName())) : ""),
           RemoteExceptionHandler.checkIOException(ex));
         if (!server.checkFileSystem()) {
           break;
         }
-
       } catch (Exception ex) {
         LOG.error("Compaction failed" +
-          (r != null ? (" for region " + r.getRegionName()) : ""),
+          (r != null ? (" for region " + Bytes.toString(r.getRegionName())) : ""),
           ex);
         if (!server.checkFileSystem()) {
           break;
@@ -154,7 +153,7 @@ class CompactSplitThread extends Thread implements HConstants {
         }
         t = meta;
       }
-      LOG.info("Updating " + t.getTableName() + " with region split info");
+      LOG.info("Updating " + Bytes.toString(t.getTableName()) + " with region split info");
 
       // Mark old region as offline and split in META.
       // NOTE: there is no need for retry logic here. HTable does it for us.
@@ -723,15 +723,23 @@ public class HRegion implements HConstants {
     if(!this.fs.exists(splits)) {
       this.fs.mkdirs(splits);
     }
+    // Calculate regionid to use. Can't be less than that of parent else
+    // it'll insert into wrong location over in .META. table: HBASE-710.
+    long rid = System.currentTimeMillis();
+    if (rid < this.regionInfo.getRegionId()) {
+      LOG.warn("Clock skew; parent regions id is " +
+        this.regionInfo.getRegionId() + " but current time here is " + rid);
+      rid = this.regionInfo.getRegionId() + 1;
+    }
     HRegionInfo regionAInfo = new HRegionInfo(this.regionInfo.getTableDesc(),
-      startKey, midKey);
+      startKey, midKey, false, rid);
     Path dirA =
       new Path(splits, Integer.toString(regionAInfo.getEncodedName()));
     if(fs.exists(dirA)) {
       throw new IOException("Cannot split; target file collision at " + dirA);
     }
     HRegionInfo regionBInfo = new HRegionInfo(this.regionInfo.getTableDesc(),
-      midKey, endKey);
+      midKey, endKey, false, rid);
     Path dirB =
       new Path(splits, Integer.toString(regionBInfo.getEncodedName()));
     if(this.fs.exists(dirB)) {
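Why the rid guard above matters: region names embed the region id, and rows in .META. sort lexicographically by region name, so a daughter created with an id smaller than its parent's would sort before the parent (HBASE-710). A rough illustration, not part of the commit, using a simplified table,startKey,regionId name format rather than the real createRegionName output:

import java.util.TreeSet;

public class SplitOrderExample {
  public static void main(String[] args) {
    // Simplified region names: table,startKey,regionId. Equal-length
    // numeric suffixes make lexicographic order match numeric order.
    TreeSet<String> meta = new TreeSet<String>();
    meta.add("t,,1214500000000");     // parent, created at time T
    meta.add("t,,1214499999000");     // daughter from a host with a slow clock
    // The daughter sorts before its parent -- the HBASE-710 bug.
    System.out.println(meta.first()); // prints t,,1214499999000
  }
}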
@@ -29,6 +29,7 @@ import java.util.TreeSet;
 import java.util.Comparator;
 import java.util.Collection;
 import java.util.ArrayList;
+import org.apache.hadoop.hbase.util.Bytes;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@ -178,8 +179,9 @@ public class SoftSortedMap<K,V> implements SortedMap<K,V> {
|
||||||
Object obj;
|
Object obj;
|
||||||
while((obj = referenceQueue.poll()) != null) {
|
while((obj = referenceQueue.poll()) != null) {
|
||||||
if (LOG.isDebugEnabled()) {
|
if (LOG.isDebugEnabled()) {
|
||||||
LOG.debug("Reference for key " + ((SoftValue<K,V>)obj).key.toString() +
|
Object k = ((SoftValue<K,V>)obj).key;
|
||||||
" has been cleared.");
|
String name = (k instanceof byte [])? Bytes.toString((byte [])k): k.toString();
|
||||||
|
LOG.debug("Reference for key " + name + " has been cleared.");
|
||||||
}
|
}
|
||||||
internalMap.remove(((SoftValue<K,V>)obj).key);
|
internalMap.remove(((SoftValue<K,V>)obj).key);
|
||||||
}
|
}
|
||||||