HBASE-4229 Replace Jettison JSON encoding with Jackson in HLogPrettyPrinter

git-svn-id: https://svn.apache.org/repos/asf/hbase/trunk@1159914 13f79535-47bb-0310-9956-ffa450edef68
Michael Stack 2011-08-20 20:27:09 +00:00
parent 95c39ae2ab
commit 38a046ea3c
4 changed files with 64 additions and 101 deletions

CHANGES.txt

@@ -383,6 +383,8 @@ Release 0.91.0 - Unreleased
               o.a.h.h.HConstants (Mingjie Lai)
    HBASE-4227  Modify the webUI so that default values of column families are
               not shown (Nileema Shingte)
+   HBASE-4229  Replace Jettison JSON encoding with Jackson in HLogPrettyPrinter
+              (Riley Patterson)
 
 TASKS
    HBASE-3559  Move report of split to master OFF the heartbeat channel

bin/add_region.rb

@@ -42,11 +42,11 @@ import org.apache.hadoop.fs.FileSystem
 import org.apache.commons.logging.LogFactory
 
 # Name of this script
-NAME = "add_table"
+NAME = "add_region"
 
 # Print usage for this script
 def usage
-  puts 'Usage: %s.rb TABLE_DIR [alternate_tablename]' % NAME
+  puts 'Usage: %s.rb <PATH_TO_REGIONINFO>' % NAME
   exit!
 end
@@ -63,85 +63,20 @@ fs = FileSystem.get(c)
 LOG = LogFactory.getLog(NAME)
 
 # Check arguments
-if ARGV.size < 1 || ARGV.size > 2
+if ARGV.size != 1
   usage
 end
 
 # Get cmdline args.
-srcdir = fs.makeQualified(Path.new(java.lang.String.new(ARGV[0])))
+regioninfo = fs.makeQualified(Path.new(java.lang.String.new(ARGV[0])))
-if not fs.exists(srcdir)
-  raise IOError.new("src dir " + srcdir.toString() + " doesn't exist!")
-end
-
-# Get table name
-tableName = nil
-if ARGV.size > 1
-  tableName = ARGV[1]
-  raise IOError.new("Not supported yet")
-elsif
-  # If none provided use dirname
-  tableName = srcdir.getName()
-end
-HTableDescriptor.isLegalTableName(tableName.to_java_bytes)
-
-# Figure locations under hbase.rootdir
-# Move directories into place; be careful not to overwrite.
-rootdir = FSUtils.getRootDir(c)
-tableDir = fs.makeQualified(Path.new(rootdir, tableName))
-
-# If a directory currently in place, move it aside.
-if srcdir.equals(tableDir)
-  LOG.info("Source directory is in place under hbase.rootdir: " + srcdir.toString());
-elsif fs.exists(tableDir)
-  movedTableName = tableName + "." + java.lang.System.currentTimeMillis().to_s
-  movedTableDir = Path.new(rootdir, java.lang.String.new(movedTableName))
-  LOG.warn("Moving " + tableDir.toString() + " aside as " + movedTableDir.toString());
-  raise IOError.new("Failed move of " + tableDir.toString()) unless fs.rename(tableDir, movedTableDir)
-  LOG.info("Moving " + srcdir.toString() + " to " + tableDir.toString());
-  raise IOError.new("Failed move of " + srcdir.toString()) unless fs.rename(srcdir, tableDir)
-end
-
-# Clean mentions of table from .META.
-# Scan the .META. and remove all lines that begin with tablename
-LOG.info("Deleting mention of " + tableName + " from .META.")
 metaTable = HTable.new(c, HConstants::META_TABLE_NAME)
-tableNameMetaPrefix = tableName + HConstants::META_ROW_DELIMITER.chr
-scan = Scan.new((tableNameMetaPrefix + HConstants::META_ROW_DELIMITER.chr).to_java_bytes)
-scanner = metaTable.getScanner(scan)
-# Use java.lang.String doing compares. Ruby String is a bit odd.
-tableNameStr = java.lang.String.new(tableName)
-while (result = scanner.next())
-  rowid = Bytes.toString(result.getRow())
-  rowidStr = java.lang.String.new(rowid)
-  if not rowidStr.startsWith(tableNameMetaPrefix)
-    # Gone too far, break
-    break
-  end
-  LOG.info("Deleting row from catalog: " + rowid);
-  d = Delete.new(result.getRow())
-  metaTable.delete(d)
-end
-scanner.close()
-
-# Now, walk the table and per region, add an entry
-LOG.info("Walking " + srcdir.toString() + " adding regions to catalog table")
-statuses = fs.listStatus(srcdir)
-for status in statuses
-  next unless status.isDir()
-  next if status.getPath().getName() == "compaction.dir"
-  regioninfofile = Path.new(status.getPath(), HRegion::REGIONINFO_FILE)
-  unless fs.exists(regioninfofile)
-    LOG.warn("Missing .regioninfo: " + regioninfofile.toString())
-    next
-  end
-  is = fs.open(regioninfofile)
-  hri = HRegionInfo.new()
-  hri.readFields(is)
-  is.close()
-  # TODO: Need to redo table descriptor with passed table name and then recalculate the region encoded names.
-  p = Put.new(hri.getRegionName())
-  p.add(HConstants::CATALOG_FAMILY, HConstants::REGIONINFO_QUALIFIER, Writables.getBytes(hri))
-  metaTable.put(p)
-  LOG.info("Added to catalog: " + hri.toString())
-end
+unless fs.exists(regioninfo)
+  raise IOError.new("regioninfo " + regioninfo.toString() + " doesn't exist!")
+end
+is = fs.open(regioninfo)
+hri = HRegionInfo.new()
+hri.readFields(is)
+is.close()
+p = Put.new(hri.getRegionName())
+p.add(HConstants::CATALOG_FAMILY, HConstants::REGIONINFO_QUALIFIER, Writables.getBytes(hri))
+metaTable.put(p)
+LOG.info("Added to catalog: " + hri.toString())

pom.xml

@@ -638,6 +638,7 @@
       https://issues.apache.org/jira/secure/attachment/12459473/hdfs-895-branch-20-append.txt
     -->
     <hadoop.version>0.20-append-r1057313</hadoop.version>
+    <jackson.version>1.5.5</jackson.version>
     <jasper.version>5.5.23</jasper.version>
     <jaxb-api.version>2.1</jaxb-api.version>
     <jetty.version>6.1.26</jetty.version>
@ -677,9 +678,6 @@
implementation of the same, because Hadoop also uses this version
* javax.servlet:jsp-api in favour of org.mortbay.jetty:jsp-api-2.1
* javax.xml.stream:stax-api in favour of stax:stax-api
Note: Both org.apache.avro:avro and com.sun.jersey:jersey-json depend on Jackson so the version
is chosen which comes first in the list of dependencies (jersey in this case)
-->
<!-- General dependencies -->
@@ -829,6 +827,29 @@
         <artifactId>servlet-api-2.5</artifactId>
         <version>${jetty.jspapi.version}</version>
       </dependency>
+      <!-- While jackson is also a dependency of both jersey and avro, these
+        can bring in jars from different, incompatible versions. We force
+        the same version with these dependencies -->
+      <dependency>
+        <groupId>org.codehaus.jackson</groupId>
+        <artifactId>jackson-core-asl</artifactId>
+        <version>${jackson.version}</version>
+      </dependency>
+      <dependency>
+        <groupId>org.codehaus.jackson</groupId>
+        <artifactId>jackson-mapper-asl</artifactId>
+        <version>${jackson.version}</version>
+      </dependency>
+      <dependency>
+        <groupId>org.codehaus.jackson</groupId>
+        <artifactId>jackson-jaxrs</artifactId>
+        <version>${jackson.version}</version>
+      </dependency>
+      <dependency>
+        <groupId>org.codehaus.jackson</groupId>
+        <artifactId>jackson-xc</artifactId>
+        <version>${jackson.version}</version>
+      </dependency>
       <dependency>
         <groupId>org.slf4j</groupId>
         <artifactId>slf4j-api</artifactId>
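Why the pin matters: jackson-mapper-asl calls directly into the streaming classes shipped in jackson-core-asl, so letting Maven resolve different versions of the two jars tends to fail at runtime rather than at build time. A minimal sketch of that coupling, assuming Jackson 1.x on the classpath; the class name JacksonVersionCheck is an illustrative assumption:

import java.io.StringWriter;
import org.codehaus.jackson.JsonGenerator;     // streaming API, from jackson-core-asl
import org.codehaus.jackson.map.ObjectMapper;  // databinding, from jackson-mapper-asl

public class JacksonVersionCheck {
  public static void main(String[] args) throws Exception {
    ObjectMapper mapper = new ObjectMapper();
    StringWriter w = new StringWriter();
    // The mapper (mapper-asl) instantiates a generator from core-asl here;
    // mismatched jar versions typically surface at this boundary as
    // NoSuchMethodError or similar linkage failures.
    JsonGenerator gen = mapper.getJsonFactory().createJsonGenerator(w);
    mapper.writeValue(gen, java.util.Collections.singletonMap("pinned", "1.5.5"));
    gen.flush();
    System.out.println(w); // {"pinned":"1.5.5"}
  }
}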

src/main/java/org/apache/hadoop/hbase/regionserver/wal/HLogPrettyPrinter.java

@@ -20,8 +20,11 @@ package org.apache.hadoop.hbase.regionserver.wal;
 import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.io.PrintStream;
+import java.util.ArrayList;
 import java.util.Date;
+import java.util.HashMap;
+import java.util.List;
 import java.util.Map;
 
 import org.apache.commons.cli.CommandLine;
 import org.apache.commons.cli.CommandLineParser;
@@ -36,9 +39,7 @@ import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.regionserver.wal.HLog.Reader;
 import org.apache.hadoop.hbase.util.Bytes;
-import org.codehaus.jettison.json.JSONArray;
-import org.codehaus.jettison.json.JSONException;
-import org.codehaus.jettison.json.JSONObject;
+import org.codehaus.jackson.map.ObjectMapper;
 
 /**
  * HLogPrettyPrinter prints the contents of a given HLog with a variety of
@@ -65,6 +66,8 @@ public class HLogPrettyPrinter {
   private boolean firstTxn;
   // useful for programatic capture of JSON output
   private PrintStream out;
+  // for JSON encoding
+  private ObjectMapper mapper;
 
   /**
    * Basic constructor that simply initializes values to reasonable defaults.
@@ -78,6 +81,7 @@ public class HLogPrettyPrinter {
     persistentOutput = false;
     firstTxn = true;
     out = System.out;
+    mapper = new ObjectMapper();
   }
 
   /**
@@ -239,23 +243,25 @@ public class HLogPrettyPrinter {
         HLogKey key = entry.getKey();
         WALEdit edit = entry.getEdit();
         // begin building a transaction structure
-        JSONObject txn = new JSONObject(key.toStringMap());
+        Map<String, Object> txn = key.toStringMap();
         // check output filters
         if (sequence >= 0 && ((Long) txn.get("sequence")) != sequence)
           continue;
         if (region != null && !((String) txn.get("region")).equals(region))
           continue;
         // initialize list into which we will store atomic actions
-        JSONArray actions = new JSONArray();
+        List<Map> actions = new ArrayList<Map>();
         for (KeyValue kv : edit.getKeyValues()) {
           // add atomic operation to txn
-          JSONObject op = new JSONObject(kv.toStringMap());
+          Map<String, Object> op =
+            new HashMap<String, Object>(kv.toStringMap());
           if (outputValues)
             op.put("value", Bytes.toStringBinary(kv.getValue()));
           // check row output filter
           if (row == null || ((String) op.get("row")).equals(row))
-            actions.put(op);
+            actions.add(op);
         }
-        if (actions.length() == 0)
+        if (actions.size() == 0)
           continue;
         txn.put("actions", actions);
         if (outputJSON) {
@@ -264,27 +270,26 @@ public class HLogPrettyPrinter {
             firstTxn = false;
           else
             out.print(",");
-          out.print(txn);
+          // encode and print JSON
+          out.print(mapper.writeValueAsString(txn));
         } else {
           // Pretty output, complete with indentation by atomic action
-          out.println("Sequence " + txn.getLong("sequence") + " "
-              + "from region " + txn.getString("region") + " " + "in table "
-              + txn.getString("table"));
-          for (int i = 0; i < actions.length(); i++) {
-            JSONObject op = actions.getJSONObject(i);
+          out.println("Sequence " + txn.get("sequence") + " "
+              + "from region " + txn.get("region") + " " + "in table "
+              + txn.get("table"));
+          for (int i = 0; i < actions.size(); i++) {
+            Map op = actions.get(i);
             out.println("  Action:");
-            out.println("    row: " + op.getString("row"));
-            out.println("    column: " + op.getString("family") + ":"
-                + op.getString("qualifier"));
+            out.println("    row: " + op.get("row"));
+            out.println("    column: " + op.get("family") + ":"
+                + op.get("qualifier"));
             out.println("    at time: "
-                + (new Date(op.getLong("timestamp"))));
+                + (new Date((Long) op.get("timestamp"))));
             if (outputValues)
               out.println("    value: " + op.get("value"));
           }
         }
       }
-    } catch (JSONException e) {
-      e.printStackTrace();
     } finally {
       log.close();
     }
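In miniature, the change swaps Jettison's JSONObject/JSONArray wrapper tree for plain Maps and Lists handed to Jackson's ObjectMapper. The following self-contained sketch mirrors the new encoding path; the field names match what the printer emits, but the class name HLogJsonSketch and the sample values are illustrative, not taken from a real HLog:

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import org.codehaus.jackson.map.ObjectMapper;

public class HLogJsonSketch {
  public static void main(String[] args) throws Exception {
    ObjectMapper mapper = new ObjectMapper();

    // The txn map plays the role of key.toStringMap() in the printer
    Map<String, Object> txn = new HashMap<String, Object>();
    txn.put("sequence", 17L);           // illustrative values
    txn.put("region", "1028785192");
    txn.put("table", "t1");

    // One map per KeyValue, collected like the printer's actions list
    List<Map> actions = new ArrayList<Map>();
    Map<String, Object> op = new HashMap<String, Object>();
    op.put("row", "row1");
    op.put("family", "f");
    op.put("qualifier", "q");
    op.put("timestamp", 1313872029000L);
    actions.add(op);
    txn.put("actions", actions);

    // With Jettison this was an implicit toString(); with Jackson the
    // whole Map/List structure is serialized in one explicit call.
    System.out.println(mapper.writeValueAsString(txn));
  }
}

A side effect visible at the bottom of the diff: nothing in this path throws JSONException anymore, so the catch block is gone and Jackson's errors (IOException subclasses) propagate through the method's existing IOException contract instead of being swallowed by printStackTrace().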