HBASE-3374 Our jruby jar has *GPL jars in it; fix

git-svn-id: https://svn.apache.org/repos/asf/hbase/trunk@1051343 13f79535-47bb-0310-9956-ffa450edef68
Michael Stack 2010-12-21 00:34:36 +00:00
parent 9f6db55496
commit a59bd1030b
10 changed files with 73 additions and 170 deletions

View File

@@ -798,6 +798,7 @@ Release 0.90.0 - Unreleased
aren't split yet
HBASE-3371 Race in TestReplication can make it fail
HBASE-3323 OOME in master splitting logs
HBASE-3374 Our jruby jar has *GPL jars in it; fix
IMPROVEMENTS

View File

@@ -480,7 +480,8 @@
<jetty.version>6.1.26</jetty.version>
<jetty.jspapi.version>6.1.14</jetty.jspapi.version>
<jersey.version>1.4</jersey.version>
<jruby.version>1.5.2</jruby.version>
<!--JRuby > 1.0.x has *GPL jars in it so we can't upgrade. See HBASE-3374-->
<jruby.version>1.0.3</jruby.version>
<jsr311.version>1.1.1</jsr311.version>
<junit.version>4.8.1</junit.version>
<log4j.version>1.2.16</log4j.version>

View File

@@ -28,10 +28,6 @@
# whether the table exists and returns nil regardless.
include Java
java_import org.apache.hadoop.hbase.HConstants
java_import org.apache.hadoop.hbase.HColumnDescriptor
java_import org.apache.hadoop.hbase.HTableDescriptor
include_class('java.lang.Integer') {|package,name| "J#{name}" }
include_class('java.lang.Long') {|package,name| "J#{name}" }
include_class('java.lang.Boolean') {|package,name| "J#{name}" }
@@ -40,9 +36,9 @@ module HBaseConstants
COLUMN = "COLUMN"
COLUMNS = "COLUMNS"
TIMESTAMP = "TIMESTAMP"
NAME = HConstants::NAME
VERSIONS = HConstants::VERSIONS
IN_MEMORY = HConstants::IN_MEMORY
NAME = org.apache.hadoop.hbase.HConstants::NAME
VERSIONS = org.apache.hadoop.hbase.HConstants::VERSIONS
IN_MEMORY = org.apache.hadoop.hbase.HConstants::IN_MEMORY
STOPROW = "STOPROW"
STARTROW = "STARTROW"
ENDROW = STOPROW
@@ -64,8 +60,8 @@ module HBaseConstants
end
end
promote_constants(HColumnDescriptor.constants)
promote_constants(HTableDescriptor.constants)
promote_constants(org.apache.hadoop.hbase.HColumnDescriptor.constants)
promote_constants(org.apache.hadoop.hbase.HTableDescriptor.constants)
end
# Include classes definition
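
Note on the pattern used throughout this change: the java_import shortcuts are dropped (the block-form include_class aliases are kept), presumably because java_import is not available in JRuby 1.0.x, and Java classes are instead referenced by their fully qualified package path, which JRuby resolves directly once include Java is in effect. A minimal sketch of the idiom, assuming a JRuby session with the HBase jars on the CLASSPATH (the table and family names are made up):

include Java
# Fully qualified references need no import statement:
name_key = org.apache.hadoop.hbase.HConstants::NAME
htd = org.apache.hadoop.hbase.HTableDescriptor.new("t1")            # hypothetical table name
htd.addFamily(org.apache.hadoop.hbase.HColumnDescriptor.new("cf1")) # hypothetical family name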

View File

@@ -20,16 +20,6 @@
include Java
java_import org.apache.hadoop.hbase.client.HBaseAdmin
java_import org.apache.zookeeper.ZooKeeperMain
java_import org.apache.hadoop.hbase.HColumnDescriptor
java_import org.apache.hadoop.hbase.HTableDescriptor
java_import org.apache.hadoop.hbase.HRegionInfo
java_import org.apache.hadoop.hbase.util.Bytes
java_import org.apache.zookeeper.ZooKeeper
java_import org.apache.hadoop.hbase.io.hfile.Compression
java_import org.apache.hadoop.hbase.regionserver.StoreFile
# Wrapper for org.apache.hadoop.hbase.client.HBaseAdmin
module Hbase
@@ -37,11 +27,11 @@ module Hbase
include HBaseConstants
def initialize(configuration, formatter)
@admin = HBaseAdmin.new(configuration)
@admin = org.apache.hadoop.hbase.client.HBaseAdmin.new(configuration)
connection = @admin.getConnection()
@zk_wrapper = connection.getZooKeeperWatcher()
zk = @zk_wrapper.getZooKeeper()
@zk_main = ZooKeeperMain.new(zk)
@zk_main = org.apache.zookeeper.ZooKeeperMain.new(zk)
@formatter = formatter
end
@@ -124,8 +114,8 @@ module Hbase
raise ArgumentError, "Table #{table_name} is enabled. Disable it first.'" if enabled?(table_name)
@admin.deleteTable(table_name)
flush(HConstants::META_TABLE_NAME)
major_compact(HConstants::META_TABLE_NAME)
flush(org.apache.hadoop.hbase.HConstants::META_TABLE_NAME)
major_compact(org.apache.hadoop.hbase.HConstants::META_TABLE_NAME)
end
#----------------------------------------------------------------------------------------------
@@ -147,7 +137,7 @@ module Hbase
raise(ArgumentError, "Table must have at least one column family") if args.empty?
# Start defining the table
htd = HTableDescriptor.new(table_name)
htd = org.apache.hadoop.hbase.HTableDescriptor.new(table_name)
# All args are columns, add them to the table definition
# TODO: add table options support
@@ -177,27 +167,27 @@ module Hbase
#----------------------------------------------------------------------------------------------
# Assign a region
def assign(region_name, force)
@admin.assign(Bytes.toBytes(region_name), java.lang.Boolean::valueOf(force))
@admin.assign(org.apache.hadoop.hbase.util.Bytes.toBytes(region_name), java.lang.Boolean::valueOf(force))
end
#----------------------------------------------------------------------------------------------
# Unassign a region
def unassign(region_name, force)
@admin.unassign(Bytes.toBytes(region_name), java.lang.Boolean::valueOf(force))
@admin.unassign(org.apache.hadoop.hbase.util.Bytes.toBytes(region_name), java.lang.Boolean::valueOf(force))
end
#----------------------------------------------------------------------------------------------
# Move a region
def move(encoded_region_name, server = nil)
@admin.move(Bytes.toBytes(encoded_region_name), server ? Bytes.toBytes(server): nil)
@admin.move(org.apache.hadoop.hbase.util.Bytes.toBytes(encoded_region_name), server ? org.apache.hadoop.hbase.util.Bytes.toBytes(server): nil)
end
#----------------------------------------------------------------------------------------------
# Returns table's structure description
def describe(table_name)
tables = @admin.listTables.to_a
tables << HTableDescriptor::META_TABLEDESC
tables << HTableDescriptor::ROOT_TABLEDESC
tables << org.apache.hadoop.hbase.HTableDescriptor::META_TABLEDESC
tables << org.apache.hadoop.hbase.HTableDescriptor::ROOT_TABLEDESC
tables.each do |t|
# Found the table
@@ -210,7 +200,7 @@ module Hbase
#----------------------------------------------------------------------------------------------
# Truncates table (deletes all records by recreating the table)
def truncate(table_name)
h_table = HTable.new(table_name)
h_table = org.apache.hadoop.hbase.client.HTable.new(table_name)
table_description = h_table.getTableDescriptor()
yield 'Disabling table...' if block_given?
disable(table_name)
@@ -353,35 +343,35 @@ module Hbase
# Return a new HColumnDescriptor made of passed args
def hcd(arg, htd)
# String arg, single parameter constructor
return HColumnDescriptor.new(arg) if arg.kind_of?(String)
return org.apache.hadoop.hbase.HColumnDescriptor.new(arg) if arg.kind_of?(String)
raise(ArgumentError, "Column family #{arg} must have a name") unless name = arg[NAME]
family = htd.getFamily(name.to_java_bytes)
# create it if it's a new family
family ||= HColumnDescriptor.new(name.to_java_bytes)
family ||= org.apache.hadoop.hbase.HColumnDescriptor.new(name.to_java_bytes)
family.setBlockCacheEnabled(JBoolean.valueOf(arg[HColumnDescriptor::BLOCKCACHE])) if arg.include?(HColumnDescriptor::BLOCKCACHE)
family.setScope(JInteger.valueOf(arg[REPLICATION_SCOPE])) if arg.include?(HColumnDescriptor::REPLICATION_SCOPE)
family.setInMemory(JBoolean.valueOf(arg[IN_MEMORY])) if arg.include?(HColumnDescriptor::IN_MEMORY)
family.setTimeToLive(JInteger.valueOf(arg[HColumnDescriptor::TTL])) if arg.include?(HColumnDescriptor::TTL)
family.setCompressionType(arg[HColumnDescriptor::COMPRESSION]) if arg.include?(HColumnDescriptor::COMPRESSION)
family.setBlocksize(JInteger.valueOf(arg[HColumnDescriptor::BLOCKSIZE])) if arg.include?(HColumnDescriptor::BLOCKSIZE)
family.setMaxVersions(JInteger.valueOf(arg[VERSIONS])) if arg.include?(HColumnDescriptor::VERSIONS)
if arg.include?(HColumnDescriptor::BLOOMFILTER)
bloomtype = arg[HColumnDescriptor::BLOOMFILTER].upcase
unless StoreFile::BloomType.constants.include?(bloomtype)
raise(ArgumentError, "BloomFilter type #{bloomtype} is not supported. Use one of " + StoreFile::BloomType.constants.join(" "))
family.setBlockCacheEnabled(JBoolean.valueOf(arg[org.apache.hadoop.hbase.HColumnDescriptor::BLOCKCACHE])) if arg.include?(org.apache.hadoop.hbase.HColumnDescriptor::BLOCKCACHE)
family.setScope(JInteger.valueOf(arg[org.apache.hadoop.hbase.HColumnDescriptor::REPLICATION_SCOPE])) if arg.include?(org.apache.hadoop.hbase.HColumnDescriptor::REPLICATION_SCOPE)
family.setInMemory(JBoolean.valueOf(arg[org.apache.hadoop.hbase.HColumnDescriptor::IN_MEMORY])) if arg.include?(org.apache.hadoop.hbase.HColumnDescriptor::IN_MEMORY)
family.setTimeToLive(JInteger.valueOf(arg[org.apache.hadoop.hbase.HColumnDescriptor::TTL])) if arg.include?(org.apache.hadoop.hbase.HColumnDescriptor::TTL)
family.setCompressionType(arg[org.apache.hadoop.hbase.HColumnDescriptor::COMPRESSION]) if arg.include?(org.apache.hadoop.hbase.HColumnDescriptor::COMPRESSION)
family.setBlocksize(JInteger.valueOf(arg[org.apache.hadoop.hbase.HColumnDescriptor::BLOCKSIZE])) if arg.include?(org.apache.hadoop.hbase.HColumnDescriptor::BLOCKSIZE)
family.setMaxVersions(JInteger.valueOf(arg[org.apache.hadoop.hbase.HColumnDescriptor::VERSIONS])) if arg.include?(org.apache.hadoop.hbase.HColumnDescriptor::VERSIONS)
if arg.include?(org.apache.hadoop.hbase.HColumnDescriptor::BLOOMFILTER)
bloomtype = arg[org.apache.hadoop.hbase.HColumnDescriptor::BLOOMFILTER].upcase
unless org.apache.hadoop.hbase.regionserver.StoreFile::BloomType.constants.include?(bloomtype)
raise(ArgumentError, "BloomFilter type #{bloomtype} is not supported. Use one of " + org.apache.hadoop.hbase.regionserver.StoreFile::BloomType.constants.join(" "))
else
family.setBloomFilterType(StoreFile::BloomType.valueOf(bloomtype))
family.setBloomFilterType(org.apache.hadoop.hbase.regionserver.StoreFile::BloomType.valueOf(bloomtype))
end
end
if arg.include?(HColumnDescriptor::COMPRESSION)
compression = arg[HColumnDescriptor::COMPRESSION].upcase
unless Compression::Algorithm.constants.include?(compression)
raise(ArgumentError, "Compression #{compression} is not supported. Use one of " + Compression::Algorithm.constants.join(" "))
if arg.include?(org.apache.hadoop.hbase.HColumnDescriptor::COMPRESSION)
compression = arg[org.apache.hadoop.hbase.HColumnDescriptor::COMPRESSION].upcase
unless org.apache.hadoop.hbase.io.hfile.Compression::Algorithm.constants.include?(compression)
raise(ArgumentError, "Compression #{compression} is not supported. Use one of " + org.apache.hadoop.hbase.io.hfile.Compression::Algorithm.constants.join(" "))
else
family.setCompressionType(Compression::Algorithm.valueOf(compression))
family.setCompressionType(org.apache.hadoop.hbase.io.hfile.Compression::Algorithm.valueOf(compression))
end
end
return family
@@ -391,22 +381,22 @@ module Hbase
# Enables/disables a region by name
def online(region_name, on_off)
# Open meta table
meta = HTable.new(HConstants::META_TABLE_NAME)
meta = org.apache.hadoop.hbase.client.HTable.new(org.apache.hadoop.hbase.HConstants::META_TABLE_NAME)
# Read region info
# FIXME: fail gracefully if can't find the region
region_bytes = Bytes.toBytes(region_name)
g = Get.new(region_bytes)
g.addColumn(HConstants::CATALOG_FAMILY, HConstants::REGIONINFO_QUALIFIER)
region_bytes = org.apache.hadoop.hbase.util.Bytes.toBytes(region_name)
g = org.apache.hadoop.hbase.client.Get.new(region_bytes)
g.addColumn(org.apache.hadoop.hbase.HConstants::CATALOG_FAMILY, org.apache.hadoop.hbase.HConstants::REGIONINFO_QUALIFIER)
hri_bytes = meta.get(g).value
# Change region status
hri = Writables.getWritable(hri_bytes, HRegionInfo.new)
hri = org.apache.hadoop.hbase.util.Writables.getWritable(hri_bytes, org.apache.hadoop.hbase.HRegionInfo.new)
hri.setOffline(on_off)
# Write it back
put = Put.new(region_bytes)
put.add(HConstants::CATALOG_FAMILY, HConstants::REGIONINFO_QUALIFIER, Writables.getBytes(hri))
put = org.apache.hadoop.hbase.client.Put.new(region_bytes)
put.add(org.apache.hadoop.hbase.HConstants::CATALOG_FAMILY, org.apache.hadoop.hbase.HConstants::REGIONINFO_QUALIFIER, org.apache.hadoop.hbase.util.Writables.getBytes(hri))
meta.put(put)
end
end
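
For context, these wrapper methods back the shell's DDL commands, which accept the same hash form the hcd helper parses. Illustrative usage, with made-up table and family names:

hbase> create 't1', {NAME => 'cf1', VERSIONS => 3}
hbase> describe 't1'
hbase> disable 't1'
hbase> drop 't1'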

View File

@@ -20,8 +20,6 @@
include Java
import org.apache.hadoop.hbase.HBaseConfiguration
require 'hbase/admin'
require 'hbase/table'

View File

@@ -20,8 +20,6 @@
include Java
java_import org.apache.hadoop.hbase.client.replication.ReplicationAdmin
# Wrapper for org.apache.hadoop.hbase.client.replication.ReplicationAdmin
module Hbase
@@ -29,7 +27,7 @@ module Hbase
include HBaseConstants
def initialize(configuration, formatter)
@replication_admin = ReplicationAdmin.new(configuration)
@replication_admin = org.apache.hadoop.hbase.client.replication.ReplicationAdmin.new(configuration)
@formatter = formatter
end
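
This wrapper backs the replication shell commands. Illustrative usage, with a made-up peer id and ZooKeeper cluster key:

hbase> add_peer '1', "zk1.example.org:2181:/hbase"
hbase> start_replication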

View File

@@ -20,19 +20,6 @@
include Java
java_import org.apache.hadoop.hbase.client.HTable
java_import org.apache.hadoop.hbase.KeyValue
java_import org.apache.hadoop.hbase.util.Bytes
java_import org.apache.hadoop.hbase.util.Writables
java_import org.apache.hadoop.hbase.client.Put
java_import org.apache.hadoop.hbase.client.Get
java_import org.apache.hadoop.hbase.client.Delete
java_import org.apache.hadoop.hbase.client.Scan
java_import org.apache.hadoop.hbase.filter.FirstKeyOnlyFilter
# Wrapper for org.apache.hadoop.hbase.client.HTable
module Hbase
@@ -40,13 +27,13 @@ module Hbase
include HBaseConstants
def initialize(configuration, table_name, formatter)
@table = HTable.new(configuration, table_name)
@table = org.apache.hadoop.hbase.client.HTable.new(configuration, table_name)
end
#----------------------------------------------------------------------------------------------
# Put a cell 'value' at specified table/row/column
def put(row, column, value, timestamp = nil)
p = Put.new(row.to_s.to_java_bytes)
p = org.apache.hadoop.hbase.client.Put.new(row.to_s.to_java_bytes)
family, qualifier = parse_column_name(column)
if timestamp
p.add(family, qualifier, timestamp, value.to_s.to_java_bytes)
@@ -58,14 +45,14 @@ module Hbase
#----------------------------------------------------------------------------------------------
# Delete a cell
def delete(row, column, timestamp = HConstants::LATEST_TIMESTAMP)
def delete(row, column, timestamp = org.apache.hadoop.hbase.HConstants::LATEST_TIMESTAMP)
deleteall(row, column, timestamp)
end
#----------------------------------------------------------------------------------------------
# Delete a row
def deleteall(row, column = nil, timestamp = HConstants::LATEST_TIMESTAMP)
d = Delete.new(row.to_s.to_java_bytes, timestamp, nil)
def deleteall(row, column = nil, timestamp = org.apache.hadoop.hbase.HConstants::LATEST_TIMESTAMP)
d = org.apache.hadoop.hbase.client.Delete.new(row.to_s.to_java_bytes, timestamp, nil)
if column
family, qualifier = parse_column_name(column)
d.deleteColumns(family, qualifier, timestamp)
@@ -85,10 +72,10 @@ module Hbase
# Count rows in a table
def count(interval = 1000, caching_rows = 10)
# We can safely set scanner caching with the first key only filter
scan = Scan.new
scan = org.apache.hadoop.hbase.client.Scan.new
scan.cache_blocks = false
scan.caching = caching_rows
scan.setFilter(FirstKeyOnlyFilter.new)
scan.setFilter(org.apache.hadoop.hbase.filter.FirstKeyOnlyFilter.new)
# Run the scanner
scanner = @table.getScanner(scan)
@@ -111,7 +98,7 @@ module Hbase
#----------------------------------------------------------------------------------------------
# Get from table
def get(row, *args)
get = Get.new(row.to_s.to_java_bytes)
get = org.apache.hadoop.hbase.client.Get.new(row.to_s.to_java_bytes)
maxlength = -1
# Normalize args
@@ -174,7 +161,7 @@ module Hbase
res = {}
result.list.each do |kv|
family = String.from_java_bytes(kv.getFamily)
qualifier = Bytes::toStringBinary(kv.getQualifier)
qualifier = org.apache.hadoop.hbase.util.Bytes::toStringBinary(kv.getQualifier)
column = "#{family}:#{qualifier}"
value = to_string(column, kv, maxlength)
@@ -195,7 +182,7 @@ module Hbase
def get_counter(row, column)
family, qualifier = parse_column_name(column.to_s)
# Format get request
get = Get.new(row.to_s.to_java_bytes)
get = org.apache.hadoop.hbase.client.Get.new(row.to_s.to_java_bytes)
get.addColumn(family, qualifier)
get.setMaxVersions(1)
@@ -205,7 +192,7 @@ module Hbase
# Fetch cell value
cell = result.list.first
Bytes::toLong(cell.getValue)
org.apache.hadoop.hbase.util.Bytes::toLong(cell.getValue)
end
#----------------------------------------------------------------------------------------------
@@ -234,9 +221,9 @@ module Hbase
end
scan = if stoprow
Scan.new(startrow.to_java_bytes, stoprow.to_java_bytes)
org.apache.hadoop.hbase.client.Scan.new(startrow.to_java_bytes, stoprow.to_java_bytes)
else
Scan.new(startrow.to_java_bytes)
org.apache.hadoop.hbase.client.Scan.new(startrow.to_java_bytes)
end
columns.each { |c| scan.addColumns(c) }
@@ -245,7 +232,7 @@ module Hbase
scan.setCacheBlocks(cache)
scan.setMaxVersions(versions) if versions > 1
else
scan = Scan.new
scan = org.apache.hadoop.hbase.client.Scan.new
end
# Start the scanner
@@ -261,11 +248,11 @@ module Hbase
end
row = iter.next
key = Bytes::toStringBinary(row.getRow)
key = org.apache.hadoop.hbase.util.Bytes::toStringBinary(row.getRow)
row.list.each do |kv|
family = String.from_java_bytes(kv.getFamily)
qualifier = Bytes::toStringBinary(kv.getQualifier)
qualifier = org.apache.hadoop.hbase.util.Bytes::toStringBinary(kv.getQualifier)
column = "#{family}:#{qualifier}"
cell = to_string(column, kv, maxlength)
@@ -298,12 +285,12 @@ module Hbase
# Checks if current table is one of the 'meta' tables
def is_meta_table?
tn = @table.table_name
Bytes.equals(tn, HConstants::META_TABLE_NAME) || Bytes.equals(tn, HConstants::ROOT_TABLE_NAME)
org.apache.hadoop.hbase.util.Bytes.equals(tn, org.apache.hadoop.hbase.HConstants::META_TABLE_NAME) || org.apache.hadoop.hbase.util.Bytes.equals(tn, org.apache.hadoop.hbase.HConstants::ROOT_TABLE_NAME)
end
# Returns family and (when has it) qualifier for a column name
def parse_column_name(column)
split = KeyValue.parseColumn(column.to_java_bytes)
split = org.apache.hadoop.hbase.KeyValue.parseColumn(column.to_java_bytes)
return split[0], (split.length > 1) ? split[1] : nil
end
@@ -312,20 +299,20 @@ module Hbase
def to_string(column, kv, maxlength = -1)
if is_meta_table?
if column == 'info:regioninfo'
hri = Writables.getHRegionInfoOrNull(kv.getValue)
hri = org.apache.hadoop.hbase.util.Writables.getHRegionInfoOrNull(kv.getValue)
return "timestamp=%d, value=%s" % [kv.getTimestamp, hri.toString]
end
if column == 'info:serverstartcode'
if kv.getValue.length > 0
str_val = Bytes.toLong(kv.getValue)
str_val = org.apache.hadoop.hbase.util.Bytes.toLong(kv.getValue)
else
str_val = Bytes.toStringBinary(kv.getValue)
str_val = org.apache.hadoop.hbase.util.Bytes.toStringBinary(kv.getValue)
end
return "timestamp=%d, value=%s" % [kv.getTimestamp, str_val]
end
end
val = "timestamp=#{kv.getTimestamp}, value=#{Bytes::toStringBinary(kv.getValue)}"
val = "timestamp=#{kv.getTimestamp}, value=#{org.apache.hadoop.hbase.util.Bytes::toStringBinary(kv.getValue)}"
(maxlength != -1) ? val[0, maxlength] : val
end
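
The table wrapper above implements the shell's data commands. Illustrative usage, with made-up table, row, and column names:

hbase> put 't1', 'r1', 'cf1:q1', 'value1'
hbase> get 't1', 'r1'
hbase> scan 't1', {STARTROW => 'r1', STOPROW => 'r2', COLUMNS => ['cf1:q1']}
hbase> count 't1'
hbase> deleteall 't1', 'r1'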

View File

@@ -24,6 +24,10 @@ module Shell
def help
return <<-EOF
Enable/Disable balancer. Returns previous balancer state.
Examples:
hbase> balance_switch true
hbase> balance_switch false
EOF
end

View File

@@ -41,7 +41,7 @@ EOF
formatter.row([ table ])
end
formatter.footer(now, list.count)
formatter.footer(now, list.size)
end
end
end
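
A hedged note on the count-to-size switch above: JRuby 1.0.x implements pre-1.8.7 Ruby, where Array responds to size and length but not to an argument-less count (added in 1.8.7), so size is the portable call. A tiny sketch with a hypothetical listing:

tables = ['t1', 't2']  # hypothetical table listing
tables.size            # => 2 on any 1.8.x; tables.count requires Ruby >= 1.8.7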

View File

@@ -1,72 +0,0 @@
/**
* Copyright 2009 The Apache Software Foundation
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.client;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.Test;
import org.jruby.embed.ScriptingContainer;
import org.jruby.embed.PathType;
/**
*
* @author scoundrel
*/
public class TestShell {
final Log LOG = LogFactory.getLog(getClass());
private final static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
private final static ScriptingContainer jruby = new ScriptingContainer();
@BeforeClass
public static void setUpBeforeClass() throws Exception {
// Start mini cluster
TEST_UTIL.getConfiguration().setInt("hbase.regionserver.msginterval", 100);
TEST_UTIL.getConfiguration().setInt("hbase.client.pause", 250);
TEST_UTIL.getConfiguration().setInt("hbase.client.retries.number", 6);
TEST_UTIL.startMiniCluster();
// Configure jruby runtime
List<String> loadPaths = new ArrayList();
loadPaths.add("src/main/ruby");
loadPaths.add("src/test/ruby");
jruby.getProvider().setLoadPaths(loadPaths);
jruby.put("$TEST_CLUSTER", TEST_UTIL);
}
@AfterClass
public static void tearDownAfterClass() throws Exception {
TEST_UTIL.shutdownMiniCluster();
}
@Test
public void testRunShellTests() throws IOException {
// Start all ruby tests
jruby.runScriptlet(PathType.ABSOLUTE, "src/test/ruby/tests_runner.rb");
}
}