HHH-6282 clean out-of-date config files in etc dir

Strong Liu 2011-06-02 22:03:37 +08:00
parent 945d9376a2
commit 8d558505c0
4 changed files with 0 additions and 464 deletions


@ -1,116 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<!--
An example of enabling support for sequences in InterSystems' Caché SQL 2007.1 database.
-->
<Export generator="Cache" version="9" zv="Cache for Windows NT (Intel) 5.0.17 (Build 6006U)" ts="2005-09-29 14:10:54">
<Project name="Hibernate_Sequences" LastModified="2005-09-29 14:10:54">
<Items>
<ProjectItem name="InterSystems.Sequences" type="CLS"/>
</Items>
</Project>
<Class name="InterSystems.Sequences">
<Description><![CDATA[
Class to maintain a table of counters for Oracle-style sequences or MSSQL identity columns
<br><br>Counters can be incremented by calling the stored procedure BEFORE the insert
using syntax like: call InterSystems.Sequences_GetNext("Name"), or using standard SQL,
or part of an SQL select like:
<br><br>select InterSystems.Sequences_GetNext(sequencename) from InterSystems.Sequences where Name='sequencename'
<br>
<br>Can also be queried as table InterSystems.Sequences, but that data is actually stored
in ^InterSystems.Sequences. Note use of %CacheSqlStorage to speed incrementing.
<br>
<br> Note: to make the Sequences system-wide, simply map ^InterSystems.Sequences* to a
common location
<br>
<br> Note: counter names are case-insensitive and forced to uppercase on disk.
<br><br> Merge of ideas by JSL and APC 09/2005
]]></Description>
<ClassType>persistent</ClassType>
<SqlRowIdPrivate>1</SqlRowIdPrivate>
<StorageStrategy>custom</StorageStrategy>
<Super>%Persistent</Super>
<TimeChanged>60172,44404.735854</TimeChanged>
<TimeCreated>60137,56752.747989</TimeCreated>
<ClassDefinitionError>0</ClassDefinitionError>
<Index name="UniqueIndex1">
<IdKey>1</IdKey>
<PrimaryKey>1</PrimaryKey>
<Properties>Name</Properties>
<Unique>1</Unique>
</Index>
<Property name="Name">
<Description>
The name of the sequence or identity, forced to uppercase. Typically a tablename
(MSSQL identities) or an Oracle-like Sequence name</Description>
<Type>%String</Type>
<Parameter name="MAXLEN" value="64"/>
</Property>
<Property name="Counter">
<Description>
Last assigned value for this Name. The initial value is 0.</Description>
<Type>%Integer</Type>
<InitialExpression>0</InitialExpression>
</Property>
<Method name="GetNext">
<Description>
Returns the next assigned counter value as an integer.</Description>
<ClassMethod>1</ClassMethod>
<FormalSpec>name:%String</FormalSpec>
<ReturnType>%Integer</ReturnType>
<SqlProc>1</SqlProc>
<Implementation><![CDATA[ quit $increment(^InterSystems.Sequences($zcvt(name,"U"))) //force name to uppercase to be safe
]]></Implementation>
</Method>
<Method name="Init">
<Description>
Hibernate procedure to initialise a sequence, but it can be used at any time</Description>
<ClassMethod>1</ClassMethod>
<FormalSpec>SequenceName:%String</FormalSpec>
<ReturnType>%Integer</ReturnType>
<SqlProc>1</SqlProc>
<Implementation><![CDATA[
set ^InterSystems.Sequences($zcvt(SequenceName,"U"))=0
quit 0
]]></Implementation>
</Method>
<Method name="Drop">
<Description>
Hibernate procedure to kill a sequence, but it can be used at any time</Description>
<ClassMethod>1</ClassMethod>
<FormalSpec>SequenceName:%String</FormalSpec>
<ReturnType>%Integer</ReturnType>
<SqlProc>1</SqlProc>
<Implementation><![CDATA[
kill ^InterSystems.Sequences($zcvt(SequenceName,"U"))
quit 0
]]></Implementation>
</Method>
<Storage name="custom">
<Type>%CacheSQLStorage</Type>
<StreamLocation>^InterSystems.SequencesS</StreamLocation>
<Property name="Counter"/>
<Property name="Name">
<Selectivity>1</Selectivity>
</Property>
<SQLMap name="datamap">
<Type>data</Type>
<Global>^InterSystems.Sequences</Global>
<Structure>delimited</Structure>
<Subscript name="1">
<Expression>{Name}</Expression>
</Subscript>
<Data name="Counter"/>
</SQLMap>
</Storage>
</Class>
<Checksum value="3603995477"/>
</Export>
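
For context: GetNext above is exposed as an SQL stored procedure, and the class description shows it being driven either via CALL or from inside a SELECT. Below is a minimal JDBC sketch of the SELECT form; the connection URL, credentials, and the HIBERNATE_SEQUENCE name are illustrative assumptions, not taken from the file, and the row is assumed to have been created beforehand via InterSystems.Sequences_Init.

    import java.sql.Connection;
    import java.sql.DriverManager;
    import java.sql.PreparedStatement;
    import java.sql.ResultSet;
    import java.sql.SQLException;

    public class CacheSequenceExample {
        public static void main(String[] args) throws SQLException {
            // Assumed Caché JDBC URL format (jdbc:Cache://host:port/namespace) and credentials.
            try (Connection con = DriverManager.getConnection(
                         "jdbc:Cache://localhost:1972/USER", "user", "password");
                 PreparedStatement ps = con.prepareStatement(
                         "select InterSystems.Sequences_GetNext(Name) "
                         + "from InterSystems.Sequences where Name = ?")) {
                // Sequence names are forced to uppercase on disk, so query in uppercase.
                ps.setString(1, "HIBERNATE_SEQUENCE");
                try (ResultSet rs = ps.executeQuery()) {
                    if (rs.next()) {
                        System.out.println("next sequence value = " + rs.getLong(1));
                    }
                }
            }
        }
    }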


@ -1,119 +0,0 @@
#!perl -w
###############################################################################
#
# Name: cvs-dup-eol-fixer
# Author: Steve Ebersole
#
# Description:
# Script to fix the bad end-of-line issues that sometimes occur after checking
# out the Hibernate source from the SourceForge CVS, where everything ends up
# essentially double-spaced. What I found, however, at least in my environment,
# was that this was actually caused by two carriage-return characters (i.e., \r\r)
# being substituted for every line ending. This script works under that
# assumption and fixes only that issue.
#
###############################################################################
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# This subroutine is essentially a recursive directory searcher. It also
# filters out anything from the CVS local admin dirs.
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
sub parsedir($) {
    my @results = ();
    my $dir = shift @_;
    opendir(DIRHANDLE, $dir) or die("Unable to open dir [$dir] for parsing");
    my @dir_contents = readdir(DIRHANDLE);
    closedir(DIRHANDLE) or warn("Unable to close dir [$dir]");
    foreach $element (@dir_contents) {
        if ( $element eq "." || $element eq ".." ) {
            # Nothing to do here...
        }
        elsif ($element =~ /CVS/) {
            # nothing to do here...
        }
        # assume no extension means a directory
        elsif ($element =~ /\./) {
            if ($element =~ /\.java/) {
                push( @results, "$dir/$element" );
            }
        }
        else {
            push( @results, parsedir("$dir/$element") );
        }
    }
    return @results;
}
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# This subroutine checks whether the file needs to be fixed, based mainly on
# the number of adjacent (i.e., repeating) carriage-return characters.
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
sub checkfile($) {
    my $file_name = shift @_;
    my $loop_count = 0;
    my $adj_cr_count = 0;
    my $line_count = 0;
    open( INFILEHANDLE, "<$file_name" ) or die( "Unable to open file [$file_name] for check" );
    while (<INFILEHANDLE>) {
        $loop_count++;
        @matches = m/\r\r/g;
        $adj_cr_count = $adj_cr_count + $#matches + 1;
        $line_count = $line_count + tr/\r\n/\r\n/;
    }
    close( INFILEHANDLE );
    my $half_line_count = $line_count / 2;
    return $loop_count == 1 && int($half_line_count) <= $adj_cr_count && $adj_cr_count <= int($half_line_count + 1);
}
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# This is the subroutine where the file actually gets fixed. It is also
# responsible for making sure files get backed up before doing the fix.
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
sub fixfile($) {
    my $file_name = shift @_;
    my $file_text = "";
    open( INFILEHANDLE, "<$file_name" ) or die( "Unable to open file [$file_name] for fix input" );
    while (<INFILEHANDLE>) {
        s/\r\r/\n/g;
        $file_text .= $_;
    }
    close( INFILEHANDLE );
    my $new_file_name = $file_name . ".old";
    rename( $file_name, $new_file_name );
    open( OUTFILEHANDLE, ">$file_name" ) or die( "Unable to open file [$file_name] for fix output" );
    print( OUTFILEHANDLE $file_text );
    close( OUTFILEHANDLE );
}
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Start main process
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
open( REPORTFILEHANDLE, ">cvs-dup-eol-fixer.report" ) or die( "Unable to open report file" );
my $basedir = shift @ARGV;
print( REPORTFILEHANDLE "Using basedir : $basedir\n");
my @file_list = parsedir($basedir);
foreach $file_name (@file_list) {
    print( REPORTFILEHANDLE "Checking file [$file_name]\n" );
    if ( checkfile($file_name) ) {
        print( REPORTFILEHANDLE " Need to fix file : $file_name\n" );
        fixfile($file_name);
    }
}
close(REPORTFILEHANDLE) or warn("Unable to close report file");
__END__
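
Usage note (inferred from the @ARGV handling and file output above; the script itself does not document this): it would be run against the root of a checkout, e.g. perl cvs-dup-eol-fixer path/to/hibernate-checkout. It only examines .java files, writes its findings to cvs-dup-eol-fixer.report in the working directory, and keeps each fixed file's original content alongside it as <name>.old.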


@ -1,110 +0,0 @@
# CACHE IN MEMORY
#
# If you want to disable memory caching, just uncomment this line.
#
# cache.memory=false
# CACHE KEY
#
# This is the key that will be used to store the cache in the application
# and session scope.
#
# If you want to set the cache key to anything other than the default
# uncomment this line and change the cache.key
#
# cache.key=__oscache_cache
# USE HOST DOMAIN NAME IN KEY
#
# Servers for multiple host domains may wish to add host name info to
# the generation of the key. If this is true, then uncomment the
# following line.
#
# cache.use.host.domain.in.key=true
# CACHE LISTENERS
#
# These hook OSCache events and perform various actions such as logging
# cache hits and misses, or broadcasting to other cache instances across a cluster.
# See the documentation for further information.
#
# cache.event.listeners=com.opensymphony.oscache.plugins.clustersupport.JMSBroadcastingListener, \
# com.opensymphony.oscache.extra.CacheEntryEventListenerImpl, \
# com.opensymphony.oscache.extra.CacheMapAccessEventListenerImpl, \
# com.opensymphony.oscache.extra.ScopeEventListenerImpl
# CACHE PERSISTENCE CLASS
#
# Specify the class to use for persistence. If you use the supplied DiskPersistenceListener,
# don't forget to supply the cache.path property to specify the location of the cache
# directory.
#
# If a persistence class is not specified, OSCache will use memory caching only.
#
# cache.persistence.class=com.opensymphony.oscache.plugins.diskpersistence.DiskPersistenceListener
# CACHE DIRECTORY
#
# This is the directory on disk where caches will be stored by the DiskPersistenceListener.
# It will be created if it doesn't already exist. Remember that OSCache must have
# write permission to this directory.
#
# Note: for Windows machines, the backslash needs to be escaped,
# i.e. Windows:
# cache.path=c:\\myapp\\cache
# or *ix:
# cache.path=/opt/myapp/cache
#
# cache.path=c:\\app\\cache
# CACHE ALGORITHM
#
# Default cache algorithm to use. Note that in order to use an algorithm
# the cache size must also be specified. If the cache size is not specified,
# the UnlimitedCache algorithm will be used.
#
# cache.algorithm=com.opensymphony.oscache.base.algorithm.LRUCache
# cache.algorithm=com.opensymphony.oscache.base.algorithm.FIFOCache
# cache.algorithm=com.opensymphony.oscache.base.algorithm.UnlimitedCache
# CACHE SIZE
#
# Default cache size in number of items. If a size is specified but not
# an algorithm, the cache algorithm used will be LRUCache.
#
cache.capacity=1000
# CACHE UNLIMITED DISK
# Use unlimited disk cache or not. The default value is false, which means
# the disk cache will be limited in size to the value specified by cache.capacity.
#
# cache.unlimited.disk=false
# JMS CLUSTER PROPERTIES
#
# Configuration properties for JMS clustering. See the clustering documentation
# for more information on these settings.
#
#cache.cluster.jms.topic.factory=java:comp/env/jms/TopicConnectionFactory
#cache.cluster.jms.topic.name=java:comp/env/jms/OSCacheTopic
#cache.cluster.jms.node.name=node1
# JAVAGROUPS CLUSTER PROPERTIES
#
# Configuration properties for the JavaGroups clustering. Only one of these
# should be specified. Default values (as shown below) will be used if neither
# property is set. See the clustering documentation and the JavaGroups project
# (www.javagroups.com) for more information on these settings.
#
#cache.cluster.properties=UDP(mcast_addr=231.12.21.132;mcast_port=45566;ip_ttl=32;mcast_send_buf_size=150000;mcast_recv_buf_size=80000):PING(timeout=2000;num_initial_members=3):MERGE2(min_interval=5000;max_interval=10000):FD_SOCK:VERIFY_SUSPECT(timeout=1500):pbcast.NAKACK(gc_lag=50;retransmit_timeout=300,600,1200,2400,4800):pbcast.STABLE(desired_avg_gossip=20000):UNICAST(timeout=5000):FRAG(frag_size=8096;down_thread=false;up_thread=false):pbcast.GMS(join_timeout=5000;join_retry_timeout=2000;shun=false;print_local_addr=true)
#cache.cluster.multicast.ip=231.12.21.132
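
For reference, a minimal sketch of how an application would typically exercise OSCache against a properties file like the one above; the no-arg GeneralCacheAdministrator reads oscache.properties from the classpath, and the key and value used here are illustrative.

    import com.opensymphony.oscache.base.NeedsRefreshException;
    import com.opensymphony.oscache.general.GeneralCacheAdministrator;

    public class OSCacheExample {
        public static void main(String[] args) {
            // Picks up oscache.properties from the classpath, so cache.capacity=1000 applies.
            GeneralCacheAdministrator admin = new GeneralCacheAdministrator();
            String key = "greeting";
            try {
                // Ask for an entry no more than 60 seconds old.
                String value = (String) admin.getFromCache(key, 60);
                System.out.println("cache hit: " + value);
            }
            catch (NeedsRefreshException stale) {
                boolean updated = false;
                try {
                    admin.putInCache(key, "hello"); // populate on miss or expiry
                    updated = true;
                }
                finally {
                    if (!updated) {
                        admin.cancelUpdate(key); // release the update lock on failure
                    }
                }
            }
        }
    }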


@ -1,119 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<!-- ===================================================================== -->
<!-- -->
<!-- Sample TreeCache Service Configuration -->
<!-- -->
<!-- ===================================================================== -->
<server>
<classpath codebase="./lib" archives="jboss-cache.jar, jgroups.jar"/>
<!-- ==================================================================== -->
<!-- Defines TreeCache configuration -->
<!-- ==================================================================== -->
<mbean code="org.jboss.cache.TreeCache"
name="jboss.cache:service=TreeCache">
<depends>jboss:service=Naming</depends>
<depends>jboss:service=TransactionManager</depends>
<!--
TransactionManager configuration not required for Hibernate!
-->
<!--
Node isolation level : SERIALIZABLE
REPEATABLE_READ (default)
READ_COMMITTED
READ_UNCOMMITTED
NONE
-->
<attribute name="IsolationLevel">REPEATABLE_READ</attribute>
<!--
Valid modes are LOCAL
REPL_ASYNC
REPL_SYNC
-->
<attribute name="CacheMode">LOCAL</attribute>
<!-- Name of cluster. Needs to be the same for all nodes in the cluster
in order for them to find each other
-->
<attribute name="ClusterName">TreeCache-Cluster</attribute>
<!-- JGroups protocol stack properties. Can also be a URL,
e.g. file:/home/bela/default.xml
<attribute name="ClusterProperties"></attribute>
-->
<attribute name="ClusterConfig">
<config>
<!-- UDP: if you have a multihomed machine,
set the bind_addr attribute to the appropriate NIC IP address -->
<!-- UDP: On Windows machines, because of the media sense feature
being broken with multicast (even after disabling media sense)
set the loopback attribute to true -->
<UDP mcast_addr="228.1.2.3" mcast_port="45566"
ip_ttl="64" ip_mcast="true"
mcast_send_buf_size="150000" mcast_recv_buf_size="80000"
ucast_send_buf_size="150000" ucast_recv_buf_size="80000"
loopback="false"/>
<PING timeout="2000" num_initial_members="3"
up_thread="false" down_thread="false"/>
<MERGE2 min_interval="10000" max_interval="20000"/>
<FD shun="true" up_thread="true" down_thread="true"/>
<VERIFY_SUSPECT timeout="1500"
up_thread="false" down_thread="false"/>
<pbcast.NAKACK gc_lag="50" retransmit_timeout="600,1200,2400,4800"
up_thread="false" down_thread="false"/>
<pbcast.STABLE desired_avg_gossip="20000"
up_thread="false" down_thread="false"/>
<UNICAST timeout="600,1200,2400" window_size="100" min_threshold="10"
down_thread="false"/>
<FRAG frag_size="8192"
down_thread="false" up_thread="false"/>
<pbcast.GMS join_timeout="5000" join_retry_timeout="2000"
shun="true" print_local_addr="true"/>
<pbcast.STATE_TRANSFER up_thread="false" down_thread="false"/>
</config>
</attribute>
<!--
Max number of entries in the cache. If this is exceeded, the
eviction policy will kick some entries out in order to make
more room
-->
<attribute name="MaxCapacity">20000</attribute>
<!--
The max amount of time (in milliseconds) we wait until the
initial state (i.e. the contents of the cache) is retrieved from
existing members in a clustered environment
-->
<attribute name="InitialStateRetrievalTimeout">20000</attribute>
<!--
Number of milliseconds to wait until all responses for a
synchronous call have been received.
-->
<attribute name="SyncReplTimeout">10000</attribute>
<!-- Max number of milliseconds to wait for a lock acquisition -->
<attribute name="LockAcquisitionTimeout">15000</attribute>
<!-- Max number of milliseconds we hold a lock (not currently
implemented) -->
<attribute name="LockLeaseTimeout">60000</attribute>
<!-- Name of the eviction policy class. Not supported now. -->
<attribute name="EvictionPolicyClass"></attribute>
</mbean>
</server>
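
For reference, this service descriptor targets the old JBoss Cache 1.x TreeCache API, the generation that Hibernate's TreeCache-based second-level cache integration was built against. A minimal standalone sketch of loading a descriptor like this one programmatically follows; the file name treecache.xml and the node and key values are illustrative assumptions.

    import org.jboss.cache.PropertyConfigurator;
    import org.jboss.cache.TreeCache;

    public class TreeCacheExample {
        public static void main(String[] args) throws Exception {
            TreeCache cache = new TreeCache();
            // Read the MBean attributes from the XML service descriptor.
            new PropertyConfigurator().configure(cache, "treecache.xml");
            cache.startService();

            cache.put("/example/region", "key", "value"); // illustrative node and entry
            System.out.println(cache.get("/example/region", "key"));

            cache.stopService();
        }
    }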