From b8710bea03a9354dbbb6bed2a23a50d6d4dd46c9 Mon Sep 17 00:00:00 2001 From: Craig Russell Date: Mon, 4 Dec 2006 19:59:52 +0000 Subject: [PATCH] Added property svn:eol-style=native git-svn-id: https://svn.apache.org/repos/asf/incubator/openjpa/trunk@482327 13f79535-47bb-0310-9956-ffa450edef68 --- .../apache/openjpa/jdbc/meta/MappingTool.java | 2246 ++++++++--------- .../openjpa/jdbc/sql/DB2Dictionary.java | 324 +-- 2 files changed, 1285 insertions(+), 1285 deletions(-) diff --git a/openjpa-jdbc/src/main/java/org/apache/openjpa/jdbc/meta/MappingTool.java b/openjpa-jdbc/src/main/java/org/apache/openjpa/jdbc/meta/MappingTool.java index 728f34fb1..6636a1214 100644 --- a/openjpa-jdbc/src/main/java/org/apache/openjpa/jdbc/meta/MappingTool.java +++ b/openjpa-jdbc/src/main/java/org/apache/openjpa/jdbc/meta/MappingTool.java @@ -1,1123 +1,1123 @@ -/* - * Copyright 2006 The Apache Software Foundation. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.openjpa.jdbc.meta; - -import java.io.File; -import java.io.IOException; -import java.io.PrintWriter; -import java.io.Writer; -import java.sql.SQLException; -import java.util.Arrays; -import java.util.Collection; -import java.util.HashMap; -import java.util.HashSet; -import java.util.Iterator; -import java.util.Map; -import java.util.Set; - -import org.apache.openjpa.conf.OpenJPAConfiguration; -import org.apache.openjpa.jdbc.conf.JDBCConfiguration; -import org.apache.openjpa.jdbc.conf.JDBCConfigurationImpl; -import org.apache.openjpa.jdbc.kernel.JDBCSeq; -import org.apache.openjpa.jdbc.schema.Column; -import org.apache.openjpa.jdbc.schema.DynamicSchemaFactory; -import org.apache.openjpa.jdbc.schema.LazySchemaFactory; -import org.apache.openjpa.jdbc.schema.Schema; -import org.apache.openjpa.jdbc.schema.SchemaGenerator; -import org.apache.openjpa.jdbc.schema.SchemaGroup; -import org.apache.openjpa.jdbc.schema.SchemaSerializer; -import org.apache.openjpa.jdbc.schema.SchemaTool; -import org.apache.openjpa.jdbc.schema.Table; -import org.apache.openjpa.jdbc.schema.XMLSchemaSerializer; -import org.apache.openjpa.jdbc.sql.DBDictionary; -import org.apache.openjpa.kernel.Seq; -import org.apache.openjpa.lib.conf.Configurations; -import org.apache.openjpa.lib.log.Log; -import org.apache.openjpa.lib.meta.ClassArgParser; -import org.apache.openjpa.lib.util.Files; -import org.apache.openjpa.lib.util.Localizer; -import org.apache.openjpa.lib.util.Options; -import org.apache.openjpa.lib.util.Services; -import org.apache.openjpa.meta.ClassMetaData; -import org.apache.openjpa.meta.FieldMetaData; -import org.apache.openjpa.meta.JavaTypes; -import org.apache.openjpa.meta.MetaDataFactory; -import org.apache.openjpa.meta.MetaDataModes; -import org.apache.openjpa.meta.QueryMetaData; -import org.apache.openjpa.meta.SequenceMetaData; -import org.apache.openjpa.meta.ValueStrategies; -import org.apache.openjpa.util.GeneralException; -import org.apache.openjpa.util.InternalException; -import org.apache.openjpa.util.MetaDataException; - -/** - * Tool for 
manipulating class mappings and associated schema. - * - * @author Abe White - */ -public class MappingTool - implements MetaDataModes { - - public static final String SCHEMA_ACTION_NONE = "none"; - - public static final String ACTION_ADD = "add"; - public static final String ACTION_REFRESH = "refresh"; - public static final String ACTION_BUILD_SCHEMA = "buildSchema"; - public static final String ACTION_DROP = "drop"; - public static final String ACTION_VALIDATE = "validate"; - public static final String ACTION_EXPORT = "export"; - public static final String ACTION_IMPORT = "import"; - - public static final String[] ACTIONS = new String[]{ - ACTION_ADD, - ACTION_REFRESH, - ACTION_BUILD_SCHEMA, - ACTION_DROP, - ACTION_VALIDATE, - ACTION_EXPORT, - ACTION_IMPORT, - }; - - private static Localizer _loc = Localizer.forPackage(MappingTool.class); - - private final JDBCConfiguration _conf; - private final Log _log; - private final String _action; - private final boolean _meta; - private final int _mode; - - private MappingRepository _repos = null; - private SchemaGroup _schema = null; - private SchemaTool _schemaTool = null; - private String _schemaAction = SchemaTool.ACTION_ADD; - private boolean _readSchema = false; - private boolean _pks = false; - private boolean _fks = false; - private boolean _indexes = false; - private boolean _seqs = true; - private boolean _dropUnused = true; - private boolean _ignoreErrors = false; - private File _file = null; - private Writer _mappingWriter = null; - private Writer _schemaWriter = null; - - // buffer metadatas to be dropped - private Set _dropCls = null; - private Set _dropMap = null; - private boolean _flush = false; - private boolean _flushSchema = false; - - /** - * Constructor. Supply configuration and action. - */ - public MappingTool(JDBCConfiguration conf, String action, boolean meta) { - _conf = conf; - _log = conf.getLog(JDBCConfiguration.LOG_METADATA); - _meta = meta; - - if (action == null) - _action = ACTION_REFRESH; - else if (!Arrays.asList(ACTIONS).contains(action)) - throw new IllegalArgumentException("action == " + action); - else - _action = action; - - if (meta && ACTION_ADD.equals(_action)) - _mode = MODE_META; - else if (meta && ACTION_DROP.equals(_action)) - _mode = MODE_META | MODE_MAPPING | MODE_QUERY; - else - _mode = MODE_MAPPING; - } - - /** - * The action supplied on construction. - */ - public String getAction() { - return _action; - } - - /** - * Whether the action works on metadata as well as mappings. - */ - public boolean isMetaDataAction() { - return _meta; - } - - /** - * The schema modification policy, or none. See the - * ACTION constants in {@link SchemaTool}. Defaults to - * {@link SchemaTool#ACTION_ADD}. - */ - public String getSchemaAction() { - return _schemaAction; - } - - /** - * The schema modification policy, or none. See the - * ACTION constants in {@link SchemaTool}. Defaults to - * {@link SchemaTool#ACTION_ADD}. - */ - public void setSchemaAction(String schemaAction) { - _schemaAction = schemaAction; - } - - /** - * Set to true to read the entire schema before mapping. - * Leaving this option false saves time, but is dangerous when adding - * new mappings, because without full knowledge of the existing schema the - * mapping tool might create tables or indexes that conflict with - * existing components. - */ - public boolean getReadSchema() { - return _readSchema; - } - - /** - * Set to true to read the entire schema before mapping. 
- * Leaving this option false saves time, but is dangerous when adding - * new mappings, because without full knowledge of the existing schema the - * mapping tool might create tables or indexes that conflict with - * existing components. - */ - public void setReadSchema(boolean readSchema) { - _readSchema = readSchema; - } - - /** - * Whether to manipulate sequences. Defaults to true. - */ - public boolean getSequences() { - return _seqs; - } - - /** - * Whether to manipulate sequences. Defaults to true. - */ - public void setSequences(boolean seqs) { - _seqs = seqs; - } - - /** - * Whether indexes on existing tables should be manipulated. - * Defaults to false. - */ - public boolean getIndexes() { - return _indexes; - } - - /** - * Whether indexes on existing tables should be manipulated. - * Defaults to false. - */ - public void setIndexes(boolean indexes) { - _indexes = indexes; - } - - /** - * Whether foreign keys on existing tables should be manipulated. - * Defaults to false. - */ - public boolean getForeignKeys() { - return _fks; - } - - /** - * Whether foreign keys on existing tables should be manipulated. - * Defaults to false. - */ - public void setForeignKeys(boolean fks) { - _fks = fks; - } - - /** - * Whether primary keys on existing tables should be manipulated. - * Defaults to false. - */ - public boolean getPrimaryKeys() { - return _pks; - } - - /** - * Whether primary keys on existing tables should be manipulated. - * Defaults to false. - */ - public void setPrimaryKeys(boolean pks) { - _pks = pks; - } - - /** - * Whether schema components that are unused by any mapping will be - * dropped from this tool's {@link SchemaGroup}, and, depending on - * the schema action, from the database. Defaults to true. - */ - public boolean getDropUnusedComponents() { - return _dropUnused; - } - - /** - * Whether schema components that are unused by any mapping will be - * dropped from this tool's {@link SchemaGroup}, and, depending on - * the schema action, from the database. Defaults to true. - */ - public void setDropUnusedComponents(boolean dropUnused) { - _dropUnused = dropUnused; - } - - /** - * Whether and SQL errors should cause a failure or just issue a warning. - */ - public void setIgnoreErrors(boolean ignoreErrors) { - _ignoreErrors = ignoreErrors; - } - - /** - * Whether and SQL errors should cause a failure or just issue a warning. - */ - public boolean getIgnoreErrors() { - return _ignoreErrors; - } - - /** - * Return the schema tool to use for schema modification. - */ - public SchemaTool getSchemaTool() { - if (_schemaTool == null) - _schemaTool = newSchemaTool(_schemaAction); - return _schemaTool; - } - - /** - * Return the schema tool to use for schema modification. - */ - private SchemaTool newSchemaTool(String action) { - if (SCHEMA_ACTION_NONE.equals(action)) - action = null; - SchemaTool tool = new SchemaTool(_conf, action); - tool.setIgnoreErrors(getIgnoreErrors()); - tool.setPrimaryKeys(getPrimaryKeys()); - tool.setForeignKeys(getForeignKeys()); - tool.setIndexes(getIndexes()); - tool.setSequences(getSequences()); - return tool; - } - - /** - * Set the schema tool to use for schema modification. - */ - public void setSchemaTool(SchemaTool tool) { - _schemaTool = tool; - } - - /** - * The stream to export the planned schema to as an XML document. - * If non-null, then the database schema will not be altered. - */ - public Writer getSchemaWriter() { - return _schemaWriter; - } - - /** - * The stream to export the planned schema to as an XML document. 
- * If non-null, then the database schema will not be altered. - */ - public void setSchemaWriter(Writer schemaWriter) { - _schemaWriter = schemaWriter; - } - - /** - * The stream to export the planned mappings to as an XML document. - * If non-null, then the mapping repository will not be altered. - */ - public Writer getMappingWriter() { - return _mappingWriter; - } - - /** - * The stream to export the planned mappings to as an XML document. - * If non-null, then the mapping repository will not be altered. - */ - public void setMappingWriter(Writer mappingWriter) { - _mappingWriter = mappingWriter; - } - - /** - * If adding metadata, the metadata file to add to. - */ - public File getMetaDataFile() { - return _file; - } - - /** - * If adding metadata, the metadata file to add to. - */ - public void setMetaDataFile(File file) { - _file = file; - } - - /** - * Return the repository to use to access mapping information. - * Defaults to a new {@link MappingRepository}. - */ - public MappingRepository getRepository() { - if (_repos == null) { - _repos = _conf.newMappingRepositoryInstance(); - _repos.setSchemaGroup(getSchemaGroup()); - _repos.setValidate(_repos.VALIDATE_UNENHANCED, false); - } - return _repos; - } - - /** - * Set the repository to use to access mapping information. - */ - public void setRepository(MappingRepository repos) { - _repos = repos; - } - - /** - * Return the schema group to use in mapping. If none has been set, the - * schema will be generated from the database. - */ - public SchemaGroup getSchemaGroup() { - if (_schema == null) { - if (ACTION_BUILD_SCHEMA.equals(_action)) { - DynamicSchemaFactory factory = new DynamicSchemaFactory(); - factory.setConfiguration(_conf); - _schema = factory; - } else if (_readSchema - || SchemaTool.ACTION_RETAIN.equals(_schemaAction) - || SchemaTool.ACTION_REFRESH.equals(_schemaAction)) { - _schema = (SchemaGroup) getSchemaTool().getDBSchemaGroup(). - clone(); - } else { - // with this we'll just read tables as different mappings - // look for them - LazySchemaFactory factory = new LazySchemaFactory(); - factory.setConfiguration(_conf); - factory.setPrimaryKeys(getPrimaryKeys()); - factory.setForeignKeys(getForeignKeys()); - factory.setIndexes(getIndexes()); - _schema = factory; - } - - if (_schema.getSchemas().length == 0) - _schema.addSchema(); - } - return _schema; - } - - /** - * Set the schema to use in mapping. - */ - public void setSchemaGroup(SchemaGroup schema) { - _schema = schema; - } - - /** - * Reset the internal repository. This is called automatically after - * every {@link #record}. - */ - public void clear() { - _repos = null; - _schema = null; - _schemaTool = null; - _flush = false; - _flushSchema = false; - if (_dropCls != null) - _dropCls.clear(); - if (_dropMap != null) - _dropMap.clear(); - } - - /** - * Records the changes that have been made to both the mappings and the - * associated schema, and clears the tool for further use. This also - * involves clearing the internal mapping repository. 
- */ - public void record() { - MappingRepository repos = getRepository(); - MetaDataFactory io = repos.getMetaDataFactory(); - ClassMapping[] mappings; - if (!ACTION_DROP.equals(_action)) - mappings = repos.getMappings(); - else if (_dropMap != null) - mappings = (ClassMapping[]) _dropMap.toArray - (new ClassMapping[_dropMap.size()]); - else - mappings = new ClassMapping[0]; - - try { - if (_dropCls != null && !_dropCls.isEmpty()) { - Class[] cls = (Class[]) _dropCls.toArray - (new Class[_dropCls.size()]); - if (!io.drop(cls, _mode, null)) - _log.warn(_loc.get("bad-drop", _dropCls)); - } - - if (_flushSchema) { - // drop portions of the known schema that no mapping uses, and - // add sequences used for value generation - if (_dropUnused) - dropUnusedSchemaComponents(mappings); - addSequenceComponents(mappings); - - // now run the schematool as long as we're doing some schema - // action and the user doesn't just want an xml output - if (!SCHEMA_ACTION_NONE.equals(_schemaAction) - && (_schemaWriter == null || (_schemaTool != null - && _schemaTool.getWriter() != null))) { - SchemaTool tool = getSchemaTool(); - tool.setSchemaGroup(getSchemaGroup()); - tool.run(); - tool.record(); - } - - // xml output of schema? - if (_schemaWriter != null) { - // serialize the planned schema to the stream - SchemaSerializer ser = new XMLSchemaSerializer(_conf); - ser.addAll(getSchemaGroup()); - ser.serialize(_schemaWriter, ser.PRETTY); - _schemaWriter.flush(); - } - } - if (!_flush) - return; - - QueryMetaData[] queries = repos.getQueryMetaDatas(); - SequenceMetaData[] seqs = repos.getSequenceMetaDatas(); - Map output = null; - - // if we're outputting to stream, set all metas to same file so - // they get placed in single string - if (_mappingWriter != null) { - output = new HashMap(); - File tmp = new File("openjpatmp"); - for (int i = 0; i < mappings.length; i++) - mappings[i].setSource(tmp, mappings[i].SRC_OTHER); - for (int i = 0; i < queries.length; i++) - queries[i].setSource(tmp, queries[i].getSourceScope(), - queries[i].SRC_OTHER); - for (int i = 0; i < seqs.length; i++) - seqs[i].setSource(tmp, seqs[i].getSourceScope(), - seqs[i].SRC_OTHER); - } - - // store - if (!io.store(mappings, queries, seqs, _mode, output)) - throw new MetaDataException(_loc.get("bad-store")); - - // write to stream - if (_mappingWriter != null) { - PrintWriter out = new PrintWriter(_mappingWriter); - for (Iterator itr = output.values().iterator(); - itr.hasNext();) - out.println((String) itr.next()); - out.flush(); - } - } - catch (RuntimeException re) { - throw re; - } catch (Exception e) { - throw new GeneralException(e); - } finally { - clear(); - } - } - - /** - * Drops schema components that appear to be unused from the local - * copy of the schema group. 
- */ - private void dropUnusedSchemaComponents(ClassMapping[] mappings) { - FieldMapping[] fields; - for (int i = 0; i < mappings.length; i++) { - mappings[i].refSchemaComponents(); - mappings[i].getDiscriminator().refSchemaComponents(); - mappings[i].getVersion().refSchemaComponents(); - fields = mappings[i].getDefinedFieldMappings(); - for (int j = 0; j < fields.length; j++) - fields[j].refSchemaComponents(); - } - - // also allow the dbdictionary to ref any schema components that - // it adds apart from mappings - SchemaGroup group = getSchemaGroup(); - Schema[] schemas = group.getSchemas(); - Table[] tables; - DBDictionary dict = _conf.getDBDictionaryInstance(); - for (int i = 0; i < schemas.length; i++) { - tables = schemas[i].getTables(); - for (int j = 0; j < tables.length; j++) - dict.refSchemaComponents(tables[j]); - } - - group.removeUnusedComponents(); - } - - /** - * Add tables used by sequences to the given schema. - */ - private void addSequenceComponents(ClassMapping[] mappings) { - SchemaGroup group = getSchemaGroup(); - for (int i = 0; i < mappings.length; i++) - addSequenceComponents(mappings[i], group); - } - - /** - * Add tables used by sequences to the given schema. - */ - private void addSequenceComponents(ClassMapping mapping, - SchemaGroup group) { - SequenceMetaData smd = mapping.getIdentitySequenceMetaData(); - Seq seq = null; - if (smd != null) - seq = smd.getInstance(null); - else if (mapping.getIdentityStrategy() == ValueStrategies.NATIVE - || (mapping.getIdentityStrategy() == ValueStrategies.NONE - && mapping.getIdentityType() == ClassMapping.ID_DATASTORE)) - seq = _conf.getSequenceInstance(); - - if (seq instanceof JDBCSeq) - ((JDBCSeq) seq).addSchema(mapping, group); - - FieldMapping[] fmds; - if (mapping.getEmbeddingMetaData() == null) - fmds = mapping.getDefinedFieldMappings(); - else - fmds = mapping.getFieldMappings(); - for (int i = 0; i < fmds.length; i++) { - smd = fmds[i].getValueSequenceMetaData(); - if (smd != null) { - seq = smd.getInstance(null); - if (seq instanceof JDBCSeq) - ((JDBCSeq) seq).addSchema(mapping, group); - } else if (fmds[i].getEmbeddedMapping() != null) - addSequenceComponents(fmds[i].getEmbeddedMapping(), group); - } - } - - /////////// - // Actions - /////////// - - /** - * Run the configured action on the given instance. - */ - public void run(Class cls) { - if (ACTION_ADD.equals(_action)) { - if (_meta) - addMeta(cls); - else - add(cls); - } else if (ACTION_REFRESH.equals(_action)) - refresh(cls); - else if (ACTION_BUILD_SCHEMA.equals(_action)) - buildSchema(cls); - else if (ACTION_DROP.equals(_action)) - drop(cls); - else if (ACTION_VALIDATE.equals(_action)) - validate(cls); - } - - /** - * Add the mapping for the given instance. - */ - private void add(Class cls) { - if (cls == null) - return; - - MappingRepository repos = getRepository(); - repos.setStrategyInstaller(new MappingStrategyInstaller(repos)); - if (getMapping(repos, cls, true) != null) { - _flush = true; - _flushSchema = true; - } - } - - /** - * Return the mapping for the given type, or null if the type is - * persistence-aware. 
- */ - private static ClassMapping getMapping(MappingRepository repos, Class cls, - boolean validate) { - // this will parse all possible metadata rsrcs looking for cls, so - // will detect if p-aware - ClassMapping mapping = repos.getMapping(cls, null, false); - if (mapping != null) - return mapping; - if (!validate || cls.isInterface() - || repos.getPersistenceAware(cls) != null) - return null; - throw new MetaDataException(_loc.get("no-meta", cls)); - } - - /** - * Create a metadata for the given instance. - */ - private void addMeta(Class cls) { - if (cls == null) - return; - - _flush = true; - MappingRepository repos = getRepository(); - repos.setResolve(MODE_MAPPING, false); - MetaDataFactory factory = repos.getMetaDataFactory(); - factory.getDefaults().setIgnoreNonPersistent(false); - factory.setStoreMode(MetaDataFactory.STORE_VERBOSE); - - ClassMetaData meta = repos.addMetaData(cls); - FieldMetaData[] fmds = meta.getDeclaredFields(); - for (int i = 0; i < fmds.length; i++) { - if (fmds[i].getDeclaredTypeCode() == JavaTypes.OBJECT - && fmds[i].getDeclaredType() != Object.class) - fmds[i].setDeclaredTypeCode(JavaTypes.PC); - } - meta.setSource(_file, meta.getSourceType()); - meta.setResolve(MODE_META, true); - } - - /** - * Refresh or add the mapping for the given instance. - */ - private void refresh(Class cls) { - if (cls == null) - return; - - MappingRepository repos = getRepository(); - repos.setStrategyInstaller(new RefreshStrategyInstaller(repos)); - if (getMapping(repos, cls, true) != null) { - _flush = true; - _flushSchema = true; - } - } - - /** - * Validate the mappings for the given class and its fields. - */ - private void validate(Class cls) { - if (cls == null) - return; - - MappingRepository repos = getRepository(); - repos.setStrategyInstaller(new RuntimeStrategyInstaller(repos)); - if (getMapping(repos, cls, true) != null) - _flushSchema = !SCHEMA_ACTION_NONE.equals(_schemaAction) - && !SchemaTool.ACTION_ADD.equals(_schemaAction); - } - - /** - * Create the schema using the mapping for the given instance. - */ - private void buildSchema(Class cls) { - if (cls == null) - return; - - MappingRepository repos = getRepository(); - repos.setStrategyInstaller(new RuntimeStrategyInstaller(repos)); - if (getMapping(repos, cls, true) == null) - return; - - // set any logical pks to non-logical so they get flushed - _flushSchema = true; - Schema[] schemas = _schema.getSchemas(); - Table[] tables; - Column[] cols; - for (int i = 0; i < schemas.length; i++) { - tables = schemas[i].getTables(); - for (int j = 0; j < tables.length; j++) { - if (tables[j].getPrimaryKey() == null) - continue; - - tables[j].getPrimaryKey().setLogical(false); - cols = tables[j].getPrimaryKey().getColumns(); - for (int k = 0; k < cols.length; k++) - cols[k].setNotNull(true); - } - } - } - - /** - * Drop mapping for given class. 
- */ - private void drop(Class cls) { - if (cls == null) - return; - - if (_dropCls == null) - _dropCls = new HashSet(); - _dropCls.add(cls); - if (!SchemaTool.ACTION_DROP.equals(_schemaAction)) - return; - - MappingRepository repos = getRepository(); - repos.setStrategyInstaller(new RuntimeStrategyInstaller(repos)); - ClassMapping mapping = null; - try { - mapping = repos.getMapping(cls, null, false); - } catch (Exception e) { - } - - if (mapping != null) { - _flushSchema = true; - if (_dropMap == null) - _dropMap = new HashSet(); - _dropMap.add(mapping); - } else - _log.warn(_loc.get("no-drop-meta", cls)); - } - - //////// - // Main - //////// - - /** - * Usage: java org.apache.openjpa.jdbc.meta.MappingTool [option]* - * [-action/-a <refresh | add | buildSchema | drop | validate | import - * | export>] <class name | .java file | .class file | .jdo file>* - * Where the following options are recognized. - * - * The various actions are as follows. - * - * Each class supplied as an argument must have valid metadata. If - * no class arguments are given, the tool runs on all metadata files in - * the CLASSPATH. - * Examples: - * - */ - public static void main(String[] args) - throws IOException, SQLException { - Options opts = new Options(); - args = opts.setFromCmdLine(args); - JDBCConfiguration conf = new JDBCConfigurationImpl(); - try { - if (!run(conf, args, opts)) - System.err.println(_loc.get("tool-usage")); - } finally { - conf.close(); - } - } - - /** - * Run the tool. Returns false if invalid options are given. - * - * @see #main - */ - public static boolean run(JDBCConfiguration conf, String[] args, - Options opts) - throws IOException, SQLException { - if (opts.containsKey("help") || opts.containsKey("-help")) - return false; - - // flags - Flags flags = new Flags(); - flags.action = opts.removeProperty("action", "a", flags.action); - flags.schemaAction = opts.removeProperty("schemaAction", "sa", - flags.schemaAction); - flags.dropTables = opts.removeBooleanProperty - ("dropTables", "dt", flags.dropTables); - flags.openjpaTables = opts.removeBooleanProperty - ("openjpaTables", "ot", flags.openjpaTables); - flags.dropSequences = opts.removeBooleanProperty - ("dropSequences", "dsq", flags.dropSequences); - flags.readSchema = opts.removeBooleanProperty - ("readSchema", "rs", flags.readSchema); - flags.primaryKeys = opts.removeBooleanProperty - ("primaryKeys", "pk", flags.primaryKeys); - flags.indexes = opts.removeBooleanProperty("indexes", "ix", - flags.indexes); - flags.foreignKeys = opts.removeBooleanProperty("foreignKeys", "fk", - flags.foreignKeys); - flags.sequences = opts.removeBooleanProperty("sequences", "sq", - flags.sequences); - flags.ignoreErrors = opts.removeBooleanProperty - ("ignoreErrors", "i", flags.ignoreErrors); - flags.meta = opts.removeBooleanProperty("meta", "m", flags.meta); - String fileName = opts.removeProperty("file", "f", null); - String schemaFileName = opts.removeProperty("schemaFile", "sf", null); - String sqlFileName = opts.removeProperty("sqlFile", "sql", null); - String schemas = opts.removeProperty("s"); - if (schemas != null) - opts.setProperty("schemas", schemas); - - Configurations.populateConfiguration(conf, opts); - ClassLoader loader = conf.getClassResolverInstance(). 
- getClassLoader(MappingTool.class, null); - if (flags.meta && ACTION_ADD.equals(flags.action)) - flags.metaDataFile = Files.getFile(fileName, loader); - else - flags.mappingWriter = Files.getWriter(fileName, loader); - flags.schemaWriter = Files.getWriter(schemaFileName, loader); - flags.sqlWriter = Files.getWriter(sqlFileName, loader); - - return run(conf, args, flags, loader); - } - - /** - * Run the tool. Return false if an invalid option was given. - */ - public static boolean run(JDBCConfiguration conf, String[] args, - Flags flags, ClassLoader loader) - throws IOException, SQLException { - // default action based on whether the mapping defaults fills in - // missing info - if (flags.action == null) { - if (conf.getMappingDefaultsInstance().defaultMissingInfo()) - flags.action = ACTION_BUILD_SCHEMA; - else - flags.action = ACTION_REFRESH; - } - - // collect the classes to act on - Log log = conf.getLog(OpenJPAConfiguration.LOG_TOOL); - Collection classes = null; - if (args.length == 0) { - if (ACTION_IMPORT.equals(flags.action)) - return false; - log.info(_loc.get("running-all-classes")); - classes = conf.getMappingRepositoryInstance(). - loadPersistentTypes(true, loader); - } else { - classes = new HashSet(); - ClassArgParser classParser = conf.getMetaDataRepositoryInstance(). - getMetaDataFactory().newClassArgParser(); - classParser.setClassLoader(loader); - Class[] parsed; - for (int i = 0; args != null && i < args.length; i++) { - parsed = classParser.parseTypes(args[i]); - classes.addAll(Arrays.asList(parsed)); - } - } - - Class[] act = (Class[]) classes.toArray(new Class[classes.size()]); - if (ACTION_EXPORT.equals(flags.action)) { - // run exports until the first export succeeds - ImportExport[] instances = newImportExports(); - for (int i = 0; i < instances.length; i++) { - if (instances[i].exportMappings(conf, act, flags.meta, log, - flags.mappingWriter)) - return true; - } - return false; - } - if (ACTION_IMPORT.equals(flags.action)) { - // run exports until the first export succeeds - ImportExport[] instances = newImportExports(); - for (int i = 0; i < instances.length; i++) { - if (instances[i].importMappings(conf, act, args, flags.meta, - log, loader)) - return true; - } - return false; - } - - MappingTool tool; - try { - tool = new MappingTool(conf, flags.action, flags.meta); - } catch (IllegalArgumentException iae) { - return false; - } - - // setup the tool - tool.setIgnoreErrors(flags.ignoreErrors); - tool.setMetaDataFile(flags.metaDataFile); - tool.setMappingWriter(flags.mappingWriter); - tool.setSchemaAction(flags.schemaAction); - tool.setSchemaWriter(flags.schemaWriter); - tool.setReadSchema(flags.readSchema - && !ACTION_VALIDATE.equals(flags.action)); - tool.setPrimaryKeys(flags.primaryKeys); - tool.setForeignKeys(flags.foreignKeys); - tool.setIndexes(flags.indexes); - tool.setSequences(flags.sequences || flags.dropSequences); - - // make sure to do this after other settings so that other settings - // are passed on to schema tool - tool.getSchemaTool().setDropTables(flags.dropTables); - tool.getSchemaTool().setDropSequences(flags.dropSequences); - tool.getSchemaTool().setWriter(flags.sqlWriter); - tool.getSchemaTool().setOpenJPATables(flags.openjpaTables); - - // and run the action - for (int i = 0; i < act.length; i++) { - log.info(_loc.get("tool-running", act[i], flags.action)); - if (i == 0 && flags.readSchema) - log.info(_loc.get("tool-time")); - tool.run(act[i]); - } - log.info(_loc.get("tool-record")); - tool.record(); - return true; - } - - /** - * Create 
an {@link ImportExport} instance. - */ - private static ImportExport[] newImportExports() { - try { - Class[] types = Services.getImplementorClasses(ImportExport.class); - ImportExport[] instances = new ImportExport[types.length]; - for (int i = 0; i < types.length; i++) - instances[i] = (ImportExport) types[i].newInstance(); - return instances; - } catch (Throwable t) { - throw new InternalException(_loc.get("importexport-instantiate"),t); - } - } - - /** - * Run flags. - */ - public static class Flags { - - public String action = null; - public boolean meta = false; - public String schemaAction = SchemaTool.ACTION_ADD; - public File metaDataFile = null; - public Writer mappingWriter = null; - public Writer schemaWriter = null; - public Writer sqlWriter = null; - public boolean ignoreErrors = false; - public boolean readSchema = false; - public boolean dropTables = false; - public boolean openjpaTables = false; - public boolean dropSequences = false; - public boolean sequences = true; - public boolean primaryKeys = false; - public boolean foreignKeys = false; - public boolean indexes = false; - } - - /** - * Helper used to import and export mapping data. - */ - public static interface ImportExport { - - /** - * Import mappings for the given classes based on the given arguments. - */ - public boolean importMappings(JDBCConfiguration conf, Class[] act, - String[] args, boolean meta, Log log, ClassLoader loader) - throws IOException; - - /** - * Export mappings for the given classes based on the given arguments. - */ - public boolean exportMappings(JDBCConfiguration conf, Class[] act, - boolean meta, Log log, Writer writer) - throws IOException; - } -} +/* + * Copyright 2006 The Apache Software Foundation. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.openjpa.jdbc.meta; + +import java.io.File; +import java.io.IOException; +import java.io.PrintWriter; +import java.io.Writer; +import java.sql.SQLException; +import java.util.Arrays; +import java.util.Collection; +import java.util.HashMap; +import java.util.HashSet; +import java.util.Iterator; +import java.util.Map; +import java.util.Set; + +import org.apache.openjpa.conf.OpenJPAConfiguration; +import org.apache.openjpa.jdbc.conf.JDBCConfiguration; +import org.apache.openjpa.jdbc.conf.JDBCConfigurationImpl; +import org.apache.openjpa.jdbc.kernel.JDBCSeq; +import org.apache.openjpa.jdbc.schema.Column; +import org.apache.openjpa.jdbc.schema.DynamicSchemaFactory; +import org.apache.openjpa.jdbc.schema.LazySchemaFactory; +import org.apache.openjpa.jdbc.schema.Schema; +import org.apache.openjpa.jdbc.schema.SchemaGenerator; +import org.apache.openjpa.jdbc.schema.SchemaGroup; +import org.apache.openjpa.jdbc.schema.SchemaSerializer; +import org.apache.openjpa.jdbc.schema.SchemaTool; +import org.apache.openjpa.jdbc.schema.Table; +import org.apache.openjpa.jdbc.schema.XMLSchemaSerializer; +import org.apache.openjpa.jdbc.sql.DBDictionary; +import org.apache.openjpa.kernel.Seq; +import org.apache.openjpa.lib.conf.Configurations; +import org.apache.openjpa.lib.log.Log; +import org.apache.openjpa.lib.meta.ClassArgParser; +import org.apache.openjpa.lib.util.Files; +import org.apache.openjpa.lib.util.Localizer; +import org.apache.openjpa.lib.util.Options; +import org.apache.openjpa.lib.util.Services; +import org.apache.openjpa.meta.ClassMetaData; +import org.apache.openjpa.meta.FieldMetaData; +import org.apache.openjpa.meta.JavaTypes; +import org.apache.openjpa.meta.MetaDataFactory; +import org.apache.openjpa.meta.MetaDataModes; +import org.apache.openjpa.meta.QueryMetaData; +import org.apache.openjpa.meta.SequenceMetaData; +import org.apache.openjpa.meta.ValueStrategies; +import org.apache.openjpa.util.GeneralException; +import org.apache.openjpa.util.InternalException; +import org.apache.openjpa.util.MetaDataException; + +/** + * Tool for manipulating class mappings and associated schema. 
+ * + * @author Abe White + */ +public class MappingTool + implements MetaDataModes { + + public static final String SCHEMA_ACTION_NONE = "none"; + + public static final String ACTION_ADD = "add"; + public static final String ACTION_REFRESH = "refresh"; + public static final String ACTION_BUILD_SCHEMA = "buildSchema"; + public static final String ACTION_DROP = "drop"; + public static final String ACTION_VALIDATE = "validate"; + public static final String ACTION_EXPORT = "export"; + public static final String ACTION_IMPORT = "import"; + + public static final String[] ACTIONS = new String[]{ + ACTION_ADD, + ACTION_REFRESH, + ACTION_BUILD_SCHEMA, + ACTION_DROP, + ACTION_VALIDATE, + ACTION_EXPORT, + ACTION_IMPORT, + }; + + private static Localizer _loc = Localizer.forPackage(MappingTool.class); + + private final JDBCConfiguration _conf; + private final Log _log; + private final String _action; + private final boolean _meta; + private final int _mode; + + private MappingRepository _repos = null; + private SchemaGroup _schema = null; + private SchemaTool _schemaTool = null; + private String _schemaAction = SchemaTool.ACTION_ADD; + private boolean _readSchema = false; + private boolean _pks = false; + private boolean _fks = false; + private boolean _indexes = false; + private boolean _seqs = true; + private boolean _dropUnused = true; + private boolean _ignoreErrors = false; + private File _file = null; + private Writer _mappingWriter = null; + private Writer _schemaWriter = null; + + // buffer metadatas to be dropped + private Set _dropCls = null; + private Set _dropMap = null; + private boolean _flush = false; + private boolean _flushSchema = false; + + /** + * Constructor. Supply configuration and action. + */ + public MappingTool(JDBCConfiguration conf, String action, boolean meta) { + _conf = conf; + _log = conf.getLog(JDBCConfiguration.LOG_METADATA); + _meta = meta; + + if (action == null) + _action = ACTION_REFRESH; + else if (!Arrays.asList(ACTIONS).contains(action)) + throw new IllegalArgumentException("action == " + action); + else + _action = action; + + if (meta && ACTION_ADD.equals(_action)) + _mode = MODE_META; + else if (meta && ACTION_DROP.equals(_action)) + _mode = MODE_META | MODE_MAPPING | MODE_QUERY; + else + _mode = MODE_MAPPING; + } + + /** + * The action supplied on construction. + */ + public String getAction() { + return _action; + } + + /** + * Whether the action works on metadata as well as mappings. + */ + public boolean isMetaDataAction() { + return _meta; + } + + /** + * The schema modification policy, or none. See the + * ACTION constants in {@link SchemaTool}. Defaults to + * {@link SchemaTool#ACTION_ADD}. + */ + public String getSchemaAction() { + return _schemaAction; + } + + /** + * The schema modification policy, or none. See the + * ACTION constants in {@link SchemaTool}. Defaults to + * {@link SchemaTool#ACTION_ADD}. + */ + public void setSchemaAction(String schemaAction) { + _schemaAction = schemaAction; + } + + /** + * Set to true to read the entire schema before mapping. + * Leaving this option false saves time, but is dangerous when adding + * new mappings, because without full knowledge of the existing schema the + * mapping tool might create tables or indexes that conflict with + * existing components. + */ + public boolean getReadSchema() { + return _readSchema; + } + + /** + * Set to true to read the entire schema before mapping. 
+ * Leaving this option false saves time, but is dangerous when adding + * new mappings, because without full knowledge of the existing schema the + * mapping tool might create tables or indexes that conflict with + * existing components. + */ + public void setReadSchema(boolean readSchema) { + _readSchema = readSchema; + } + + /** + * Whether to manipulate sequences. Defaults to true. + */ + public boolean getSequences() { + return _seqs; + } + + /** + * Whether to manipulate sequences. Defaults to true. + */ + public void setSequences(boolean seqs) { + _seqs = seqs; + } + + /** + * Whether indexes on existing tables should be manipulated. + * Defaults to false. + */ + public boolean getIndexes() { + return _indexes; + } + + /** + * Whether indexes on existing tables should be manipulated. + * Defaults to false. + */ + public void setIndexes(boolean indexes) { + _indexes = indexes; + } + + /** + * Whether foreign keys on existing tables should be manipulated. + * Defaults to false. + */ + public boolean getForeignKeys() { + return _fks; + } + + /** + * Whether foreign keys on existing tables should be manipulated. + * Defaults to false. + */ + public void setForeignKeys(boolean fks) { + _fks = fks; + } + + /** + * Whether primary keys on existing tables should be manipulated. + * Defaults to false. + */ + public boolean getPrimaryKeys() { + return _pks; + } + + /** + * Whether primary keys on existing tables should be manipulated. + * Defaults to false. + */ + public void setPrimaryKeys(boolean pks) { + _pks = pks; + } + + /** + * Whether schema components that are unused by any mapping will be + * dropped from this tool's {@link SchemaGroup}, and, depending on + * the schema action, from the database. Defaults to true. + */ + public boolean getDropUnusedComponents() { + return _dropUnused; + } + + /** + * Whether schema components that are unused by any mapping will be + * dropped from this tool's {@link SchemaGroup}, and, depending on + * the schema action, from the database. Defaults to true. + */ + public void setDropUnusedComponents(boolean dropUnused) { + _dropUnused = dropUnused; + } + + /** + * Whether and SQL errors should cause a failure or just issue a warning. + */ + public void setIgnoreErrors(boolean ignoreErrors) { + _ignoreErrors = ignoreErrors; + } + + /** + * Whether and SQL errors should cause a failure or just issue a warning. + */ + public boolean getIgnoreErrors() { + return _ignoreErrors; + } + + /** + * Return the schema tool to use for schema modification. + */ + public SchemaTool getSchemaTool() { + if (_schemaTool == null) + _schemaTool = newSchemaTool(_schemaAction); + return _schemaTool; + } + + /** + * Return the schema tool to use for schema modification. + */ + private SchemaTool newSchemaTool(String action) { + if (SCHEMA_ACTION_NONE.equals(action)) + action = null; + SchemaTool tool = new SchemaTool(_conf, action); + tool.setIgnoreErrors(getIgnoreErrors()); + tool.setPrimaryKeys(getPrimaryKeys()); + tool.setForeignKeys(getForeignKeys()); + tool.setIndexes(getIndexes()); + tool.setSequences(getSequences()); + return tool; + } + + /** + * Set the schema tool to use for schema modification. + */ + public void setSchemaTool(SchemaTool tool) { + _schemaTool = tool; + } + + /** + * The stream to export the planned schema to as an XML document. + * If non-null, then the database schema will not be altered. + */ + public Writer getSchemaWriter() { + return _schemaWriter; + } + + /** + * The stream to export the planned schema to as an XML document. 
+ * If non-null, then the database schema will not be altered. + */ + public void setSchemaWriter(Writer schemaWriter) { + _schemaWriter = schemaWriter; + } + + /** + * The stream to export the planned mappings to as an XML document. + * If non-null, then the mapping repository will not be altered. + */ + public Writer getMappingWriter() { + return _mappingWriter; + } + + /** + * The stream to export the planned mappings to as an XML document. + * If non-null, then the mapping repository will not be altered. + */ + public void setMappingWriter(Writer mappingWriter) { + _mappingWriter = mappingWriter; + } + + /** + * If adding metadata, the metadata file to add to. + */ + public File getMetaDataFile() { + return _file; + } + + /** + * If adding metadata, the metadata file to add to. + */ + public void setMetaDataFile(File file) { + _file = file; + } + + /** + * Return the repository to use to access mapping information. + * Defaults to a new {@link MappingRepository}. + */ + public MappingRepository getRepository() { + if (_repos == null) { + _repos = _conf.newMappingRepositoryInstance(); + _repos.setSchemaGroup(getSchemaGroup()); + _repos.setValidate(_repos.VALIDATE_UNENHANCED, false); + } + return _repos; + } + + /** + * Set the repository to use to access mapping information. + */ + public void setRepository(MappingRepository repos) { + _repos = repos; + } + + /** + * Return the schema group to use in mapping. If none has been set, the + * schema will be generated from the database. + */ + public SchemaGroup getSchemaGroup() { + if (_schema == null) { + if (ACTION_BUILD_SCHEMA.equals(_action)) { + DynamicSchemaFactory factory = new DynamicSchemaFactory(); + factory.setConfiguration(_conf); + _schema = factory; + } else if (_readSchema + || SchemaTool.ACTION_RETAIN.equals(_schemaAction) + || SchemaTool.ACTION_REFRESH.equals(_schemaAction)) { + _schema = (SchemaGroup) getSchemaTool().getDBSchemaGroup(). + clone(); + } else { + // with this we'll just read tables as different mappings + // look for them + LazySchemaFactory factory = new LazySchemaFactory(); + factory.setConfiguration(_conf); + factory.setPrimaryKeys(getPrimaryKeys()); + factory.setForeignKeys(getForeignKeys()); + factory.setIndexes(getIndexes()); + _schema = factory; + } + + if (_schema.getSchemas().length == 0) + _schema.addSchema(); + } + return _schema; + } + + /** + * Set the schema to use in mapping. + */ + public void setSchemaGroup(SchemaGroup schema) { + _schema = schema; + } + + /** + * Reset the internal repository. This is called automatically after + * every {@link #record}. + */ + public void clear() { + _repos = null; + _schema = null; + _schemaTool = null; + _flush = false; + _flushSchema = false; + if (_dropCls != null) + _dropCls.clear(); + if (_dropMap != null) + _dropMap.clear(); + } + + /** + * Records the changes that have been made to both the mappings and the + * associated schema, and clears the tool for further use. This also + * involves clearing the internal mapping repository. 
+ */ + public void record() { + MappingRepository repos = getRepository(); + MetaDataFactory io = repos.getMetaDataFactory(); + ClassMapping[] mappings; + if (!ACTION_DROP.equals(_action)) + mappings = repos.getMappings(); + else if (_dropMap != null) + mappings = (ClassMapping[]) _dropMap.toArray + (new ClassMapping[_dropMap.size()]); + else + mappings = new ClassMapping[0]; + + try { + if (_dropCls != null && !_dropCls.isEmpty()) { + Class[] cls = (Class[]) _dropCls.toArray + (new Class[_dropCls.size()]); + if (!io.drop(cls, _mode, null)) + _log.warn(_loc.get("bad-drop", _dropCls)); + } + + if (_flushSchema) { + // drop portions of the known schema that no mapping uses, and + // add sequences used for value generation + if (_dropUnused) + dropUnusedSchemaComponents(mappings); + addSequenceComponents(mappings); + + // now run the schematool as long as we're doing some schema + // action and the user doesn't just want an xml output + if (!SCHEMA_ACTION_NONE.equals(_schemaAction) + && (_schemaWriter == null || (_schemaTool != null + && _schemaTool.getWriter() != null))) { + SchemaTool tool = getSchemaTool(); + tool.setSchemaGroup(getSchemaGroup()); + tool.run(); + tool.record(); + } + + // xml output of schema? + if (_schemaWriter != null) { + // serialize the planned schema to the stream + SchemaSerializer ser = new XMLSchemaSerializer(_conf); + ser.addAll(getSchemaGroup()); + ser.serialize(_schemaWriter, ser.PRETTY); + _schemaWriter.flush(); + } + } + if (!_flush) + return; + + QueryMetaData[] queries = repos.getQueryMetaDatas(); + SequenceMetaData[] seqs = repos.getSequenceMetaDatas(); + Map output = null; + + // if we're outputting to stream, set all metas to same file so + // they get placed in single string + if (_mappingWriter != null) { + output = new HashMap(); + File tmp = new File("openjpatmp"); + for (int i = 0; i < mappings.length; i++) + mappings[i].setSource(tmp, mappings[i].SRC_OTHER); + for (int i = 0; i < queries.length; i++) + queries[i].setSource(tmp, queries[i].getSourceScope(), + queries[i].SRC_OTHER); + for (int i = 0; i < seqs.length; i++) + seqs[i].setSource(tmp, seqs[i].getSourceScope(), + seqs[i].SRC_OTHER); + } + + // store + if (!io.store(mappings, queries, seqs, _mode, output)) + throw new MetaDataException(_loc.get("bad-store")); + + // write to stream + if (_mappingWriter != null) { + PrintWriter out = new PrintWriter(_mappingWriter); + for (Iterator itr = output.values().iterator(); + itr.hasNext();) + out.println((String) itr.next()); + out.flush(); + } + } + catch (RuntimeException re) { + throw re; + } catch (Exception e) { + throw new GeneralException(e); + } finally { + clear(); + } + } + + /** + * Drops schema components that appear to be unused from the local + * copy of the schema group. 
+ */ + private void dropUnusedSchemaComponents(ClassMapping[] mappings) { + FieldMapping[] fields; + for (int i = 0; i < mappings.length; i++) { + mappings[i].refSchemaComponents(); + mappings[i].getDiscriminator().refSchemaComponents(); + mappings[i].getVersion().refSchemaComponents(); + fields = mappings[i].getDefinedFieldMappings(); + for (int j = 0; j < fields.length; j++) + fields[j].refSchemaComponents(); + } + + // also allow the dbdictionary to ref any schema components that + // it adds apart from mappings + SchemaGroup group = getSchemaGroup(); + Schema[] schemas = group.getSchemas(); + Table[] tables; + DBDictionary dict = _conf.getDBDictionaryInstance(); + for (int i = 0; i < schemas.length; i++) { + tables = schemas[i].getTables(); + for (int j = 0; j < tables.length; j++) + dict.refSchemaComponents(tables[j]); + } + + group.removeUnusedComponents(); + } + + /** + * Add tables used by sequences to the given schema. + */ + private void addSequenceComponents(ClassMapping[] mappings) { + SchemaGroup group = getSchemaGroup(); + for (int i = 0; i < mappings.length; i++) + addSequenceComponents(mappings[i], group); + } + + /** + * Add tables used by sequences to the given schema. + */ + private void addSequenceComponents(ClassMapping mapping, + SchemaGroup group) { + SequenceMetaData smd = mapping.getIdentitySequenceMetaData(); + Seq seq = null; + if (smd != null) + seq = smd.getInstance(null); + else if (mapping.getIdentityStrategy() == ValueStrategies.NATIVE + || (mapping.getIdentityStrategy() == ValueStrategies.NONE + && mapping.getIdentityType() == ClassMapping.ID_DATASTORE)) + seq = _conf.getSequenceInstance(); + + if (seq instanceof JDBCSeq) + ((JDBCSeq) seq).addSchema(mapping, group); + + FieldMapping[] fmds; + if (mapping.getEmbeddingMetaData() == null) + fmds = mapping.getDefinedFieldMappings(); + else + fmds = mapping.getFieldMappings(); + for (int i = 0; i < fmds.length; i++) { + smd = fmds[i].getValueSequenceMetaData(); + if (smd != null) { + seq = smd.getInstance(null); + if (seq instanceof JDBCSeq) + ((JDBCSeq) seq).addSchema(mapping, group); + } else if (fmds[i].getEmbeddedMapping() != null) + addSequenceComponents(fmds[i].getEmbeddedMapping(), group); + } + } + + /////////// + // Actions + /////////// + + /** + * Run the configured action on the given instance. + */ + public void run(Class cls) { + if (ACTION_ADD.equals(_action)) { + if (_meta) + addMeta(cls); + else + add(cls); + } else if (ACTION_REFRESH.equals(_action)) + refresh(cls); + else if (ACTION_BUILD_SCHEMA.equals(_action)) + buildSchema(cls); + else if (ACTION_DROP.equals(_action)) + drop(cls); + else if (ACTION_VALIDATE.equals(_action)) + validate(cls); + } + + /** + * Add the mapping for the given instance. + */ + private void add(Class cls) { + if (cls == null) + return; + + MappingRepository repos = getRepository(); + repos.setStrategyInstaller(new MappingStrategyInstaller(repos)); + if (getMapping(repos, cls, true) != null) { + _flush = true; + _flushSchema = true; + } + } + + /** + * Return the mapping for the given type, or null if the type is + * persistence-aware. 
+ */ + private static ClassMapping getMapping(MappingRepository repos, Class cls, + boolean validate) { + // this will parse all possible metadata rsrcs looking for cls, so + // will detect if p-aware + ClassMapping mapping = repos.getMapping(cls, null, false); + if (mapping != null) + return mapping; + if (!validate || cls.isInterface() + || repos.getPersistenceAware(cls) != null) + return null; + throw new MetaDataException(_loc.get("no-meta", cls)); + } + + /** + * Create a metadata for the given instance. + */ + private void addMeta(Class cls) { + if (cls == null) + return; + + _flush = true; + MappingRepository repos = getRepository(); + repos.setResolve(MODE_MAPPING, false); + MetaDataFactory factory = repos.getMetaDataFactory(); + factory.getDefaults().setIgnoreNonPersistent(false); + factory.setStoreMode(MetaDataFactory.STORE_VERBOSE); + + ClassMetaData meta = repos.addMetaData(cls); + FieldMetaData[] fmds = meta.getDeclaredFields(); + for (int i = 0; i < fmds.length; i++) { + if (fmds[i].getDeclaredTypeCode() == JavaTypes.OBJECT + && fmds[i].getDeclaredType() != Object.class) + fmds[i].setDeclaredTypeCode(JavaTypes.PC); + } + meta.setSource(_file, meta.getSourceType()); + meta.setResolve(MODE_META, true); + } + + /** + * Refresh or add the mapping for the given instance. + */ + private void refresh(Class cls) { + if (cls == null) + return; + + MappingRepository repos = getRepository(); + repos.setStrategyInstaller(new RefreshStrategyInstaller(repos)); + if (getMapping(repos, cls, true) != null) { + _flush = true; + _flushSchema = true; + } + } + + /** + * Validate the mappings for the given class and its fields. + */ + private void validate(Class cls) { + if (cls == null) + return; + + MappingRepository repos = getRepository(); + repos.setStrategyInstaller(new RuntimeStrategyInstaller(repos)); + if (getMapping(repos, cls, true) != null) + _flushSchema = !SCHEMA_ACTION_NONE.equals(_schemaAction) + && !SchemaTool.ACTION_ADD.equals(_schemaAction); + } + + /** + * Create the schema using the mapping for the given instance. + */ + private void buildSchema(Class cls) { + if (cls == null) + return; + + MappingRepository repos = getRepository(); + repos.setStrategyInstaller(new RuntimeStrategyInstaller(repos)); + if (getMapping(repos, cls, true) == null) + return; + + // set any logical pks to non-logical so they get flushed + _flushSchema = true; + Schema[] schemas = _schema.getSchemas(); + Table[] tables; + Column[] cols; + for (int i = 0; i < schemas.length; i++) { + tables = schemas[i].getTables(); + for (int j = 0; j < tables.length; j++) { + if (tables[j].getPrimaryKey() == null) + continue; + + tables[j].getPrimaryKey().setLogical(false); + cols = tables[j].getPrimaryKey().getColumns(); + for (int k = 0; k < cols.length; k++) + cols[k].setNotNull(true); + } + } + } + + /** + * Drop mapping for given class. 
+ */ + private void drop(Class cls) { + if (cls == null) + return; + + if (_dropCls == null) + _dropCls = new HashSet(); + _dropCls.add(cls); + if (!SchemaTool.ACTION_DROP.equals(_schemaAction)) + return; + + MappingRepository repos = getRepository(); + repos.setStrategyInstaller(new RuntimeStrategyInstaller(repos)); + ClassMapping mapping = null; + try { + mapping = repos.getMapping(cls, null, false); + } catch (Exception e) { + } + + if (mapping != null) { + _flushSchema = true; + if (_dropMap == null) + _dropMap = new HashSet(); + _dropMap.add(mapping); + } else + _log.warn(_loc.get("no-drop-meta", cls)); + } + + //////// + // Main + //////// + + /** + * Usage: java org.apache.openjpa.jdbc.meta.MappingTool [option]* + * [-action/-a <refresh | add | buildSchema | drop | validate | import + * | export>] <class name | .java file | .class file | .jdo file>* + * Where the following options are recognized. + * + * The various actions are as follows. + * + * Each class supplied as an argument must have valid metadata. If + * no class arguments are given, the tool runs on all metadata files in + * the CLASSPATH. + * Examples: + * + */ + public static void main(String[] args) + throws IOException, SQLException { + Options opts = new Options(); + args = opts.setFromCmdLine(args); + JDBCConfiguration conf = new JDBCConfigurationImpl(); + try { + if (!run(conf, args, opts)) + System.err.println(_loc.get("tool-usage")); + } finally { + conf.close(); + } + } + + /** + * Run the tool. Returns false if invalid options are given. + * + * @see #main + */ + public static boolean run(JDBCConfiguration conf, String[] args, + Options opts) + throws IOException, SQLException { + if (opts.containsKey("help") || opts.containsKey("-help")) + return false; + + // flags + Flags flags = new Flags(); + flags.action = opts.removeProperty("action", "a", flags.action); + flags.schemaAction = opts.removeProperty("schemaAction", "sa", + flags.schemaAction); + flags.dropTables = opts.removeBooleanProperty + ("dropTables", "dt", flags.dropTables); + flags.openjpaTables = opts.removeBooleanProperty + ("openjpaTables", "ot", flags.openjpaTables); + flags.dropSequences = opts.removeBooleanProperty + ("dropSequences", "dsq", flags.dropSequences); + flags.readSchema = opts.removeBooleanProperty + ("readSchema", "rs", flags.readSchema); + flags.primaryKeys = opts.removeBooleanProperty + ("primaryKeys", "pk", flags.primaryKeys); + flags.indexes = opts.removeBooleanProperty("indexes", "ix", + flags.indexes); + flags.foreignKeys = opts.removeBooleanProperty("foreignKeys", "fk", + flags.foreignKeys); + flags.sequences = opts.removeBooleanProperty("sequences", "sq", + flags.sequences); + flags.ignoreErrors = opts.removeBooleanProperty + ("ignoreErrors", "i", flags.ignoreErrors); + flags.meta = opts.removeBooleanProperty("meta", "m", flags.meta); + String fileName = opts.removeProperty("file", "f", null); + String schemaFileName = opts.removeProperty("schemaFile", "sf", null); + String sqlFileName = opts.removeProperty("sqlFile", "sql", null); + String schemas = opts.removeProperty("s"); + if (schemas != null) + opts.setProperty("schemas", schemas); + + Configurations.populateConfiguration(conf, opts); + ClassLoader loader = conf.getClassResolverInstance(). 
+ getClassLoader(MappingTool.class, null); + if (flags.meta && ACTION_ADD.equals(flags.action)) + flags.metaDataFile = Files.getFile(fileName, loader); + else + flags.mappingWriter = Files.getWriter(fileName, loader); + flags.schemaWriter = Files.getWriter(schemaFileName, loader); + flags.sqlWriter = Files.getWriter(sqlFileName, loader); + + return run(conf, args, flags, loader); + } + + /** + * Run the tool. Return false if an invalid option was given. + */ + public static boolean run(JDBCConfiguration conf, String[] args, + Flags flags, ClassLoader loader) + throws IOException, SQLException { + // default action based on whether the mapping defaults fills in + // missing info + if (flags.action == null) { + if (conf.getMappingDefaultsInstance().defaultMissingInfo()) + flags.action = ACTION_BUILD_SCHEMA; + else + flags.action = ACTION_REFRESH; + } + + // collect the classes to act on + Log log = conf.getLog(OpenJPAConfiguration.LOG_TOOL); + Collection classes = null; + if (args.length == 0) { + if (ACTION_IMPORT.equals(flags.action)) + return false; + log.info(_loc.get("running-all-classes")); + classes = conf.getMappingRepositoryInstance(). + loadPersistentTypes(true, loader); + } else { + classes = new HashSet(); + ClassArgParser classParser = conf.getMetaDataRepositoryInstance(). + getMetaDataFactory().newClassArgParser(); + classParser.setClassLoader(loader); + Class[] parsed; + for (int i = 0; args != null && i < args.length; i++) { + parsed = classParser.parseTypes(args[i]); + classes.addAll(Arrays.asList(parsed)); + } + } + + Class[] act = (Class[]) classes.toArray(new Class[classes.size()]); + if (ACTION_EXPORT.equals(flags.action)) { + // run exports until the first export succeeds + ImportExport[] instances = newImportExports(); + for (int i = 0; i < instances.length; i++) { + if (instances[i].exportMappings(conf, act, flags.meta, log, + flags.mappingWriter)) + return true; + } + return false; + } + if (ACTION_IMPORT.equals(flags.action)) { + // run exports until the first export succeeds + ImportExport[] instances = newImportExports(); + for (int i = 0; i < instances.length; i++) { + if (instances[i].importMappings(conf, act, args, flags.meta, + log, loader)) + return true; + } + return false; + } + + MappingTool tool; + try { + tool = new MappingTool(conf, flags.action, flags.meta); + } catch (IllegalArgumentException iae) { + return false; + } + + // setup the tool + tool.setIgnoreErrors(flags.ignoreErrors); + tool.setMetaDataFile(flags.metaDataFile); + tool.setMappingWriter(flags.mappingWriter); + tool.setSchemaAction(flags.schemaAction); + tool.setSchemaWriter(flags.schemaWriter); + tool.setReadSchema(flags.readSchema + && !ACTION_VALIDATE.equals(flags.action)); + tool.setPrimaryKeys(flags.primaryKeys); + tool.setForeignKeys(flags.foreignKeys); + tool.setIndexes(flags.indexes); + tool.setSequences(flags.sequences || flags.dropSequences); + + // make sure to do this after other settings so that other settings + // are passed on to schema tool + tool.getSchemaTool().setDropTables(flags.dropTables); + tool.getSchemaTool().setDropSequences(flags.dropSequences); + tool.getSchemaTool().setWriter(flags.sqlWriter); + tool.getSchemaTool().setOpenJPATables(flags.openjpaTables); + + // and run the action + for (int i = 0; i < act.length; i++) { + log.info(_loc.get("tool-running", act[i], flags.action)); + if (i == 0 && flags.readSchema) + log.info(_loc.get("tool-time")); + tool.run(act[i]); + } + log.info(_loc.get("tool-record")); + tool.record(); + return true; + } + + /** + * Create 
+    /**
+     * Create all available {@link ImportExport} instances.
+     */
+    private static ImportExport[] newImportExports() {
+        try {
+            Class[] types = Services.getImplementorClasses(ImportExport.class);
+            ImportExport[] instances = new ImportExport[types.length];
+            for (int i = 0; i < types.length; i++)
+                instances[i] = (ImportExport) types[i].newInstance();
+            return instances;
+        } catch (Throwable t) {
+            throw new InternalException(_loc.get("importexport-instantiate"), t);
+        }
+    }
+
+    /**
+     * Run flags.
+     */
+    public static class Flags {
+
+        public String action = null;
+        public boolean meta = false;
+        public String schemaAction = SchemaTool.ACTION_ADD;
+        public File metaDataFile = null;
+        public Writer mappingWriter = null;
+        public Writer schemaWriter = null;
+        public Writer sqlWriter = null;
+        public boolean ignoreErrors = false;
+        public boolean readSchema = false;
+        public boolean dropTables = false;
+        public boolean openjpaTables = false;
+        public boolean dropSequences = false;
+        public boolean sequences = true;
+        public boolean primaryKeys = false;
+        public boolean foreignKeys = false;
+        public boolean indexes = false;
+    }
+
+    /**
+     * Helper used to import and export mapping data.
+     */
+    public static interface ImportExport {
+
+        /**
+         * Import mappings for the given classes based on the given arguments.
+         */
+        public boolean importMappings(JDBCConfiguration conf, Class[] act,
+            String[] args, boolean meta, Log log, ClassLoader loader)
+            throws IOException;
+
+        /**
+         * Export mappings for the given classes based on the given arguments.
+         */
+        public boolean exportMappings(JDBCConfiguration conf, Class[] act,
+            boolean meta, Log log, Writer writer)
+            throws IOException;
+    }
+}
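The ImportExport helpers are discovered at runtime through
Services.getImplementorClasses in newImportExports() above, so additional
import/export formats can plug in without changes to the tool itself. A
minimal sketch of a conforming implementation; the class name is
hypothetical, and registration through the Services provider mechanism is
assumed:

    import java.io.IOException;
    import java.io.Writer;

    import org.apache.openjpa.jdbc.conf.JDBCConfiguration;
    import org.apache.openjpa.jdbc.meta.MappingTool;
    import org.apache.openjpa.lib.log.Log;

    // Hypothetical no-op helper; a real one would read or write mappings.
    public class NoopImportExport implements MappingTool.ImportExport {

        public boolean importMappings(JDBCConfiguration conf, Class[] act,
            String[] args, boolean meta, Log log, ClassLoader loader)
            throws IOException {
            // returning false declines, letting the next instance try
            return false;
        }

        public boolean exportMappings(JDBCConfiguration conf, Class[] act,
            boolean meta, Log log, Writer writer)
            throws IOException {
            return false;
        }
    }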
diff --git a/openjpa-jdbc/src/main/java/org/apache/openjpa/jdbc/sql/DB2Dictionary.java b/openjpa-jdbc/src/main/java/org/apache/openjpa/jdbc/sql/DB2Dictionary.java
index a0a39f9bf..ecbd0f8b1 100644
--- a/openjpa-jdbc/src/main/java/org/apache/openjpa/jdbc/sql/DB2Dictionary.java
+++ b/openjpa-jdbc/src/main/java/org/apache/openjpa/jdbc/sql/DB2Dictionary.java
@@ -1,162 +1,162 @@
-/*
- * Copyright 2006 The Apache Software Foundation.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.openjpa.jdbc.sql;
-
-import java.sql.Connection;
-import java.sql.DatabaseMetaData;
-import java.sql.SQLException;
-import java.util.Arrays;
-
-import org.apache.openjpa.jdbc.schema.Sequence;
-
-/**
- * Dictionary for IBM DB2 database.
- */
-public class DB2Dictionary
-    extends AbstractDB2Dictionary {
-
-    public DB2Dictionary() {
-        platform = "DB2";
-        validationSQL = "SELECT DISTINCT(CURRENT TIMESTAMP) FROM "
-            + "SYSIBM.SYSTABLES";
-        supportsSelectEndIndex = true;
-
-        nextSequenceQuery = "VALUES NEXTVAL FOR {0}";
-
-        binaryTypeName = "BLOB(1M)";
-        longVarbinaryTypeName = "BLOB(1M)";
-        varbinaryTypeName = "BLOB(1M)";
-        clobTypeName = "CLOB(1M)";
-        longVarcharTypeName = "LONG VARCHAR";
-
-        fixedSizeTypeNameSet.addAll(Arrays.asList(new String[]{
-            "LONG VARCHAR FOR BIT DATA", "LONG VARCHAR", "LONG VARGRAPHIC",
-        }));
-
-        maxConstraintNameLength = 18;
-        maxIndexNameLength = 18;
-        maxColumnNameLength = 30;
-        supportsDeferredConstraints = false;
-        supportsDefaultDeleteAction = false;
-        supportsAlterTableWithDropColumn = false;
-
-        supportsNullTableForGetColumns = false;
-
-        reservedWordSet.addAll(Arrays.asList(new String[]{
-            "AFTER", "ALIAS", "ALLOW", "APPLICATION", "ASSOCIATE", "ASUTIME",
-            "AUDIT", "AUX", "AUXILIARY", "BEFORE", "BINARY", "BUFFERPOOL",
-            "CACHE", "CALL", "CALLED", "CAPTURE", "CARDINALITY", "CCSID",
-            "CLUSTER", "COLLECTION", "COLLID", "COMMENT", "CONCAT",
-            "CONDITION", "CONTAINS", "COUNT_BIG", "CURRENT_LC_CTYPE",
-            "CURRENT_PATH", "CURRENT_SERVER", "CURRENT_TIMEZONE", "CYCLE",
-            "DATA", "DATABASE", "DAYS", "DB2GENERAL", "DB2GENRL", "DB2SQL",
-            "DBINFO", "DEFAULTS", "DEFINITION", "DETERMINISTIC", "DISALLOW",
-            "DO", "DSNHATTR", "DSSIZE", "DYNAMIC", "EACH", "EDITPROC", "ELSEIF",
-            "ENCODING", "END-EXEC1", "ERASE", "EXCLUDING", "EXIT", "FENCED",
-            "FIELDPROC", "FILE", "FINAL", "FREE", "FUNCTION", "GENERAL",
-            "GENERATED", "GRAPHIC", "HANDLER", "HOLD", "HOURS", "IF",
-            "INCLUDING", "INCREMENT", "INDEX", "INHERIT", "INOUT", "INTEGRITY",
-            "ISOBID", "ITERATE", "JAR", "JAVA", "LABEL", "LC_CTYPE", "LEAVE",
-            "LINKTYPE", "LOCALE", "LOCATOR", "LOCATORS", "LOCK", "LOCKMAX",
-            "LOCKSIZE", "LONG", "LOOP", "MAXVALUE", "MICROSECOND",
-            "MICROSECONDS", "MINUTES", "MINVALUE", "MODE", "MODIFIES", "MONTHS",
-            "NEW", "NEW_TABLE", "NOCACHE", "NOCYCLE", "NODENAME", "NODENUMBER",
-            "NOMAXVALUE", "NOMINVALUE", "NOORDER", "NULLS", "NUMPARTS", "OBID",
-            "OLD", "OLD_TABLE", "OPTIMIZATION", "OPTIMIZE", "OUT", "OVERRIDING",
-            "PACKAGE", "PARAMETER", "PART", "PARTITION", "PATH", "PIECESIZE",
-            "PLAN", "PRIQTY", "PROGRAM", "PSID", "QUERYNO", "READS", "RECOVERY",
-            "REFERENCING", "RELEASE", "RENAME", "REPEAT", "RESET", "RESIGNAL",
-            "RESTART", "RESULT", "RESULT_SET_LOCATOR", "RETURN", "RETURNS",
-            "ROUTINE", "ROW", "RRN", "RUN", "SAVEPOINT", "SCRATCHPAD",
-            "SECONDS", "SECQTY", "SECURITY", "SENSITIVE", "SIGNAL", "SIMPLE",
-            "SOURCE", "SPECIFIC", "SQLID", "STANDARD", "START", "STATIC",
-            "STAY", "STOGROUP", "STORES", "STYLE", "SUBPAGES", "SYNONYM",
-            "SYSFUN", "SYSIBM", "SYSPROC", "SYSTEM", "TABLESPACE", "TRIGGER",
-            "TYPE", "UNDO", "UNTIL", "VALIDPROC", "VARIABLE", "VARIANT", "VCAT",
-            "VOLUMES", "WHILE", "WLM", "YEARS",
-        }));
-    }
-
-    public boolean supportsRandomAccessResultSet(Select sel,
-        boolean forUpdate) {
-        return !forUpdate
-            && super.supportsRandomAccessResultSet(sel, forUpdate);
-    }
-
-    protected void appendSelectRange(SQLBuffer buf, long start, long end) {
-        // appends the literal range string, since DB2 is unable to handle
-        // a bound parameter for it
-        buf.append(" FETCH FIRST ").append(Long.toString(end)).
-            append(" ROWS ONLY");
-    }
-
-    public String[] getCreateSequenceSQL(Sequence seq) {
-        String[] sql = super.getCreateSequenceSQL(seq);
-        if (seq.getAllocate() > 1)
-            sql[0] += " CACHE " + seq.getAllocate();
-        return sql;
-    }
-
-    protected String getSequencesSQL(String schemaName, String sequenceName) {
-        StringBuffer buf = new StringBuffer();
-        buf.append("SELECT SEQSCHEMA AS SEQUENCE_SCHEMA, ").
-            append("SEQNAME AS SEQUENCE_NAME FROM SYSCAT.SEQUENCES");
-        if (schemaName != null || sequenceName != null)
-            buf.append(" WHERE ");
-        if (schemaName != null) {
-            buf.append("SEQSCHEMA = ?");
-            if (sequenceName != null)
-                buf.append(" AND ");
-        }
-        if (sequenceName != null)
-            buf.append("SEQNAME = ?");
-        return buf.toString();
-    }
-
-    public Connection decorate(Connection conn)
-        throws SQLException {
-        // some versions of the DB2 driver seem to default to
-        // READ_UNCOMMITTED, which will prevent locking from working
-        // (multiple SELECT ... FOR UPDATE statements are allowed on
-        // the same instance); if we have not overridden the
-        // transaction isolation in the configuration, default to
-        // TRANSACTION_READ_COMMITTED
-        conn = super.decorate(conn);
-
-        if (conf.getTransactionIsolationConstant() == -1
-            && conn.getTransactionIsolation() < conn.TRANSACTION_READ_COMMITTED)
-            conn.setTransactionIsolation(conn.TRANSACTION_READ_COMMITTED);
-
-        return conn;
-    }
-
-    public void connectedConfiguration(Connection conn) throws SQLException {
-        super.connectedConfiguration(conn);
-
-        DatabaseMetaData metaData = conn.getMetaData();
-        if (metaData.getJDBCMajorVersion() >= 3) {
-            int maj = metaData.getDatabaseMajorVersion();
-            int min = metaData.getDatabaseMinorVersion();
-
-            if (maj >= 9 || (maj == 8 && min >= 2)) {
-                supportsLockingWithMultipleTables = true;
-                supportsLockingWithInnerJoin = true;
-                supportsLockingWithOuterJoin = true;
-                forUpdateClause = "WITH RR USE AND KEEP UPDATE LOCKS";
-            }
-        }
-    }
-}
+/*
+ * Copyright 2006 The Apache Software Foundation.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.openjpa.jdbc.sql;
+
+import java.sql.Connection;
+import java.sql.DatabaseMetaData;
+import java.sql.SQLException;
+import java.util.Arrays;
+
+import org.apache.openjpa.jdbc.schema.Sequence;
+
+/**
+ * Dictionary for IBM DB2 database.
+ */
+public class DB2Dictionary
+    extends AbstractDB2Dictionary {
+
+    public DB2Dictionary() {
+        platform = "DB2";
+        validationSQL = "SELECT DISTINCT(CURRENT TIMESTAMP) FROM "
+            + "SYSIBM.SYSTABLES";
+        supportsSelectEndIndex = true;
+
+        nextSequenceQuery = "VALUES NEXTVAL FOR {0}";
+
+        binaryTypeName = "BLOB(1M)";
+        longVarbinaryTypeName = "BLOB(1M)";
+        varbinaryTypeName = "BLOB(1M)";
+        clobTypeName = "CLOB(1M)";
+        longVarcharTypeName = "LONG VARCHAR";
+
+        fixedSizeTypeNameSet.addAll(Arrays.asList(new String[]{
+            "LONG VARCHAR FOR BIT DATA", "LONG VARCHAR", "LONG VARGRAPHIC",
+        }));
+
+        maxConstraintNameLength = 18;
+        maxIndexNameLength = 18;
+        maxColumnNameLength = 30;
+        supportsDeferredConstraints = false;
+        supportsDefaultDeleteAction = false;
+        supportsAlterTableWithDropColumn = false;
+
+        supportsNullTableForGetColumns = false;
+
+        reservedWordSet.addAll(Arrays.asList(new String[]{
+            "AFTER", "ALIAS", "ALLOW", "APPLICATION", "ASSOCIATE", "ASUTIME",
+            "AUDIT", "AUX", "AUXILIARY", "BEFORE", "BINARY", "BUFFERPOOL",
+            "CACHE", "CALL", "CALLED", "CAPTURE", "CARDINALITY", "CCSID",
+            "CLUSTER", "COLLECTION", "COLLID", "COMMENT", "CONCAT",
+            "CONDITION", "CONTAINS", "COUNT_BIG", "CURRENT_LC_CTYPE",
+            "CURRENT_PATH", "CURRENT_SERVER", "CURRENT_TIMEZONE", "CYCLE",
+            "DATA", "DATABASE", "DAYS", "DB2GENERAL", "DB2GENRL", "DB2SQL",
+            "DBINFO", "DEFAULTS", "DEFINITION", "DETERMINISTIC", "DISALLOW",
+            "DO", "DSNHATTR", "DSSIZE", "DYNAMIC", "EACH", "EDITPROC", "ELSEIF",
+            "ENCODING", "END-EXEC1", "ERASE", "EXCLUDING", "EXIT", "FENCED",
+            "FIELDPROC", "FILE", "FINAL", "FREE", "FUNCTION", "GENERAL",
+            "GENERATED", "GRAPHIC", "HANDLER", "HOLD", "HOURS", "IF",
+            "INCLUDING", "INCREMENT", "INDEX", "INHERIT", "INOUT", "INTEGRITY",
+            "ISOBID", "ITERATE", "JAR", "JAVA", "LABEL", "LC_CTYPE", "LEAVE",
+            "LINKTYPE", "LOCALE", "LOCATOR", "LOCATORS", "LOCK", "LOCKMAX",
+            "LOCKSIZE", "LONG", "LOOP", "MAXVALUE", "MICROSECOND",
+            "MICROSECONDS", "MINUTES", "MINVALUE", "MODE", "MODIFIES", "MONTHS",
+            "NEW", "NEW_TABLE", "NOCACHE", "NOCYCLE", "NODENAME", "NODENUMBER",
+            "NOMAXVALUE", "NOMINVALUE", "NOORDER", "NULLS", "NUMPARTS", "OBID",
+            "OLD", "OLD_TABLE", "OPTIMIZATION", "OPTIMIZE", "OUT", "OVERRIDING",
+            "PACKAGE", "PARAMETER", "PART", "PARTITION", "PATH", "PIECESIZE",
+            "PLAN", "PRIQTY", "PROGRAM", "PSID", "QUERYNO", "READS", "RECOVERY",
+            "REFERENCING", "RELEASE", "RENAME", "REPEAT", "RESET", "RESIGNAL",
+            "RESTART", "RESULT", "RESULT_SET_LOCATOR", "RETURN", "RETURNS",
+            "ROUTINE", "ROW", "RRN", "RUN", "SAVEPOINT", "SCRATCHPAD",
+            "SECONDS", "SECQTY", "SECURITY", "SENSITIVE", "SIGNAL", "SIMPLE",
+            "SOURCE", "SPECIFIC", "SQLID", "STANDARD", "START", "STATIC",
+            "STAY", "STOGROUP", "STORES", "STYLE", "SUBPAGES", "SYNONYM",
+            "SYSFUN", "SYSIBM", "SYSPROC", "SYSTEM", "TABLESPACE", "TRIGGER",
+            "TYPE", "UNDO", "UNTIL", "VALIDPROC", "VARIABLE", "VARIANT", "VCAT",
+            "VOLUMES", "WHILE", "WLM", "YEARS",
+        }));
+    }
+
+    public boolean supportsRandomAccessResultSet(Select sel,
+        boolean forUpdate) {
+        return !forUpdate
+            && super.supportsRandomAccessResultSet(sel, forUpdate);
+    }
+
+    protected void appendSelectRange(SQLBuffer buf, long start, long end) {
+        // appends the literal range string, since DB2 is unable to handle
+        // a bound parameter for it
+        buf.append(" FETCH FIRST ").append(Long.toString(end)).
+            append(" ROWS ONLY");
+    }
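Because appendSelectRange above emits the row count as a literal rather
than a bound parameter, the limit appears inline in the generated SQL. A
toy illustration of the rendered suffix; the surrounding SELECT text is a
hypothetical example, not output of this patch:

    public class FetchFirstExample {
        public static void main(String[] args) {
            // for end = 10, appendSelectRange contributes this exact text
            long end = 10;
            String suffix = " FETCH FIRST " + Long.toString(end) + " ROWS ONLY";
            // e.g. "SELECT T0.ID FROM MAGAZINE T0" + suffix
            System.out.println(suffix);
        }
    }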
+
+    public String[] getCreateSequenceSQL(Sequence seq) {
+        String[] sql = super.getCreateSequenceSQL(seq);
+        if (seq.getAllocate() > 1)
+            sql[0] += " CACHE " + seq.getAllocate();
+        return sql;
+    }
+
+    protected String getSequencesSQL(String schemaName, String sequenceName) {
+        StringBuffer buf = new StringBuffer();
+        buf.append("SELECT SEQSCHEMA AS SEQUENCE_SCHEMA, ").
+            append("SEQNAME AS SEQUENCE_NAME FROM SYSCAT.SEQUENCES");
+        if (schemaName != null || sequenceName != null)
+            buf.append(" WHERE ");
+        if (schemaName != null) {
+            buf.append("SEQSCHEMA = ?");
+            if (sequenceName != null)
+                buf.append(" AND ");
+        }
+        if (sequenceName != null)
+            buf.append("SEQNAME = ?");
+        return buf.toString();
+    }
+
+    public Connection decorate(Connection conn)
+        throws SQLException {
+        // some versions of the DB2 driver seem to default to
+        // READ_UNCOMMITTED, which will prevent locking from working
+        // (multiple SELECT ... FOR UPDATE statements are allowed on
+        // the same instance); if we have not overridden the
+        // transaction isolation in the configuration, default to
+        // TRANSACTION_READ_COMMITTED
+        conn = super.decorate(conn);
+
+        if (conf.getTransactionIsolationConstant() == -1
+            && conn.getTransactionIsolation() < conn.TRANSACTION_READ_COMMITTED)
+            conn.setTransactionIsolation(conn.TRANSACTION_READ_COMMITTED);
+
+        return conn;
+    }
+
+    public void connectedConfiguration(Connection conn) throws SQLException {
+        super.connectedConfiguration(conn);
+
+        DatabaseMetaData metaData = conn.getMetaData();
+        if (metaData.getJDBCMajorVersion() >= 3) {
+            int maj = metaData.getDatabaseMajorVersion();
+            int min = metaData.getDatabaseMinorVersion();
+
+            if (maj >= 9 || (maj == 8 && min >= 2)) {
+                supportsLockingWithMultipleTables = true;
+                supportsLockingWithInnerJoin = true;
+                supportsLockingWithOuterJoin = true;
+                forUpdateClause = "WITH RR USE AND KEEP UPDATE LOCKS";
+            }
+        }
+    }
+}
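The isolation-level guard in decorate() above can be exercised against a
plain JDBC connection as well. A minimal sketch with placeholder connection
details (URL, user, and password are stand-ins for a real DB2 instance):

    import java.sql.Connection;
    import java.sql.DriverManager;

    public class IsolationCheck {
        public static void main(String[] args) throws Exception {
            // placeholder URL and credentials
            Connection conn = DriverManager.getConnection(
                "jdbc:db2://localhost:50000/SAMPLE", "user", "passwd");
            // mirror of decorate(): never run below READ_COMMITTED, since
            // READ_UNCOMMITTED defeats SELECT ... FOR UPDATE locking
            if (conn.getTransactionIsolation()
                < Connection.TRANSACTION_READ_COMMITTED)
                conn.setTransactionIsolation(
                    Connection.TRANSACTION_READ_COMMITTED);
            System.out.println("isolation: " + conn.getTransactionIsolation());
            conn.close();
        }
    }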