Framework for allowing ranges in query strings to be specified as parameters.

This is not needed for JPQL, but is useful for other query languages. Also
consolidated some internal query framework APIs.
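
As a rough standalone sketch of the idea, not part of this commit (the class below is a simplified stand-in for the StoreQuery.Range value object added in the diff, and the positional-parameter "range" syntax is hypothetical), a range supplied as query parameters can be resolved into concrete bounds at execution time:

    // Simplified stand-in for the new StoreQuery.Range value object.
    class Range {
        long start = 0L;              // 0-based inclusive first result
        long end = Long.MAX_VALUE;    // 0-based exclusive upper bound
        boolean lrs = false;          // treat as a potentially large result set?
    }

    public class RangeSketch {
        // Hypothetical resolution of a range given as two positional parameters,
        // e.g. a "RANGE ?1, ?2" clause in some non-JPQL query language.
        static void fillFromParameters(Range range, Object[] params, int low, int high) {
            range.start = ((Number) params[low]).longValue();
            range.end = ((Number) params[high]).longValue();
        }

        public static void main(String[] args) {
            Range range = new Range();   // defaults: 0 .. Long.MAX_VALUE
            fillFromParameters(range, new Object[] { 10L, 20L }, 0, 1);
            System.out.println(range.start + ".." + range.end);   // prints 10..20
        }
    }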



git-svn-id: https://svn.apache.org/repos/asf/incubator/openjpa/trunk@438338 13f79535-47bb-0310-9956-ffa450edef68
A. Abram White 2006-08-30 03:43:16 +00:00
parent 47ead9315d
commit 163cc2a22c
26 changed files with 348 additions and 433 deletions

View File

@ -130,10 +130,10 @@ public class JDBCStoreQuery
protected ResultObjectProvider executeQuery(Executor ex,
ClassMetaData base, ClassMetaData[] metas, boolean subclasses,
ExpressionFactory[] facts, QueryExpressions[] exps, Object[] params,
boolean lrs, long startIdx, long endIdx) {
Range range) {
if (metas.length > 1 && exps[0].isAggregate())
throw new UserException(Localizer.forPackage(JDBCStoreQuery.class)
.get("mult-mapping-aggregate", Arrays.asList(metas)));
throw new UserException(Localizer.forPackage(JDBCStoreQuery.class).
get("mult-mapping-aggregate", Arrays.asList(metas)));
ClassMapping[] mappings = (ClassMapping[]) metas;
JDBCFetchConfiguration fetch = (JDBCFetchConfiguration)
@ -143,12 +143,12 @@ public class JDBCStoreQuery
fetch.addJoins(Arrays.asList(exps[0].fetchPaths));
}
int eager = calculateEagerMode(exps[0], startIdx, endIdx);
int eager = calculateEagerMode(exps[0], range.start, range.end);
int subclassMode = fetch.getSubclassFetchMode((ClassMapping) base);
DBDictionary dict = _store.getDBDictionary();
long start = (mappings.length == 1 && dict.supportsSelectStartIndex)
? startIdx : 0L;
long end = (dict.supportsSelectEndIndex) ? endIdx : Long.MAX_VALUE;
? range.start : 0L;
long end = (dict.supportsSelectEndIndex) ? range.end : Long.MAX_VALUE;
// add selects with populate WHERE conditions to list
List sels = new ArrayList(mappings.length);
@ -165,8 +165,8 @@ public class JDBCStoreQuery
// we might want to use lrs settings if we can't use the range
if (sels.size() > 1)
start = 0L;
lrs = lrs || (fetch.getFetchBatchSize() >= 0 && (start != startIdx
|| end != endIdx));
boolean lrs = range.lrs || (fetch.getFetchBatchSize() >= 0
&& (start != range.start || end != range.end));
ResultObjectProvider[] rops = null;
ResultObjectProvider rop = null;
@ -209,9 +209,9 @@ public class JDBCStoreQuery
}
// need to fake result range?
if ((rops != null && endIdx != Long.MAX_VALUE) || start != startIdx
|| end != endIdx)
rop = new RangeResultObjectProvider(rop, startIdx, endIdx);
if ((rops != null && range.end != Long.MAX_VALUE)
|| start != range.start || end != range.end)
rop = new RangeResultObjectProvider(rop, range.start, range.end);
return rop;
}
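
The range handling above pushes whatever bounds the DBDictionary can express into the generated SQL and fakes the rest by wrapping the result in a RangeResultObjectProvider. A standalone sketch of that decision, not part of this commit (simplified names):

    // Push as much of the requested range into SQL as the dictionary supports,
    // then note whether the remainder must be faked with a wrapper provider.
    public class RangeDecisionSketch {
        static String plan(long reqStart, long reqEnd, boolean supportsStart,
            boolean supportsEnd, int selectCount) {
            long start = (selectCount == 1 && supportsStart) ? reqStart : 0L;
            long end = supportsEnd ? reqEnd : Long.MAX_VALUE;
            boolean wrap = start != reqStart || end != reqEnd;
            return "SQL range " + start + ".." + end
                + (wrap ? ", wrapped to fake " + reqStart + ".." + reqEnd : "");
        }

        public static void main(String[] args) {
            // e.g. a dictionary that supports only an end index (LIMIT without OFFSET)
            System.out.println(plan(10L, 20L, false, true, 1));
            // -> SQL range 0..20, wrapped to fake 10..20
        }
    }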
@ -226,7 +226,6 @@ public class JDBCStoreQuery
final BitSet[] paged = (exps[0].projections.length > 0) ? null
: new BitSet[mappings.length];
union.select(new Union.Selector() {
public void select(Select sel, int idx) {
BitSet bits = populateSelect(sel, mappings[idx], subclasses,
(JDBCExpressionFactory) facts[idx], exps[idx], params,
@ -392,11 +391,11 @@ public class JDBCStoreQuery
* or the query is unique, use an eager setting of single. Otherwise use
* an eager mode of multiple.
*/
private int calculateEagerMode(QueryExpressions exps, long startIdx,
long endIdx) {
if (exps.projections.length > 0 || startIdx >= endIdx)
private int calculateEagerMode(QueryExpressions exps, long start,
long end) {
if (exps.projections.length > 0 || start >= end)
return EagerFetchModes.EAGER_NONE;
if (endIdx - startIdx == 1 || ctx.isUnique())
if (end - start == 1 || ctx.isUnique())
return EagerFetchModes.EAGER_JOIN;
return EagerFetchModes.EAGER_PARALLEL;
}
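
The eager-mode rule above depends only on the resolved range, the projections, and uniqueness. A runnable sketch of that rule, not part of this commit (plain int constants stand in for EagerFetchModes):

    public class EagerModeSketch {
        static final int NONE = 0, JOIN = 1, PARALLEL = 2;

        static int eagerMode(long start, long end, boolean unique, int projections) {
            if (projections > 0 || start >= end)
                return NONE;       // projection query or empty range: nothing to fetch eagerly
            if (end - start == 1 || unique)
                return JOIN;       // at most one result: joins are enough
            return PARALLEL;       // multiple results: parallel eager selects
        }

        public static void main(String[] args) {
            System.out.println(eagerMode(0, 1, false, 0));               // 1 (JOIN)
            System.out.println(eagerMode(0, Long.MAX_VALUE, false, 0));  // 2 (PARALLEL)
        }
    }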
@ -479,19 +478,13 @@ public class JDBCStoreQuery
count += stmnt.executeUpdate();
} finally {
if (stmnt != null)
try {
stmnt.close();
} catch (SQLException se) {
}
try { stmnt.close(); } catch (SQLException se) {}
}
}
} catch (SQLException se) {
throw SQLExceptions.getStore(se, ctx, _store.getDBDictionary());
} finally {
try {
conn.close();
} catch (SQLException se) {
}
try { conn.close(); } catch (SQLException se) {}
}
return Numbers.valueOf(count);
}
@ -572,22 +565,22 @@ public class JDBCStoreQuery
protected String[] getDataStoreActions(ClassMetaData base,
ClassMetaData[] metas, boolean subclasses, ExpressionFactory[] facts,
QueryExpressions[] exps, Object[] params, long startIdx, long endIdx) {
QueryExpressions[] exps, Object[] params, Range range) {
ClassMapping[] mappings = (ClassMapping[]) metas;
JDBCFetchConfiguration fetch = (JDBCFetchConfiguration) ctx
.getFetchConfiguration();
JDBCFetchConfiguration fetch = (JDBCFetchConfiguration) ctx.
getFetchConfiguration();
if (exps[0].fetchPaths != null) {
fetch.addFields(Arrays.asList(exps[0].fetchPaths));
fetch.addJoins(Arrays.asList(exps[0].fetchPaths));
}
int eager = calculateEagerMode(exps[0], startIdx, endIdx);
int eager = calculateEagerMode(exps[0], range.start, range.end);
eager = Math.min(eager, JDBCFetchConfiguration.EAGER_JOIN);
int subclassMode = fetch.getSubclassFetchMode((ClassMapping) base);
DBDictionary dict = _store.getDBDictionary();
long start = (mappings.length == 1 && dict.supportsSelectStartIndex)
? startIdx : 0L;
long end = (dict.supportsSelectEndIndex) ? endIdx : Long.MAX_VALUE;
? range.start : 0L;
long end = (dict.supportsSelectEndIndex) ? range.end : Long.MAX_VALUE;
// add selects with populate WHERE conditions to list
List sels = new ArrayList(mappings.length);

View File

@ -184,7 +184,7 @@ public class SQLStoreQuery
}
public ResultObjectProvider executeQuery(StoreQuery q,
Object[] params, boolean lrs, long startIdx, long endIdx) {
Object[] params, Range range) {
JDBCStore store = ((SQLStoreQuery) q).getStore();
DBDictionary dict = store.getDBDictionary();
String sql = q.getContext().getQueryString();
@ -209,11 +209,11 @@ public class SQLStoreQuery
PreparedStatement stmnt = null;
try {
// use the right method depending on sel vs. proc, lrs setting
if (_select && !lrs)
if (_select && !range.lrs)
stmnt = buf.prepareStatement(conn);
else if (_select)
stmnt = buf.prepareStatement(conn, fetch, -1, -1);
else if (!lrs)
else if (!range.lrs)
stmnt = buf.prepareCall(conn);
else
stmnt = buf.prepareCall(conn, fetch, -1, -1);
@ -240,13 +240,13 @@ public class SQLStoreQuery
throw SQLExceptions.getStore(se, dict);
}
if (startIdx != 0 || endIdx != Long.MAX_VALUE)
rop = new RangeResultObjectProvider(rop, startIdx, endIdx);
if (range.start != 0 || range.end != Long.MAX_VALUE)
rop = new RangeResultObjectProvider(rop, range.start,range.end);
return rop;
}
public String[] getDataStoreActions(StoreQuery q, Object[] params,
long startIdx, long endIdx) {
Range range) {
return new String[]{ q.getContext().getQueryString() };
}

View File

@ -48,8 +48,6 @@ class SubQ
private Class _type = null;
private ClassMetaData _meta = null;
private QueryExpressions _exps = null;
private long _startIdx = 0;
private long _endIdx = Long.MAX_VALUE;
/**
* Constructor. Supply candidate, whether subclasses are included in
@ -100,11 +98,8 @@ class SubQ
return _alias;
}
public void setQueryExpressions(QueryExpressions query, long startIdx,
long endIdx) {
public void setQueryExpressions(QueryExpressions query) {
_exps = query;
_startIdx = startIdx;
_endIdx = endIdx;
}
public void initialize(Select sel, JDBCStore store, boolean nullTest) {
@ -180,7 +175,6 @@ class SubQ
_cons.CACHE_NULL, fetch);
_cons.select(store, _candidate, _subs, sel, _exps, params,
fetch, fetch.EAGER_NONE);
sel.setRange(_startIdx, _endIdx);
if (size)
sql.appendCount(sel, fetch);

View File

@ -22,7 +22,8 @@ update-failed-no-failed-obj: Database operation failed. Update count for SQL \
virtual-mapping: Cannot instantiate virtual mapping "{0}".
press-key-end: Server running. Press enter to stop.
no-server-conf: There is no persistence server configured.
server-usage: Usage: java org.apache.openjpa.jdbc.kernel.StartPersistenceServer\n\
server-usage: Usage: \
java org.apache.openjpa.jdbc.kernel.StartPersistenceServer\n\
\t[-properties/-p <properties file or resource>]\n\
\t[-<property name> <property value>]*
cant-lock-on-load: The database is unable to lock this query. Each object \
@ -42,7 +43,7 @@ batch-not-supported: The update count for the statement was an invalid \
value ({0}). This indicates that your database or JDBC driver does not \
have complete support for executing batch statements. Batch \
functionality should be disabled by including "BatchLimit=0" in \
your org.apache.openjpa.jdbc.DBDictionary configuration property. Statement: {1}
your openjpa.jdbc.DBDictionary configuration property. Statement: {1}
bad-synch-mappings: Invalid SynchronizeMappings operation ("{0}") specified. \
Valid operations are: {1}
make-native-seq: Creating sequence.
@ -59,7 +60,8 @@ bad-seq-type: This sequence of type "{0}" cannot generate values for \
no-seq-sql: Error instantiating named sequence "{0}": Your database dictionary \
does not support native sequences. To tell the dictionary how to select \
sequence values, use:\n\
org.apache.openjpa.jdbc.DBDictionary: NextSequenceQuery="SELECT NEXT VALUE FOR \{0\}"\n\
openjpa.jdbc.DBDictionary: NextSequenceQuery="SELECT NEXT VALUE \
FOR \{0\}"\n\
Where the above string is replaced with the proper SQL for your database.
invalid-seq-sql: No rows returned for sql "{0}". Check your configuration.
insert-seq: Inserting row for this mapping into sequence table.
@ -72,7 +74,8 @@ seq-usage: Usage: java org.apache.openjpa.jdbc.kernel.TableJDBCSeq\n\
\t[-properties/-p <properties file or resource>]\n\
\t[-<property name> <property value>]*\n\
\t-action/-a <add | drop | get | set> [value]
clstable-seq-usage: Usage: java org.apache.openjpa.jdbc.kernel.ClassTableJDBCSeq\n\
clstable-seq-usage: Usage: \
java org.apache.openjpa.jdbc.kernel.ClassTableJDBCSeq\n\
\t[-properties/-p <properties file or resource>]\n\
\t[-<property name> <property value>]*\n\
\t-action/-a <add | drop | get | set>\n\

View File

@ -0,0 +1,94 @@
package org.apache.openjpa.ant;
import java.io.IOException;
import org.apache.openjpa.conf.OpenJPAConfiguration;
import org.apache.openjpa.conf.OpenJPAConfigurationImpl;
import org.apache.openjpa.enhance.ApplicationIdTool;
import org.apache.openjpa.lib.ant.AbstractTask;
import org.apache.openjpa.lib.conf.ConfigurationImpl;
import org.apache.openjpa.lib.util.CodeFormat;
import org.apache.openjpa.lib.util.Files;
/**
* <p>Executes the application id tool on the specified files. This task
* can take the following arguments:
* <ul>
* <li><code>directory</code></li>
* <li><code>ignoreErrors</code></li>
* <li><code>name</code></li>
* <li><code>suffix</code></li>
* <li><code>token</code></li>
* </ul>
* It can also take an embedded <code>codeFormat</code> element with attributes
* for the bean properties of the {@link CodeFormat}.</p>
*/
public class ApplicationIdToolTask
extends AbstractTask {
protected ApplicationIdTool.Flags flags = new ApplicationIdTool.Flags();
protected String dirName = null;
/**
* Default constructor.
*/
public ApplicationIdToolTask() {
flags.format = new CodeFormat();
}
/**
* Set the output directory we want the enhancer to write to.
*/
public void setDirectory(String dirName) {
this.dirName = dirName;
}
/**
* Set whether to ignore errors.
*/
public void setIgnoreErrors(boolean ignoreErrors) {
flags.ignoreErrors = ignoreErrors;
}
/**
* Set the name of the identity class; with this option you must supply
* exactly one class to run on.
*/
public void setName(String name) {
flags.name = name;
}
/**
* Set a suffix to append to persistent classes to form their identity
* class name.
*/
public void setSuffix(String suffix) {
flags.suffix = suffix;
}
/**
* Set the token to separate stringified primary key field values.
*/
public void setToken(String token) {
flags.token = token;
}
/**
* Create the embedded code format element.
*/
public Object createCodeFormat() {
return flags.format;
}
protected ConfigurationImpl newConfiguration() {
return new OpenJPAConfigurationImpl();
}
protected void executeOn(String[] files)
throws IOException, ClassNotFoundException {
flags.directory = (dirName == null) ? null
: Files.getFile(dirName, getClassLoader());
ApplicationIdTool.run((OpenJPAConfiguration) getConfiguration(), files,
flags, getClassLoader ());
}
}

View File

@ -293,30 +293,17 @@ public class QueryCacheStoreQuery
}
public ResultObjectProvider executeQuery(StoreQuery q, Object[] params,
boolean lrs, long startIdx, long endIdx) {
Range range) {
QueryCacheStoreQuery cq = (QueryCacheStoreQuery) q;
QueryKey key = QueryKey.newInstance(cq.getContext(),
_ex.isPacking(q), params, _candidate, _subs, startIdx, endIdx);
_ex.isPacking(q), params, _candidate, _subs, range.start,
range.end);
List cached = cq.checkCache(key);
if (cached != null)
return new ListResultObjectProvider(cached);
ResultObjectProvider rop = _ex.executeQuery(cq.getDelegate(),
params, lrs, startIdx, endIdx);
return cq.wrapResult(rop, key);
}
public ResultObjectProvider executeQuery(StoreQuery q, Map params,
boolean lrs, long startIdx, long endIdx) {
QueryCacheStoreQuery cq = (QueryCacheStoreQuery) q;
QueryKey key = QueryKey.newInstance(cq.getContext(),
_ex.isPacking(q), params, _candidate, _subs, startIdx, endIdx);
List cached = cq.checkCache(key);
if (cached != null)
return new ListResultObjectProvider(cached);
ResultObjectProvider rop = _ex.executeQuery(cq.getDelegate(),
params, lrs, startIdx, endIdx);
params, range);
return cq.wrapResult(rop, key);
}
@ -351,14 +338,6 @@ public class QueryCacheStoreQuery
}
}
public Number executeDelete(StoreQuery q, Map params) {
try {
return _ex.executeDelete(unwrap(q), params);
} finally {
clearAccesssPath(q);
}
}
public Number executeUpdate(StoreQuery q, Object[] params) {
try {
return _ex.executeUpdate(unwrap(q), params);
@ -367,22 +346,18 @@ public class QueryCacheStoreQuery
}
}
public Number executeUpdate(StoreQuery q, Map params) {
try {
return _ex.executeUpdate(unwrap(q), params);
} finally {
clearAccesssPath(q);
}
}
public String[] getDataStoreActions(StoreQuery q, Object[] params,
long startIdx, long endIdx) {
Range range) {
return EMPTY_STRINGS;
}
public void validate(StoreQuery q) {
_ex.validate(unwrap(q));
}
public void getRange(StoreQuery q, Object[] params, Range range) {
_ex.getRange(q, params, range);
}
public Object getOrderingValue(StoreQuery q, Object[] params,
Object resultObject, int orderIndex) {

View File

@ -107,39 +107,25 @@ public abstract class AbstractStoreQuery
public static abstract class AbstractExecutor
implements Executor {
public ResultObjectProvider executeQuery(StoreQuery q, Map params,
boolean lrs, long startIdx, long endIdx) {
Object[] arr = q.getContext().toParameterArray
(q.getContext().getParameterTypes(), params);
return executeQuery(q, arr, lrs, startIdx, endIdx);
}
public Number executeDelete(StoreQuery q, Object[] params) {
return q.getContext().deleteInMemory(this, params);
}
public Number executeDelete(StoreQuery q, Map params) {
return executeDelete(q, q.getContext().toParameterArray
(q.getContext().getParameterTypes(), params));
}
public Number executeUpdate(StoreQuery q, Object[] params) {
return q.getContext().updateInMemory(this, params);
}
public Number executeUpdate(StoreQuery q, Map params) {
return executeUpdate(q, q.getContext().toParameterArray
(q.getContext().getParameterTypes(), params));
}
public String[] getDataStoreActions(StoreQuery q, Object[] params,
long startIdx, long endIdx) {
Range range) {
return EMPTY_STRINGS;
}
public void validate(StoreQuery q) {
}
public void getRange(StoreQuery q, Object[] params, Range range) {
}
public Object getOrderingValue(StoreQuery q, Object[] params,
Object resultObject, int orderIndex) {
return null;

View File

@ -403,14 +403,6 @@ public class DelegatingQuery
}
}
public Object[] toParameterArray(LinkedMap paramTypes, Map params) {
try {
return _query.toParameterArray(paramTypes, params);
} catch (RuntimeException re) {
throw translate(re);
}
}
public Number deleteInMemory(StoreQuery.Executor ex, Object[] params) {
try {
return _query.deleteInMemory(ex, params);

View File

@ -27,6 +27,7 @@ import org.apache.commons.collections.map.LinkedMap;
import org.apache.openjpa.conf.OpenJPAConfiguration;
import org.apache.openjpa.kernel.exps.AbstractExpressionVisitor;
import org.apache.openjpa.kernel.exps.AggregateListener;
import org.apache.openjpa.kernel.exps.Constant;
import org.apache.openjpa.kernel.exps.ExpressionFactory;
import org.apache.openjpa.kernel.exps.ExpressionParser;
import org.apache.openjpa.kernel.exps.FilterListener;
@ -177,18 +178,13 @@ public class ExpressionStoreQuery
* each base type
* @param parsed the parsed query values
* @param params parameter values, or empty array
* @param lrs whether the result will be handled as a potentially
* large result set, or will be consumed greedily
* @param startIdx 0-based inclusive index for first result to return
* from result object provider
* @param endIdx 0-based exclusive index for last result to return
* from result object provider, or {@link Long#MAX_VALUE} for no max
* @param range result range
* @return a provider for matching objects
*/
protected ResultObjectProvider executeQuery(Executor ex,
ClassMetaData base, ClassMetaData[] types, boolean subclasses,
ExpressionFactory[] facts, QueryExpressions[] parsed, Object[] params,
boolean lrs, long startIdx, long endIdx) {
Range range) {
throw new UnsupportedException();
}
@ -246,16 +242,12 @@ public class ExpressionStoreQuery
* each base type
* @param parsed the parsed query values
* @param params parameter values, or empty array
* @param startIdx 0-based inclusive index for first result to return
* from result object provider
* @param endIdx 0-based exclusive index for last result to return
* from result object provider, or {@link Long#MAX_VALUE} for no max
* @param range result range
* @return a textual description of the query to execute
*/
protected String[] getDataStoreActions(Executor ex, ClassMetaData base,
ClassMetaData[] types, boolean subclasses, ExpressionFactory[] facts,
QueryExpressions[] parsed, Object[] params, long startIdx,
long endIdx) {
QueryExpressions[] parsed, Object[] params, Range range) {
return StoreQuery.EMPTY_STRINGS;
}
@ -328,6 +320,30 @@ public class ExpressionStoreQuery
ValidateGroupingExpressionVisitor.validate(q.getContext(), exps);
}
public void getRange(StoreQuery q, Object[] params, Range range) {
QueryExpressions exps = assertQueryExpression();
if (exps.range.length == 0)
return;
if (exps.range.length == 2
&& exps.range[0] instanceof Constant
&& exps.range[1] instanceof Constant) {
try {
range.start = ((Number) ((Constant) exps.range[0]).
getValue(params)).longValue();
range.end = ((Number) ((Constant) exps.range[1]).
getValue(params)).longValue();
return;
} catch (ClassCastException cce) {
// fall through to exception below
} catch (NullPointerException npe) {
// fall through to exception below
}
}
throw new UserException(_loc.get("only-range-constants",
q.getContext().getQueryString()));
}
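
The method above accepts range bounds only when both parsed expressions are constants (literals or parameters) with numeric values; anything else falls through to the only-range-constants UserException. A runnable sketch of that resolution step, not part of this commit, with a simplified Constant interface standing in for org.apache.openjpa.kernel.exps.Constant:

    // Simplified stand-in for org.apache.openjpa.kernel.exps.Constant.
    interface Constant {
        Object getValue(Object[] params);
    }

    public class RangeConstantSketch {
        // Resolve [low, high) from two parsed range expressions, mirroring the
        // cast-and-fall-through error handling above.
        static long[] resolve(Constant low, Constant high, Object[] params) {
            try {
                return new long[] {
                    ((Number) low.getValue(params)).longValue(),
                    ((Number) high.getValue(params)).longValue(),
                };
            } catch (ClassCastException | NullPointerException e) {
                throw new IllegalArgumentException(
                    "Range values must be numeric constants");
            }
        }

        public static void main(String[] args) {
            Object[] params = { Long.valueOf(5), Long.valueOf(25) };
            Constant low = p -> p[0];    // e.g. a parameter bound to position 0
            Constant high = p -> p[1];
            long[] range = resolve(low, high, params);
            System.out.println(range[0] + ".." + range[1]);   // prints 5..25
        }
    }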
public final Class getResultClass(StoreQuery q) {
return assertQueryExpression().resultClass;
}
@ -464,7 +480,7 @@ public class ExpressionStoreQuery
_subs = subclasses;
_factory = new InMemoryExpressionFactory();
_exps = new QueryExpressions[]{
_exps = new QueryExpressions[] {
parser.eval(parsed, q, _factory, _meta)
};
if (_exps[0].projections.length == 0)
@ -488,7 +504,7 @@ public class ExpressionStoreQuery
}
public ResultObjectProvider executeQuery(StoreQuery q,
Object[] params, boolean lrs, long startIdx, long endIdx) {
Object[] params, Range range) {
// execute in memory for candidate collection;
// also execute in memory for transactional extents
Collection coll = q.getContext().getCandidateCollection();
@ -539,19 +555,13 @@ public class ExpressionStoreQuery
results = _factory.distinct(_exps[0], coll == null, results);
ResultObjectProvider rop = new ListResultObjectProvider(results);
if (startIdx != 0 || endIdx != Long.MAX_VALUE)
rop = new RangeResultObjectProvider(rop, startIdx, endIdx);
if (range.start != 0 || range.end != Long.MAX_VALUE)
rop = new RangeResultObjectProvider(rop, range.start,range.end);
return rop;
}
public ResultObjectProvider executeQuery(StoreQuery q,
Map params, boolean lrs, long startIdx, long endIdx) {
return executeQuery(q, q.getContext().toParameterArray
(getParameterTypes(q), params), lrs, startIdx, endIdx);
}
public String[] getDataStoreActions(StoreQuery q, Object[] params,
long startIdx, long endIdx) {
Range range) {
// in memory queries have no datastore actions to perform
return StoreQuery.EMPTY_STRINGS;
}
@ -646,16 +656,10 @@ public class ExpressionStoreQuery
}
public ResultObjectProvider executeQuery(StoreQuery q,
Object[] params, boolean lrs, long startIdx, long endIdx) {
lrs = lrs && !isAggregate(q) && !hasGrouping(q);
Object[] params, Range range) {
range.lrs &= !isAggregate(q) && !hasGrouping(q);
return ((ExpressionStoreQuery) q).executeQuery(this, _meta, _metas,
_subs, _facts, _exps, params, lrs, startIdx, endIdx);
}
public ResultObjectProvider executeQuery(StoreQuery q,
Map params, boolean lrs, long startIdx, long endIdx) {
return executeQuery(q, q.getContext().toParameterArray
(getParameterTypes(q), params), lrs, startIdx, endIdx);
_subs, _facts, _exps, params, range);
}
public Number executeDelete(StoreQuery q, Object[] params) {
@ -666,11 +670,6 @@ public class ExpressionStoreQuery
return num;
}
public Number executeDelete(StoreQuery q, Map params) {
return executeDelete(q, q.getContext().toParameterArray
(getParameterTypes(q), params));
}
public Number executeUpdate(StoreQuery q, Object[] params) {
Number num = ((ExpressionStoreQuery) q).executeUpdate(this, _meta,
_metas, _subs, _facts, _exps, params);
@ -679,15 +678,10 @@ public class ExpressionStoreQuery
return num;
}
public Number executeUpdate(StoreQuery q, Map params) {
return executeUpdate(q, q.getContext().toParameterArray
(getParameterTypes(q), params));
}
public String[] getDataStoreActions(StoreQuery q, Object[] params,
long startIdx, long endIdx) {
Range range) {
return ((ExpressionStoreQuery) q).getDataStoreActions(this, _meta,
_metas, _subs, _facts, _exps, params, startIdx, endIdx);
_metas, _subs, _facts, _exps, params, range);
}
public Object getOrderingValue(StoreQuery q, Object[] params,

View File

@ -135,7 +135,7 @@ public class MethodStoreQuery
}
public ResultObjectProvider executeQuery(StoreQuery q,
Object[] params, boolean lrs, long startIdx, long endIdx) {
Object[] params, Range range) {
// convert the parameters into a map
Map paramMap;
if (params.length == 0)
@ -148,19 +148,14 @@ public class MethodStoreQuery
itr.hasNext(); idx++)
paramMap.put(itr.next(), params[idx]);
}
return executeQuery(q, paramMap, lrs, startIdx, endIdx);
}
public ResultObjectProvider executeQuery(StoreQuery q,
Map params, boolean lrs, long startIdx, long endIdx) {
FetchConfiguration fetch = q.getContext().getFetchConfiguration();
StoreContext sctx = q.getContext().getStoreContext();
ResultObjectProvider rop;
Object[] args;
if (_inMem) {
args = new Object[]{ sctx, _meta, (_subs) ? Boolean.TRUE
: Boolean.FALSE, null, params, fetch };
: Boolean.FALSE, null, paramMap, fetch };
Iterator itr = null;
Collection coll = q.getContext().getCandidateCollection();
@ -192,12 +187,12 @@ public class MethodStoreQuery
} else {
// datastore
args = new Object[]{ sctx, _meta, (_subs) ? Boolean.TRUE
: Boolean.FALSE, params, fetch };
: Boolean.FALSE, paramMap, fetch };
rop = (ResultObjectProvider) invoke(q, args);
}
if (startIdx != 0 || endIdx != Long.MAX_VALUE)
rop = new RangeResultObjectProvider(rop, startIdx, endIdx);
if (range.start != 0 || range.end != Long.MAX_VALUE)
rop = new RangeResultObjectProvider(rop, range.start,range.end);
return rop;
}

View File

@ -248,11 +248,6 @@ public interface QueryContext {
*/
public Collection getAggregateListeners();
/**
* Helper method to transform the given parameters into an array.
*/
public Object[] toParameterArray(LinkedMap paramTypes, Map params);
/**
* Helper method to delete the objects found by executing a query on
* the given executor.

View File

@ -104,6 +104,7 @@ public class QueryImpl
private Class _resultClass = null;
private transient long _startIdx = 0;
private transient long _endIdx = Long.MAX_VALUE;
private transient boolean _rangeSet = false;
// remember the list of all the results we have returned so we
// can free their resources when close or closeAll is called
@ -486,35 +487,13 @@ public class QueryImpl
}
public long getStartRange() {
lock();
try {
assertOpen();
if (_startIdx != 0 || _endIdx != Long.MAX_VALUE
|| _compiled != null || _query == null || _broker == null)
return _startIdx;
// check again after compilation; maybe encoded in string
compileForCompilation();
return _startIdx;
} finally {
unlock();
}
assertOpen();
return _startIdx;
}
public long getEndRange() {
lock();
try {
assertOpen();
if (_startIdx != 0 || _endIdx != Long.MAX_VALUE
|| _compiled != null || _query == null || _broker == null)
return _endIdx;
// check again after compilation; maybe encoded in string
compileForCompilation();
return _endIdx;
} finally {
unlock();
}
assertOpen();
return _endIdx;
}
public void setRange(long start, long end) {
@ -532,6 +511,7 @@ public class QueryImpl
// allowed modification: no read-only check
_startIdx = start;
_endIdx = end;
_rangeSet = true;
} finally {
unlock();
}
@ -703,7 +683,7 @@ public class QueryImpl
else
es[i] = _storeQuery.newDataStoreExecutor(metas[i], true);
}
return new MergedExecutor(es, this);
return new MergedExecutor(es);
} catch (OpenJPAException ke) {
throw ke;
} catch (RuntimeException re) {
@ -799,16 +779,18 @@ public class QueryImpl
StoreQuery.Executor ex = (isInMemory(operation))
? compileForInMemory(comp) : compileForDataStore(comp);
assertParameters(ex, params);
Object[] arr = (params.isEmpty()) ? StoreQuery.EMPTY_OBJECTS :
toParameterArray(ex.getParameterTypes(_storeQuery), params);
assertParameters(ex, arr);
if (_log.isTraceEnabled())
logExecution(operation, params);
if (operation == OP_SELECT)
return execute(ex, params);
return execute(ex, arr);
if (operation == OP_DELETE)
return delete(ex, params);
return delete(ex, arr);
if (operation == OP_UPDATE)
return update(ex, params);
return update(ex, arr);
throw new UnsupportedException();
} catch (OpenJPAException ke) {
throw ke;
@ -847,7 +829,7 @@ public class QueryImpl
return ((Number) execute(OP_UPDATE, params)).longValue();
}
public Object[] toParameterArray(LinkedMap paramTypes, Map params) {
private Object[] toParameterArray(LinkedMap paramTypes, Map params) {
if (params == null || params.isEmpty())
return StoreQuery.EMPTY_OBJECTS;
@ -940,16 +922,18 @@ public class QueryImpl
private Object execute(StoreQuery.Executor ex, Object[] params)
throws Exception {
// if this is an impossible result range, return null / empty list
if (_startIdx >= _endIdx)
StoreQuery.Range range = new StoreQuery.Range(_startIdx, _endIdx);
if (!_rangeSet)
ex.getRange(_storeQuery, params, range);
if (range.start >= range.end)
return emptyResult(ex);
// execute; if we have a result class or we have only one result
// and so need to remove it from its array, wrap in a packing rop
boolean lrs = isLRS();
ResultObjectProvider rop = ex.executeQuery(_storeQuery, params, lrs,
_startIdx, _endIdx);
range.lrs = isLRS(range.start, range.end);
ResultObjectProvider rop = ex.executeQuery(_storeQuery, params, range);
try {
return toResult(ex, rop, lrs);
return toResult(ex, rop, range);
} catch (Exception e) {
if (rop != null)
try { rop.close(); } catch (Exception e2) {}
@ -957,48 +941,6 @@ public class QueryImpl
}
}
/**
* Execute the query using the given compilation, executor, and parameter
* values. All other execute methods delegate to this one or to
* {@link #execute(StoreQuery.Executor,Object[])} after validation and
* locking.
*/
private Object execute(StoreQuery.Executor ex, Map params)
throws Exception {
// if this is an impossible result range, return null / empty list
if (_startIdx >= _endIdx)
return emptyResult(ex);
// execute; if we have a result class or we have only one result
// and so need to remove it from its array, wrap in a packing rop
boolean lrs = isLRS();
ResultObjectProvider rop = ex.executeQuery(_storeQuery, params, lrs,
_startIdx, _endIdx);
try {
return toResult(ex, rop, lrs);
} catch (Exception e) {
if (rop != null)
try {
rop.close();
} catch (Exception e2) {
}
throw e;
}
}
/**
* Delete the query using the given executor, and parameter
* values. All other execute methods delegate to this one or to
* {@link #delete(StoreQuery.Executor,Object[])} after validation and
* locking. The return value will be a Number indicating the number of
* instances deleted.
*/
private Number delete(StoreQuery.Executor ex, Map params)
throws Exception {
assertBulkModify();
return ex.executeDelete(_storeQuery, params);
}
/**
* Delete the query using the given executor, and parameter
* values. All other execute methods delegate to this one or to
@ -1008,7 +950,7 @@ public class QueryImpl
*/
private Number delete(StoreQuery.Executor ex, Object[] params)
throws Exception {
assertBulkModify();
assertBulkModify(ex, params);
return ex.executeDelete(_storeQuery, params);
}
@ -1030,19 +972,6 @@ public class QueryImpl
}
}
/**
* Update the query using the given executor and parameter
* values. All other execute methods delegate to this one or to
* {@link #update(StoreQuery.Executor,Object[])} after validation and
* locking. The return value will be a Number indicating the number of
* instances updated.
*/
private Number update(StoreQuery.Executor ex, Map params)
throws Exception {
assertBulkModify();
return ex.executeUpdate(_storeQuery, params);
}
/**
* Update the query using the given compilation, executor, and parameter
* values. All other execute methods delegate to this one or to
@ -1052,7 +981,7 @@ public class QueryImpl
*/
private Number update(StoreQuery.Executor ex, Object[] params)
throws Exception {
assertBulkModify();
assertBulkModify(ex, params);
return ex.executeUpdate(_storeQuery, params);
}
@ -1086,7 +1015,7 @@ public class QueryImpl
Map.Entry e = (Map.Entry) it.next();
FieldMetaData fmd = (FieldMetaData) e.getKey();
if (!(e.getValue() instanceof Constant))
throw new UserException(_loc.get("only-update-primitives"));
throw new UserException(_loc.get("only-update-constants"));
Constant value = (Constant) e.getValue();
Object val = value.getValue(params);
@ -1195,8 +1124,8 @@ public class QueryImpl
/**
* Return whether this should be treated as a potential large result set.
*/
private boolean isLRS() {
long range = _endIdx - _startIdx;
private boolean isLRS(long start, long end) {
long range = end - start;
return _fc.getFetchBatchSize() >= 0
&& !(range <= _fc.getFetchBatchSize()
|| (_fc.getFetchBatchSize() == 0 && range <= 50));
@ -1206,7 +1135,7 @@ public class QueryImpl
* Return the query result for the given result object provider.
*/
protected Object toResult(StoreQuery.Executor ex, ResultObjectProvider rop,
boolean lrs)
StoreQuery.Range range)
throws Exception {
// pack projections if necessary
String[] aliases = ex.getProjectionAliases(_storeQuery);
@ -1220,13 +1149,13 @@ public class QueryImpl
// if single result, extract it
if (_unique == Boolean.TRUE || (aliases.length > 0
&& !ex.hasGrouping(_storeQuery) && ex.isAggregate(_storeQuery)))
return singleResult(rop);
return singleResult(rop, range);
// now that we've executed the query, we can call isAggregate and
// hasGrouping efficiently
boolean detach = (_broker.getAutoDetach() &
AutoDetach.DETACH_NONTXREAD) > 0 && !_broker.isActive();
lrs = lrs && !ex.isAggregate(_storeQuery)
boolean lrs = range.lrs && !ex.isAggregate(_storeQuery)
&& !ex.hasGrouping(_storeQuery);
ResultList res = (!detach && lrs) ? _fc.newResultList(rop)
: new EagerResultList(rop);
@ -1281,7 +1210,8 @@ public class QueryImpl
* Extract an expected single result from the given provider. Used when
* the result is an ungrouped aggregate or the unique flag is set to true.
*/
private Object singleResult(ResultObjectProvider rop)
private Object singleResult(ResultObjectProvider rop,
StoreQuery.Range range)
throws Exception {
rop.open();
try {
@ -1293,7 +1223,7 @@ public class QueryImpl
Object single = null;
if (next) {
single = rop.getResultObject();
if (_endIdx != _startIdx + 1 && rop.next())
if (range.end != range.start + 1 && rop.next())
throw new InvalidStateException(_loc.get("not-unique",
_class, _query));
}
@ -1397,10 +1327,13 @@ public class QueryImpl
assertOpen();
StoreQuery.Executor ex = compileForExecutor();
assertParameters(ex, params);
Object[] arr = toParameterArray(ex.getParameterTypes(_storeQuery),
params);
return ex.getDataStoreActions(_storeQuery, arr, _startIdx, _endIdx);
assertParameters(ex, arr);
StoreQuery.Range range = new StoreQuery.Range(_startIdx, _endIdx);
if (!_rangeSet)
ex.getRange(_storeQuery, arr, range);
return ex.getDataStoreActions(_storeQuery, arr, range);
} catch (OpenJPAException ke) {
throw ke;
} catch (Exception e) {
@ -1654,44 +1587,16 @@ public class QueryImpl
* Check that we are in a state to be able to perform a bulk operation;
* also flush the current modifications if any elements are currently dirty.
*/
private void assertBulkModify() {
private void assertBulkModify(StoreQuery.Executor ex, Object[] params) {
_broker.assertActiveTransaction();
if (_startIdx != 0 || _endIdx != Long.MAX_VALUE)
throw new UserException(_loc.get("no-modify-range"));
if (_resultClass != null)
throw new UserException(_loc.get("no-modify-resultclass"));
}
/**
* Checks that the passed parameters match the declarations.
*/
private void assertParameters(StoreQuery.Executor ex, Map params) {
if (!_storeQuery.requiresParameterDeclarations())
return;
// check that all declared parameters are given compatible values
LinkedMap paramTypes = ex.getParameterTypes(_storeQuery);
if (paramTypes != null && !paramTypes.isEmpty()) {
Map.Entry entry;
for (Iterator itr = paramTypes.entrySet().iterator();
itr.hasNext();) {
entry = (Map.Entry) itr.next();
if (!params.containsKey(entry.getKey()))
throw new UserException(_loc.get("unbound-param",
entry.getKey()));
if (((Class) entry.getValue()).isPrimitive()
&& params.get(entry.getKey()) == null)
throw new UserException(_loc.get("null-primitive-param",
entry.getKey()));
}
}
// check that there are no extra params
int typeCount = (paramTypes == null) ? 0 : paramTypes.size();
int paramCount = (params == null) ? 0 : params.size();
if (paramCount > typeCount)
throw new UserException(_loc.get("extra-params", new Object[]
{ new Integer(typeCount), new Integer(paramCount) }));
StoreQuery.Range range = new StoreQuery.Range();
ex.getRange(_storeQuery, params, range);
if (range.start != 0 || range.end != Long.MAX_VALUE)
throw new UserException(_loc.get("no-modify-range"));
}
/**
@ -1708,7 +1613,7 @@ public class QueryImpl
paramTypes.keySet()));
if (typeCount < params.length)
throw new UserException(_loc.get("extra-params", new Object[]
{ new Integer(typeCount), new Integer(params.length) }));
{ String.valueOf(typeCount), String.valueOf(params.length) }));
Iterator itr = paramTypes.entrySet().iterator();
Map.Entry entry;
@ -1761,31 +1666,28 @@ public class QueryImpl
implements StoreQuery.Executor {
private final StoreQuery.Executor[] _executors;
private final QueryContext _ctx;
public MergedExecutor(StoreQuery.Executor[] executors,
QueryContext ctx) {
public MergedExecutor(StoreQuery.Executor[] executors) {
_executors = executors;
_ctx = ctx;
}
public ResultObjectProvider executeQuery(StoreQuery q,
Object[] params, boolean lrs, long startIdx, long endIdx) {
Object[] params, StoreQuery.Range range) {
if (_executors.length == 1)
return _executors[0].executeQuery(q, params, lrs, startIdx,
endIdx);
return _executors[0].executeQuery(q, params, range);
// use lrs settings if we couldn't take advantage of the start index
// so that hopefully the skip to the start will be efficient
lrs = lrs || (startIdx > 0
&& _ctx.getFetchConfiguration().getFetchBatchSize() >= 0);
StoreQuery.Range ropRange = new StoreQuery.Range(0, range.end);
ropRange.lrs = range.lrs || (range.start > 0 && q.getContext().
getFetchConfiguration().getFetchBatchSize() >= 0);
// execute the query; we cannot use the lower bound of the result
// range, but we can take advantage of the upper bound
ResultObjectProvider[] rops =
new ResultObjectProvider[_executors.length];
for (int i = 0; i < _executors.length; i++)
rops[i] = _executors[i].executeQuery(q, params, lrs, 0, endIdx);
rops[i] = _executors[i].executeQuery(q, params, ropRange);
boolean[] asc = _executors[0].getAscending(q);
ResultObjectProvider rop;
@ -1796,41 +1698,9 @@ public class QueryImpl
_executors, q, params);
// if there is a lower bound, wrap in range rop
if (startIdx != 0)
rop = new RangeResultObjectProvider(rop, startIdx, endIdx);
return rop;
}
public ResultObjectProvider executeQuery(StoreQuery q, Map params,
boolean lrs, long startIdx, long endIdx) {
if (_executors.length == 1)
return _executors[0].executeQuery(q, params, lrs, startIdx,
endIdx);
// use lrs settings if we couldn't take advantage of the start index
// so that hopefully the skip to the start will be efficient
lrs = lrs || (startIdx > 0
&& _ctx.getFetchConfiguration().getFetchBatchSize() >= 0);
// execute the query; we cannot use the lower bound of the result
// range, but we can take advantage of the upper bound
ResultObjectProvider[] rops =
new ResultObjectProvider[_executors.length];
for (int i = 0; i < _executors.length; i++)
rops[i] = _executors[i].executeQuery(q, params, lrs, 0, endIdx);
boolean[] asc = _executors[0].getAscending(q);
ResultObjectProvider rop;
if (asc.length == 0)
rop = new MergedResultObjectProvider(rops);
else
rop = new OrderingMergedResultObjectProvider(rops, asc,
_executors, q, _ctx.toParameterArray
(_executors[0].getParameterTypes(q), params));
// if there is a lower bound, wrap in range rop
if (startIdx != 0)
rop = new RangeResultObjectProvider(rop, startIdx, endIdx);
if (range.start != 0)
rop = new RangeResultObjectProvider(rop, range.start,
range.end);
return rop;
}
@ -1841,13 +1711,6 @@ public class QueryImpl
return Numbers.valueOf(num);
}
public Number executeDelete(StoreQuery q, Map params) {
long num = 0;
for (int i = 0; i < _executors.length; i++)
num += _executors[i].executeDelete(q, params).longValue();
return Numbers.valueOf(num);
}
public Number executeUpdate(StoreQuery q, Object[] params) {
long num = 0;
for (int i = 0; i < _executors.length; i++)
@ -1855,24 +1718,16 @@ public class QueryImpl
return Numbers.valueOf(num);
}
public Number executeUpdate(StoreQuery q, Map params) {
long num = 0;
for (int i = 0; i < _executors.length; i++)
num += _executors[i].executeUpdate(q, params).longValue();
return Numbers.valueOf(num);
}
public String[] getDataStoreActions(StoreQuery q, Object[] params,
long startIdx, long endIdx) {
StoreQuery.Range range) {
if (_executors.length == 1)
return _executors[0].getDataStoreActions(q, params,
startIdx, endIdx);
return _executors[0].getDataStoreActions(q, params, range);
List results = new ArrayList(_executors.length);
StoreQuery.Range ropRange = new StoreQuery.Range(0L, range.end);
String[] actions;
for (int i = 0; i < _executors.length; i++) {
actions = _executors[i].getDataStoreActions(q, params, 0,
endIdx);
actions = _executors[i].getDataStoreActions(q, params,ropRange);
if (actions != null && actions.length > 0)
results.addAll(Arrays.asList(actions));
}
@ -1883,6 +1738,11 @@ public class QueryImpl
_executors[0].validate(q);
}
public void getRange(StoreQuery q, Object[] params,
StoreQuery.Range range) {
_executors[0].getRange(q, params, range);
}
public Object getOrderingValue(StoreQuery q, Object[] params,
Object resultObject, int idx) {
// unfortunately, at this point (must be a merged rop containing

View File

@ -89,9 +89,12 @@ class SingleFieldManager
proxy = checkProxy();
if (proxy == null) {
proxy = (Proxy) _sm.newFieldProxy(field);
if (objval != null)
((Calendar) proxy).setTime(((Calendar) objval).
getTime());
if (objval != null) {
Calendar pcal = (Calendar) proxy;
Calendar ocal = (Calendar) objval;
pcal.setTime(ocal.getTime());
pcal.setTimeZone(ocal.getTimeZone());
}
ret = true;
}
break;
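
For context on the change above (plain java.util.Calendar behaviour, not OpenJPA-specific): Calendar.setTime copies only the instant, so without the added setTimeZone call the proxied calendar silently falls back to whatever zone it was created with. A small standalone demonstration:

    import java.util.Calendar;
    import java.util.TimeZone;

    public class CalendarCopyDemo {
        public static void main(String[] args) {
            Calendar original = Calendar.getInstance(TimeZone.getTimeZone("Asia/Tokyo"));

            Calendar copy = Calendar.getInstance();  // created with the JVM default zone
            copy.setTime(original.getTime());        // copies only the instant
            // Without the extra step the zone is lost:
            System.out.println(copy.getTimeZone().getID());  // default zone, not Asia/Tokyo

            copy.setTimeZone(original.getTimeZone());        // the fix applied above
            System.out.println(copy.getTimeZone().getID());  // Asia/Tokyo
        }
    }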

View File

@ -158,6 +158,23 @@ public interface StoreQuery
*/
public boolean supportsParameterDeclarations();
/**
* A query result range.
*/
public static class Range {
public long start = 0L;
public long end = Long.MAX_VALUE;
public boolean lrs = false;
public Range() {
}
public Range(long start, long end) {
this.start = start;
this.end = end;
}
}
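
A hedged usage sketch of the new value object, not part of this commit (it uses only the members declared above plus the Executor methods added below; package names are assumed from the surrounding sources): callers seed a Range with any user-specified bounds, let the executor overwrite them from a range encoded in the query itself, and bail out early on an impossible range.

    import org.apache.openjpa.kernel.StoreQuery;
    import org.apache.openjpa.lib.rop.ResultObjectProvider;

    public class RangeUsageSketch {
        /**
         * Execute with the user-requested bounds, letting the executor fill in
         * any range declared in the query string. Returns null for an
         * impossible range; the caller substitutes an empty result.
         */
        public static ResultObjectProvider run(StoreQuery q,
            StoreQuery.Executor ex, Object[] params, long startIdx, long endIdx,
            boolean rangeSetExplicitly) {
            StoreQuery.Range range = new StoreQuery.Range(startIdx, endIdx);
            if (!rangeSetExplicitly)
                ex.getRange(q, params, range);
            if (range.start >= range.end)
                return null;
            return ex.executeQuery(q, params, range);
        }
    }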
/**
* An executor provides a uniform interface to the mechanism for executing
* either an in-memory or datastore query. In the common case, the
@ -179,17 +196,8 @@ public interface StoreQuery
* aggregate and does not have grouping
* @see #isPacking
*/
public ResultObjectProvider executeQuery(StoreQuery q,
Object[] params, boolean lrs, long startIdx, long endIdx);
/**
* Return the result of executing this query with the given parameter
* values. Most implementation will use
* {@link QueryContext#toParameterArray} to transform the parameters
* into an array and invoke the array version of this method.
*/
public ResultObjectProvider executeQuery(StoreQuery q, Map params,
boolean lrs, long startIdx, long endIdx);
public ResultObjectProvider executeQuery(StoreQuery q, Object[] params,
Range range);
/**
* Deleted the objects that result from the execution of the
@ -197,36 +205,30 @@ public interface StoreQuery
*/
public Number executeDelete(StoreQuery q, Object[] params);
/**
* Deleted the objects that result from the execution of the
* query, retuning the number of objects that were deleted.
*/
public Number executeDelete(StoreQuery q, Map params);
/**
* Updates the objects that result from the execution of the
* query, retuning the number of objects that were updated.
*/
public Number executeUpdate(StoreQuery q, Object[] params);
/**
* Updates the objects that result from the execution of the
* query, retuning the number of objects that were updated.
*/
public Number executeUpdate(StoreQuery q, Map params);
/**
* Return a description of the commands that will be sent to
* the datastore in order to execute the query.
*/
public String[] getDataStoreActions(StoreQuery q, Object[] params,
long startIdx, long endIdx);
Range range);
/**
* Validate components of query.
*/
public void validate(StoreQuery q);
/**
* Mutate the given range to set any range information stored in
* the query string and/or parameters.
*/
public void getRange(StoreQuery q, Object[] params, Range range);
/**
* Extract the value of the <code>orderIndex</code>th ordering
* expression in {@link Query#getOrderingClauses} from the

View File

@ -16,12 +16,11 @@
package org.apache.openjpa.kernel.exps;
/**
* Interface for any constant value.
* Interface for any query constant value.
*
* @author Marc Prud'hommeaux
*/
public interface Constant
extends Value {
public interface Constant {
/**
* Return the value for this constant given the specified parameters.

View File

@ -22,7 +22,7 @@ package org.apache.openjpa.kernel.exps;
* @nojavadoc
*/
public interface Literal
extends Constant {
extends Value, Constant {
public static final int TYPE_UNKNOWN = 0;
public static final int TYPE_NUMBER = 1;

View File

@ -22,7 +22,7 @@ package org.apache.openjpa.kernel.exps;
* @nojavadoc
*/
public interface Parameter
extends Constant {
extends Value, Constant {
/**
* Set the index of this parameter.

View File

@ -15,6 +15,8 @@
*/
package org.apache.openjpa.kernel.exps;
import java.util.Collections;
import java.util.HashMap;
import java.util.Map;
import org.apache.commons.collections.map.LinkedMap;
@ -40,8 +42,7 @@ public class QueryExpressions {
/**
* Map of {@link FieldMetaData},{@link Value} for update statements.
*/
public Map updates = null;
public Map updates = Collections.EMPTY_MAP;
public int distinct = DISTINCT_AUTO;
public String alias = null;
public Value[] projections = EMPTY_VALUES;
@ -59,8 +60,12 @@ public class QueryExpressions {
public int operation = QueryOperations.OP_SELECT;
public ClassMetaData[] accessPath = StoreQuery.EMPTY_METAS;
public String[] fetchPaths = StoreQuery.EMPTY_STRINGS;
public Value[] range = EMPTY_VALUES;
private Boolean _aggregate = null;
/**
* Whether this is an aggregate results.
*/
public boolean isAggregate() {
if (projections.length == 0)
return false;
@ -70,6 +75,15 @@ public class QueryExpressions {
return _aggregate.booleanValue();
}
/**
* Add an update.
*/
public void putUpdate(FieldMetaData fmd, Value val) {
if (updates == Collections.EMPTY_MAP)
updates = new HashMap();
updates.put(fmd, val);
}
/**
* Visitor to determine whether our projections are aggregates.
*/

View File

@ -42,8 +42,7 @@ class SubQ
return _alias;
}
public void setQueryExpressions(QueryExpressions q, long startIdx,
long endIdx) {
public void setQueryExpressions(QueryExpressions q) {
}
public Class getType() {

View File

@ -32,6 +32,5 @@ public interface Subquery
/**
* Set the parsed subquery.
*/
public void setQueryExpressions(QueryExpressions query, long startIdx,
long endIdx);
public void setQueryExpressions(QueryExpressions query);
}

View File

@ -444,30 +444,18 @@ class JPQLExpressionBuilder
protected void evalSetClause(QueryExpressions exps) {
// handle SET field = value
JPQLNode[] nodes = root().findChildrenByID(JJTUPDATEITEM);
Map updates = null;
for (int i = 0; nodes != null && i < nodes.length; i++) {
if (updates == null)
updates = new HashMap();
FieldMetaData field = getPath(firstChild(nodes[i])).last();
Value val = getValue(onlyChild(lastChild(nodes[i])));
updates.put(field, val);
exps.putUpdate(field, val);
}
if (updates != null)
exps.updates = updates;
}
private Expression evalWhereClause(QueryExpressions exps) {
// evaluate the WHERE clause
JPQLNode whereNode = root().findChildByID(JJTWHERE, false);
if (whereNode == null)
return null;
return (Expression) eval(whereNode);
}
@ -1110,7 +1098,7 @@ class JPQLExpressionBuilder
try {
QueryExpressions subexp = getQueryExpressions();
subq.setQueryExpressions(subexp, 0, Long.MAX_VALUE);
subq.setQueryExpressions(subexp);
return subq;
} finally {
// remove the subquery parse context

View File

@ -0,0 +1,28 @@
package sun.misc;
/**
* Compilation stub for pre-1.4.2 JREs. Thanks to it, the whole backport
* concurrency package compiles and works with 1.4.2 as well as with earlier
* JREs, and takes advantage of native Perf class when running on 1.4.2 while
* seamlessly falling back to System.currentTimeMillis() on previous JREs. This
* class should NOT be included in the binary distribution of backport.
*
* @author Dawid Kurzyniec
* @version 1.0
*/
public final class Perf {
private static final Perf perf = new Perf();
public static Perf getPerf() { return perf; }
private Perf() {}
public long highResCounter() {
return System.currentTimeMillis();
}
public long highResFrequency() {
return 1000L;
}
}
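
The stub reports millisecond ticks with a frequency of 1000, so code that converts a counter delta to seconds by dividing by the reported frequency gets the same answer whether this stub or the real 1.4.2 sun.misc.Perf is on the class path. A small usage sketch, not part of this commit (assumes the stub above, or a 1.4-era JRE, is available at compile time):

    public class PerfStubDemo {
        public static void main(String[] args) throws InterruptedException {
            sun.misc.Perf perf = sun.misc.Perf.getPerf();
            long start = perf.highResCounter();
            Thread.sleep(250);
            long end = perf.highResCounter();
            // Dividing the tick delta by the reported frequency yields seconds
            // regardless of the underlying tick resolution.
            double seconds = (end - start) / (double) perf.highResFrequency();
            System.out.println(seconds + " s elapsed");
        }
    }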

View File

@ -4,8 +4,6 @@ in-mem-subquery: Subqueries are not supported for queries that execute \
not-comp: The evaluation of the ordering expression of this query produced \
non-comparable values "{0}" and "{1}". Please check that the ordering \
clause is valid.
bad-wild: The wildcard string "{0}" is invalid.
bad-regexp: The regular expression string "{0}" is invalid.
agg-in-filter: If you use an aggregate function in a query filter, you must \
make sure to only invoke the aggregate on collections.
parse-error: An error occurred while parsing the query filter "{1}". \

View File

@ -166,7 +166,9 @@ inverse-consistency: An inverse inconsistency in the object model was \
detected while flushing the field "{0}" of the instance with id "{1}" \
in context "{2}".
no-brokerfactory: You did not name the factory class with the required \
property org.apache.openjpa.BrokerFactory.
property openjpa.BrokerFactory. Normally this property defaults \
appropriately; have you forgotten to include all the OpenJPA jars in your \
classpath?
brokerfactory-excep: There was an error when invoking the static \
getInstance method on the named factory class "{0}". See the \
nested exception for details.
@ -226,7 +228,7 @@ force-in-mem: This query on type "{0}" must load the entire candidate class \
there are dirty instances that may affect the query''s outcome in the \
cache.
cant-exec-inmem: Queries of this type ("{0}") cannot be executed in-memory. \
Either set IgnoreCache to true, set the org.apache.openjpa.FlushBeforeQueries \
Either set IgnoreCache to true, set the openjpa.FlushBeforeQueries \
property to true, or execute the query before changing any instances in \
the transaction.
executing-query: Executing query: {0}
@ -269,9 +271,9 @@ bad-method-class: You set the method name of this openjpa.MethodQL query to \
"{1}", but class "{0}" is not a valid class name. Make sure to fully \
qualify the class name or to import its package into this query if the \
class is not in the query candidate class'' package.
method-not-static: Method "{0}" named in the org.apache.openjpa.MethodQL query must be static.
method-not-static: Method "{0}" named in the MethodQL query must be static.
no-method: You must set the query filter to the name of the method to execute \
for this org.apache.openjpa.MethodQL query instance.
for this MethodQL query instance.
method-error: There was an error invoking method "{0}" with arguments "{1}".
bad-param-type: The type "{0}" as used in the parameter declarations \
could not be found in the imports.
@ -295,13 +297,15 @@ bad-inmem-method: Method "{0}(StoreContext, ClassMetaData, boolean, Object, \
true.
bad-datastore-method: Method "{0}(StoreContext, ClassMetaData, boolean, Map, \
FetchConfiguration)" is not declared in type "{1}". Check \
the method name supplied in your org.apache.openjpa.MethodQL query filter. OpenJPA is \
the method name supplied in your MethodQL query filter. OpenJPA is \
attempting to execute this query against the datastore; if you implemented \
the in-memory method instead (a method with the same signature but with an \
Object argument) and want this query to execute in-memory, supplly a \
Collection of candidates to filter.
only-update-primitives: Bulk update queries when executed in memory \
may only change the value of primitives and simple Object fields.
only-update-constants: Bulk update queries when executed in memory \
may only update to constant values.
only-range-constants: Range values must be numeric constants. Illegal query: \
{0}
no-savepoint-copy: Unable to copy field "{0}" for savepoint.
savepoint-exists: A savepoint with the name "{0}" already exists. \
Each savepoint name must be unique.

View File

@ -31,7 +31,7 @@ class CondVar implements Condition, java.io.Serializable {
*/
protected final ExclusiveLock lock;
/* *
/**
* Create a new CondVar that relies on the given mutual exclusion lock.
* @param lock A non-reentrant mutual exclusion lock.
*/

View File

@ -16,8 +16,8 @@ bad-second: "{0}" declares a secondary table on columns that do not support \
unique-constraints: Detected declared unique constraints on "{0}". OpenJPA \
does not yet support the @UniqueConstraint annotation.
inconsist-col-attrs: Detected inconsistent values of "unique" on different \
columns of "{0}". OpenJPA does not yet support different per-column unique \
values. All columns for this mapping must use the same values.
columns of "{0}". OpenJPA does not yet support different per-column \
unique values. All columns for this mapping must use the same values.
pk-as-fk: The "usePKasFK" attribute is not yet supported. Mapping your \
OneToOne using JoinColumns that match your id property columns will work.
no-override-name: Missing "name" property on mapping override for "{0}".